# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse_lazy # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon import workflows
import logging
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers \
import forms as project_forms
from openstack_dashboard.dashboards.project.loadbalancers \
import tabs as project_tabs
from openstack_dashboard.dashboards.project.loadbalancers \
import workflows as project_workflows
import re
LOG = logging.getLogger(__name__)
class IndexView(tabs.TabView):
tab_group_class = (project_tabs.LoadBalancerTabs)
template_name = 'project/loadbalancers/details_tabs.html'
def post(self, request, *args, **kwargs):
obj_ids = request.POST.getlist('object_ids')
action = request.POST['action']
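# Horizon table actions typically arrive as strings of the form
# "<table>__delete<type>__<row_id>" (an assumption based on Horizon's
# table-action naming); the regexes below pull out the object type
# ('monitor', 'pool', 'member' or 'vip') and, for row-level actions where
# object_ids is empty, the trailing object id.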
m = re.search('.delete([a-z]+)', action).group(1)
if not obj_ids:
obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
if m == 'monitor':
for obj_id in obj_ids:
try:
api.lbaas.pool_health_monitor_delete(request, obj_id)
messages.success(request, _('Deleted monitor %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete monitor. %s') % e)
if m == 'pool':
for obj_id in obj_ids:
try:
api.lbaas.pool_delete(request, obj_id)
messages.success(request, _('Deleted pool %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete pool. %s') % e)
if m == 'member':
for obj_id in obj_ids:
try:
api.lbaas.member_delete(request, obj_id)
messages.success(request, _('Deleted member %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete member. %s') % e)
if m == 'vip':
for obj_id in obj_ids:
vip_id = None
try:
vip_id = api.lbaas.pool_get(request, obj_id).vip_id
except Exception as e:
exceptions.handle(request,
_('Unable to locate VIP to delete. %s')
% e)
if vip_id is not None:
try:
api.lbaas.vip_delete(request, vip_id)
messages.success(request, _('Deleted VIP %s') % vip_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete VIP. %s') % e)
return self.get(request, *args, **kwargs)
class AddPoolView(workflows.WorkflowView):
workflow_class = project_workflows.AddPool
def get_initial(self):
initial = super(AddPoolView, self).get_initial()
return initial
class AddVipView(workflows.WorkflowView):
workflow_class = project_workflows.AddVip
def get_context_data(self, **kwargs):
context = super(AddVipView, self).get_context_data(**kwargs)
return context
def get_initial(self):
initial = super(AddVipView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['subnet'] = api.neutron.subnet_get(
self.request, pool.subnet_id).cidr
except Exception as e:
initial['subnet'] = ''
msg = _('Unable to retrieve pool subnet. %s') % e
exceptions.handle(self.request, msg)
return initial
class AddMemberView(workflows.WorkflowView):
workflow_class = project_workflows.AddMember
def get_initial(self):
initial = super(AddMemberView, self).get_initial()
return initial
class AddMonitorView(workflows.WorkflowView):
workflow_class = project_workflows.AddMonitor
def get_initial(self):
initial = super(AddMonitorView, self).get_initial()
return initial
class PoolDetailsView(tabs.TabView):
tab_group_class = (project_tabs.PoolDetailsTabs)
template_name = 'project/loadbalancers/details_tabs.html'
class VipDetailsView(tabs.TabView):
tab_group_class = (project_tabs.VipDetailsTabs)
template_name = 'project/loadbalancers/details_tabs.html'
class MemberDetailsView(tabs.TabView):
tab_group_class = (project_tabs.MemberDetailsTabs)
template_name = 'project/loadbalancers/details_tabs.html'
class MonitorDetailsView(tabs.TabView):
tab_group_class = (project_tabs.MonitorDetailsTabs)
template_name = 'project/loadbalancers/details_tabs.html'
class UpdatePoolView(forms.ModalFormView):
form_class = project_forms.UpdatePool
template_name = "project/loadbalancers/updatepool.html"
context_object_name = 'pool'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdatePoolView, self).get_context_data(**kwargs)
context["pool_id"] = self.kwargs['pool_id']
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
pool_id = self.kwargs['pool_id']
try:
self._object = api.lbaas.pool_get(self.request, pool_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve pool details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
pool = self._get_object()
return {'name': pool['name'],
'pool_id': pool['id'],
'description': pool['description'],
'lb_method': pool['lb_method'],
'admin_state_up': pool['admin_state_up']}
class UpdateVipView(forms.ModalFormView):
form_class = project_forms.UpdateVip
template_name = "project/loadbalancers/updatevip.html"
context_object_name = 'vip'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdateVipView, self).get_context_data(**kwargs)
context["vip_id"] = self.kwargs['vip_id']
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
vip_id = self.kwargs['vip_id']
try:
self._object = api.lbaas.vip_get(self.request, vip_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve VIP details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
vip = self._get_object()
stype = vip['session_persistence']
if stype['type'] == 'APP_COOKIE':
cookie = stype['cookie_name']
else:
cookie = ''
return {'name': vip['name'],
'vip_id': vip['id'],
'description': vip['description'],
'pool_id': vip['pool_id'],
'session_persistence': vip['session_persistence']['type'],
'cookie_name': cookie,
'connection_limit': vip['connection_limit'],
'admin_state_up': vip['admin_state_up']}
class UpdateMemberView(forms.ModalFormView):
form_class = project_forms.UpdateMember
template_name = "project/loadbalancers/updatemember.html"
context_object_name = 'member'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdateMemberView, self).get_context_data(**kwargs)
context["member_id"] = self.kwargs['member_id']
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
member_id = self.kwargs['member_id']
try:
self._object = api.lbaas.member_get(self.request, member_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve member details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
member = self._get_object()
return {'member_id': member['id'],
'pool_id': member['pool_id'],
'weight': member['weight'],
'admin_state_up': member['admin_state_up']}
class UpdateMonitorView(forms.ModalFormView):
form_class = project_forms.UpdateMonitor
template_name = "project/loadbalancers/updatemonitor.html"
context_object_name = 'monitor'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdateMonitorView, self).get_context_data(**kwargs)
context["monitor_id"] = self.kwargs['monitor_id']
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
monitor_id = self.kwargs['monitor_id']
try:
self._object = api.lbaas.pool_health_monitor_get(
self.request, monitor_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve health monitor details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
monitor = self._get_object()
return {'monitor_id': monitor['id'],
'delay': monitor['delay'],
'timeout': monitor['timeout'],
'max_retries': monitor['max_retries'],
'admin_state_up': monitor['admin_state_up']}
class AddPMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.AddPMAssociation
def get_initial(self):
initial = super(AddPMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
class DeletePMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.DeletePMAssociation
def get_initial(self):
initial = super(DeletePMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
# Techniques to smooth jumps in location tracking. Each of these returns a
# boolean mask of inliers and outliers. We assume that the incoming dataframe
# has a column called "speed" that represents the speed at each point. The
# speed of the first point is zero.
# The result is in the inlier_mask field of the appropriate object
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import *
from past.utils import old_div
from builtins import object
import logging
import math
import pandas as pd
import numpy as np
import attrdict as ad
from enum import Enum
import emission.analysis.point_features as pf
import emission.core.common as ec
# logging.basicConfig(level=logging.DEBUG)
class SmoothBoundary(object):
def __init__(self, maxSpeed = 100):
self.maxSpeed = maxSpeed
def filter(self, with_speeds_df):
self.inlier_mask_ = [True] * with_speeds_df.shape[0]
prev_pt = None
for (i, pt) in enumerate(with_speeds_df[["mLatitude", "mLongitude", "mTime", "speed"]].to_dict('records')):
pt = ad.AttrDict(dict(pt))
if prev_pt is None:
# Don't have enough data yet, so don't make any decisions
prev_pt = pt
else:
currSpeed = pf.calSpeed(prev_pt, pt)
logging.debug("while considering point %s(%s), prev_pt (%s) speed = %s" % (pt, i, prev_pt, currSpeed))
if currSpeed > self.maxSpeed:
logging.debug("currSpeed > %s, removing index %s " % (self.maxSpeed, i))
self.inlier_mask_[i] = False
else:
logging.debug("currSpeed < %s, retaining index %s " % (self.maxSpeed, i))
prev_pt = pt
logging.info("Filtering complete, removed indices = %s" % np.nonzero(self.inlier_mask_))
# We intentionally don't use a dataframe for the segment list, using a
# segment class instead. The reasons are as follows:
# 1. We don't really need any of the fancy row, column or matrix
# operations. We are going to iterate over the list one step at a time.
# 2. We potentially have to resplit segments, which involves inserting
# elements at arbitrary points in the list. With a dataframe, we would need
# to reindex every time we did this, which we won't have to do if we use a
# simple list. It should be trivial to re-implement this as a DataFrame if
# we so choose.
# This is only used by SmoothZigzag currently. I had originally made this an
# inner class, but inner classes in python are not special so let's move it out
# and make the indenting less complicated
class Segment(object):
State = Enum("Segment_State", "UNKNOWN BAD GOOD")
CLUSTER_RADIUS = 100
def __init__(self, start, end, zigzag_algo):
self.start = start
self.end = end
self.state = Segment.State.UNKNOWN
self.point_count = end - start
self.za = zigzag_algo
self.segment_df = self.za.with_speeds_df[start:end]
self.distance = self.za.cal_distance(self)
self.is_cluster = (self.distance < Segment.CLUSTER_RADIUS)
logging.debug("For cluster %s - %s, distance = %s, is_cluster = %s" %
(self.start, self.end, self.distance, self.is_cluster))
def __repr__(self):
return "Segment(%s, %s, %s)" % (self.start, self.end, self.distance)
class SmoothZigzag(object):
Direction = Enum("IterationDirection", 'RIGHT LEFT')
@staticmethod
def end_points_distance(segment):
if segment.start == segment.end:
raise RuntimeError("This is messed up segment. Investigate further")
return pf.calDistance(segment.segment_df.iloc[0], segment.segment_df.iloc[-1])
@staticmethod
def shortest_non_cluster_segment(segment_list):
assert(len(segment_list) > 0)
segment_distance_list = [[segment.distance, segment.is_cluster] for segment in segment_list]
segment_distance_df = pd.DataFrame(segment_distance_list, columns = ["distance", "is_cluster"])
non_cluster_segments = segment_distance_df[segment_distance_df.is_cluster == False]
logging.debug("non_cluster_segments %s" % non_cluster_segments)
if len(non_cluster_segments) == 0:
# If every segment is a cluster, then it is very hard to
# distinguish between them for zigzags. Let us see if there is any
# one point cluster - i.e. where the distance is zero. If so, that is likely
# to be a bad cluster, so we return the one to the right or left of it
minDistanceCluster = segment_distance_df.distance.idxmin()
if minDistanceCluster == 0:
goodCluster = minDistanceCluster + 1
assert(goodCluster < len(segment_list))
return goodCluster
else:
goodCluster = minDistanceCluster - 1
assert(goodCluster >= 0)
return goodCluster
retVal = non_cluster_segments.distance.idxmin()
logging.debug("shortest_non_cluster_segment = %s" % retVal)
return retVal
def __init__(self, is_ios, same_point_distance, maxSpeed = 100):
self.is_ios = is_ios
self.same_point_distance = same_point_distance
self.maxSpeed = maxSpeed
self.cal_distance = self.end_points_distance
self.find_start_segment = self.shortest_non_cluster_segment
def find_segments(self):
if self.is_ios:
segmentation_points = self.get_segmentation_points_ios()
else:
segmentation_points = self.get_segmentation_points_android()
segmentation_points.insert(0, 0)
last_point = self.with_speeds_df.shape[0]
if last_point not in segmentation_points:
logging.debug("smoothing: last_point index %s not in found points %s" %
(last_point, segmentation_points))
segmentation_points.insert(len(segmentation_points), last_point)
logging.debug("smoothing: added new entry %s" % segmentation_points[-1])
self.segment_list = [Segment(start, end, self) for (start, end) in
zip(segmentation_points, segmentation_points[1:])]
logging.debug("smoothing: segment_list = %s" % self.segment_list)
def get_segmentation_points_android(self):
return self.with_speeds_df[self.with_speeds_df.speed > self.maxSpeed].index.tolist()
def get_segmentation_points_ios(self):
jump_indices = self.with_speeds_df[self.with_speeds_df.speed > self.maxSpeed].index
# On iOS, as seen in ...., this is likely to be the jump back. We now need to find
# the jump to
jumps = self.with_speeds_df[(self.with_speeds_df.speed > self.maxSpeed) &
(self.with_speeds_df.distance > 100)].index
logging.debug("After first step, jumps = %s" % jumps)
all_jumps = []
for jump in jumps.tolist():
jump_to = self.with_speeds_df[(self.with_speeds_df.index < jump) & (
self.with_speeds_df.distance > 100)].index[-1]
logging.debug("for jump %s, jump_to = %s" % (jump, jump_to))
all_jumps.append(jump_to)
all_jumps.append(jump)
logging.debug("for ios, returning all_jumps = %s" % all_jumps)
return all_jumps
def split_segment(self, i, curr_seg, direction):
import emission.analysis.intake.cleaning.location_smoothing as ls
if direction == SmoothZigzag.Direction.RIGHT:
recomputed_speed_df = ls.recalc_speed(curr_seg.segment_df)
# Find the first point that does not belong to the cluster
new_split_point = recomputed_speed_df[recomputed_speed_df.distance > Segment.CLUSTER_RADIUS].index[0]
new_seg = Segment(new_split_point, curr_seg.end, self)
replace_seg = Segment(curr_seg.start, new_split_point, self)
self.segment_list[i] = replace_seg
self.segment_list.insert(i+1, new_seg)
return replace_seg
if direction == SmoothZigzag.Direction.LEFT:
# Need to compute speeds and distances from the left edge
recomputed_speed_df = ls.recalc_speed(curr_seg.segment_df.iloc[::-1])
logging.debug("Recomputed_speed_df = %s", recomputed_speed_df.speed)
# Find the first point that does not belong to the cluster
new_split_point = recomputed_speed_df[recomputed_speed_df.distance > Segment.CLUSTER_RADIUS].index[0]
logging.debug("new split point = %s", new_split_point)
new_seg = Segment(curr_seg.start, new_split_point + 1, self)
replace_seg = Segment(new_split_point + 1, curr_seg.end, self)
self.segment_list[i] = replace_seg
self.segment_list.insert(i, new_seg)
return replace_seg
@staticmethod
def toggle(expected_state):
assert expected_state == Segment.State.BAD or expected_state == Segment.State.GOOD, "Unable to toggle %s " % expected_state
if expected_state == Segment.State.BAD:
return Segment.State.GOOD
if expected_state == Segment.State.GOOD:
return Segment.State.BAD
def mark_segment_states(self, start_segment_idx, direction):
"""
This is the most complicated part of the algorithm.
"""
if direction == SmoothZigzag.Direction.RIGHT:
inc = 1
check = lambda i: i < len(self.segment_list)
if direction == SmoothZigzag.Direction.LEFT:
inc = -1
check = lambda i: i >= 0
i = start_segment_idx + inc
expected_state = Segment.State.BAD
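# Walk outward from the known-good start segment, alternating the expected
# state: segments are separated by speed jumps, so the neighbour of a GOOD
# segment is BAD and vice versa. A mixed (non-cluster) segment that is
# expected to be BAD is split first, so that each segment is a pure cluster
# before a state is assigned to it.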
while(check(i)):
curr_seg = self.segment_list[i]
logging.debug("Processing segment %d: %s, expecting state %s" % (i, curr_seg, expected_state))
assert curr_seg.state == Segment.State.UNKNOWN, "Attempting to overwrite state for segment %s, curr state is %s" % (i, curr_seg.state)
if expected_state == Segment.State.BAD and not curr_seg.is_cluster: # mixed cluster case
curr_seg = self.split_segment(i, curr_seg, direction)
# We inserted a new segment before this, so this is now moved down by one.
# When we are moving right, we insert after this one, so the
# current index is not affected
if (direction == SmoothZigzag.Direction.LEFT):
i = i + 1
logging.debug("Finishing process for %s after splitting mixed cluster"% curr_seg)
assert curr_seg.is_cluster, "after splitting, the segment is not a cluster?!"
# In the mixed case, we just inserted an element, so we don't
# want to increment, because otherwise we will terminate too
# early. Note that the end conditions are embedded in the
# closure
i = i + inc
curr_seg.state = expected_state
expected_state = SmoothZigzag.toggle(expected_state)
logging.debug("At the end of the loop for direction %s, i = %s" % (direction, i))
logging.debug("Finished marking segment states for direction %s " % direction)
def filter(self, with_speeds_df):
self.inlier_mask_ = pd.Series([True] * with_speeds_df.shape[0])
self.with_speeds_df = with_speeds_df
self.find_segments()
logging.debug("After splitting, segment list is %s with size %s" %
(self.segment_list, len(self.segment_list)))
if len(self.segment_list) == 1:
# there were no jumps, so there's nothing to do
logging.info("No jumps, nothing to filter")
return
start_segment_idx = self.find_start_segment(self.segment_list)
self.segment_list[start_segment_idx].state = Segment.State.GOOD
self.mark_segment_states(start_segment_idx, SmoothZigzag.Direction.RIGHT)
self.mark_segment_states(start_segment_idx, SmoothZigzag.Direction.LEFT)
unknown_segments = [segment for segment in self.segment_list if segment.state == Segment.State.UNKNOWN]
logging.debug("unknown_segments = %s" % unknown_segments)
assert len(unknown_segments) == 0, "Found %s unknown segments - early termination of loop?" % len(unknown_segments)
bad_segments = [segment for segment in self.segment_list if segment.state == Segment.State.BAD]
logging.debug("bad_segments = %s" % bad_segments)
for segment in bad_segments:
self.inlier_mask_[segment.start:segment.end] = False
logging.debug("after setting values, outlier_mask = %s" % np.nonzero((self.inlier_mask_ == False).to_numpy()))
# logging.debug("point details are %s" % with_speeds_df[np.logical_not(self.inlier_mask_)])
# TODO: This is not the right place for this - adds too many dependencies
# Should do this in the outer class in general so that we can do
# multiple passes of any filtering algorithm
import emission.analysis.intake.cleaning.cleaning_methods.speed_outlier_detection as cso
import emission.analysis.intake.cleaning.location_smoothing as ls
recomputed_speeds_df = ls.recalc_speed(self.with_speeds_df[self.inlier_mask_])
recomputed_threshold = cso.BoxplotOutlier(ignore_zeros = True).get_threshold(recomputed_speeds_df)
# assert recomputed_speeds_df[recomputed_speeds_df.speed > recomputed_threshold].shape[0] == 0, "After first round, still have outliers %s" % recomputed_speeds_df[recomputed_speeds_df.speed > recomputed_threshold]
if recomputed_speeds_df[recomputed_speeds_df.speed > recomputed_threshold].shape[0] != 0:
logging.info("After first round, still have outliers %s" % recomputed_speeds_df[recomputed_speeds_df.speed > recomputed_threshold])
class SmoothPosdap(object):
def __init__(self, maxSpeed = 100):
self.maxSpeed = maxSpeed
def filter(self, with_speeds_df):
self.inlier_mask_ = [True] * with_speeds_df.shape[0]
quality_segments = []
curr_segment = []
prev_pt = None
for (i, pt) in enumerate(with_speeds_df.to_dict('records')):
pt = ad.AttrDict(pt)
if prev_pt is None:
# Don't have enough data yet, so don't make any decisions
prev_pt = pt
else:
currSpeed = pf.calSpeed(prev_pt, pt)
print("while considering point %s, speed = %s" % (i, currSpeed))
# Should make this configurable
if currSpeed > self.maxSpeed:
print("currSpeed > %d, starting new quality segment at index %s " % (self.maxSpeed, i))
quality_segments.append(curr_segment)
curr_segment = []
else:
print("currSpeed < %d, retaining index %s in existing quality segment " % (self.maxSpeed, i))
prev_pt = pt
curr_segment.append(i)
# Append the last segment once we are at the end
quality_segments.append(curr_segment)
print("Number of quality segments is %d" % len(quality_segments))
last_segment = quality_segments[0]
for curr_segment in quality_segments[1:]:
print("Considering segments %s and %s" % (last_segment, curr_segment))
if len(last_segment) == 0:
# If the last segment has no points, we can't compare last and
# current, but should reset last, otherwise, we will be stuck
# forever
logging.info("len(last_segment) = %d, len(curr_segment) = %d, skipping" %
(len(last_segment), len(curr_segment)))
last_segment = curr_segment
continue
if len(curr_segment) == 0:
# If the current segment has no points, we can't compare last and
# current, but can just continue since the for loop will reset current
logging.info("len(last_segment) = %d, len(curr_segment) = %d, skipping" %
(len(last_segment), len(curr_segment)))
continue
get_coords = lambda i: [with_speeds_df.iloc[i]["mLongitude"], with_speeds_df.iloc[i]["mLatitude"]]
get_ts = lambda i: with_speeds_df.iloc[i]["mTime"]
# I don't know why they would use time instead of distance, but
# this is what the existing POSDAP code does.
print("About to compare curr_segment duration %s with last segment duration %s" %
(get_ts(curr_segment[-1]) - get_ts(curr_segment[0]),
get_ts(last_segment[-1]) - get_ts(last_segment[0])))
if (get_ts(curr_segment[-1]) - get_ts(curr_segment[0]) <=
get_ts(last_segment[-1]) - get_ts(last_segment[0])):
print("curr segment %s is shorter, cut it" % curr_segment)
ref_idx = last_segment[-1]
for curr_idx in curr_segment:
print("Comparing distance %s with speed %s * time %s = %s" %
(math.fabs(ec.calDistance(get_coords(ref_idx), get_coords(curr_idx))),
old_div(self.maxSpeed, 100), abs(get_ts(ref_idx) - get_ts(curr_idx)),
self.maxSpeed / 100 * abs(get_ts(ref_idx) - get_ts(curr_idx))))
if (math.fabs(ec.calDistance(get_coords(ref_idx), get_coords(curr_idx))) >
(self.maxSpeed / 1000 * abs(get_ts(ref_idx) - get_ts(curr_idx)))):
print("Distance is greater than max speed * time, deleting %s" % curr_idx)
self.inlier_mask_[curr_idx] = False
else:
print("prev segment %s is shorter, cut it" % last_segment)
ref_idx = curr_segment[-1]
for curr_idx in reversed(last_segment):
print("Comparing distance %s with speed %s * time %s = %s" %
(math.fabs(ec.calDistance(get_coords(ref_idx), get_coords(curr_idx))),
old_div(self.maxSpeed, 1000) , abs(get_ts(ref_idx) - get_ts(curr_idx)),
self.maxSpeed / 1000 * abs(get_ts(ref_idx) - get_ts(curr_idx))))
if (abs(ec.calDistance(get_coords(ref_idx), get_coords(curr_idx))) >
(self.maxSpeed / 1000 * abs(get_ts(ref_idx) - get_ts(curr_idx)))):
print("Distance is greater than max speed * time, deleting %s" % curr_idx)
self.inlier_mask_[curr_idx] = False
last_segment = curr_segment
logging.info("Filtering complete, removed indices = %s" % np.nonzero(self.inlier_mask_))
class SmoothPiecewiseRansac(object):
def __init__(self, maxSpeed = 100):
self.maxSpeed = maxSpeed
def filter_area_using_ransac(self, area_df):
from sklearn import linear_model
import numpy as np
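# RANSAC fits a robust straight line predicting longitude from latitude for
# this small window of points; points far from the consensus line are
# flagged as outliers in the returned inlier mask.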
latArr = [[lat] for lat in area_df.mLatitude.to_numpy()]
lngArr = area_df.mLongitude.to_numpy()
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(latArr, lngArr)
inlier_mask = model_ransac.inlier_mask_
logging.debug("In area %s - %s, deleted %d points through ransac filtering" %
(area_df.index[0], area_df.index[-1], np.count_nonzero(np.logical_not(inlier_mask))))
return inlier_mask
def find_areas_of_interest(self, with_speeds_df):
candidateIndices = np.nonzero(with_speeds_df.speed > self.maxSpeed)[0]
logging.debug("Found %d potential outliers, list = %s" % (len(candidateIndices), candidateIndices))
if len(candidateIndices) == 0:
logging.info("No potential outliers (%s), so no areas to consider", candidateIndices)
return []
if len(candidateIndices) == 1:
candidateClusterCenters = [candidateIndices]
logging.debug("Only one candidate, cluster centers are %s" % candidateClusterCenters)
else:
from sklearn.cluster import AffinityPropagation
af = AffinityPropagation().fit([[i] for i in candidateIndices])
candidateClusterCenters = af.cluster_centers_
logging.debug("Found %d clusters with centers %s" % (len(candidateClusterCenters), candidateClusterCenters))
dfList = []
for cc in candidateClusterCenters:
logging.debug("Considering candidate cluster center %s" % cc)
lowRange = max(cc[0]-5,0)
highRange = min(cc[0]+5,with_speeds_df.shape[0])
logging.debug("lowRange = max(%s, %s) = %s and highRange = max(%s, %s) = %s" % (cc[0]-5,0,lowRange,cc[0]+5,with_speeds_df.shape[0],highRange))
dfList.append(with_speeds_df.loc[lowRange:highRange])
return dfList
def filter(self, with_speeds_df):
ransac_mask = pd.Series([True] * with_speeds_df.shape[0])
areas_of_interest = self.find_areas_of_interest(with_speeds_df)
for area in areas_of_interest:
logging.debug("Area size = %s, index = %s with size %s" % (area.shape[0], area.index, len(area.index)))
retain_mask = self.filter_area_using_ransac(area)
logging.debug("Retain mask is of size %d" % len(retain_mask))
ransac_mask[area.index] = retain_mask
logging.debug("with speed df shape is %s, ransac_mask size = %s" % (with_speeds_df.shape, len(ransac_mask)))
logging.debug("filtering done, ransac deleted points = %s" % np.nonzero(ransac_mask == False))
self.inlier_mask_ = ransac_mask.to_numpy().tolist()
# -*- encoding: utf-8 -*-
"""The home of the various metrics we gather."""
import logging
import os
import socket
from abc import ABCMeta
from abc import abstractmethod
from datetime import datetime
from datetime import timezone
import boto3
import psutil
class Metric(metaclass=ABCMeta):
"""ABC for metrics we track in CloudWatch.
Usage::
class FooMetric(Metric):
def __init__(self, **kwargs):
super().__init__('Foo', **kwargs)
self.unit = "foos"
def _capture(self):
self.value = 1
FooMetric().put()
"""
def __init__(self, name, cw_client=None, **kwargs):
"""Initialize a new ``Metric``.
:param name: the name of the new ``Metric``
:type name: :class:`str`
:param cw_client: the CloudWatch client to use to push this ``Metric``
up to AWS. If you do not provide one, a new one will
be created.
:type cw_client: :class:`~botocore.client.CloudWatch` or `None`
.. note:: This *will* trigger whatever capturing logic is required to
gather the relevant data.
.. note:: The ``Namespace`` of the metric defaults to the hostname.
"""
self.cloudwatch = (cw_client or
boto3.client('cloudwatch', 'us-east-1'))
self.namespace = socket.gethostname()
self.name = name
self.value = None
self.unit = None
self.timestamp = datetime.now(timezone.utc)
self._capture()
@abstractmethod
def _capture(self):
"""Capture the actual value to be saved."""
pass
def put(self):
"""Push the info represented by this ``Metric`` to CloudWatch."""
try:
self.cloudwatch.put_metric_data(
Namespace=self.namespace,
MetricData=[{
'MetricName': self.name,
'Value': self.value,
'Timestamp': self.timestamp
}]
)
except Exception:
logging.exception("Error pushing {0} to CloudWatch.".format(str(self)))
def __str__(self):
"""A human-readable representation of this ``Metric``."""
return "{0} - {1}: {2} {3}".format(self.namespace, self.name,
self.value, self.unit)
class DiskFreeSpaceMetric(Metric):
"""Information about the free space on a device."""
_UNITS = ('bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')
def __init__(self, mountpoint):
"""Create a new free space metric."""
self.mountpoint = mountpoint
super().__init__("Disk Free Space")
self.name = "{0} ({1})".format(self.name, self.mountpoint)
def _capture(self):
self.value = psutil.disk_usage(self.mountpoint).free
unit_index = 0
while self.value >= 1024:
self.value = self.value / 1024
unit_index += 1
self.unit = DiskFreeSpaceMetric._UNITS[unit_index]
class DiskPercentFreeSpaceMetric(Metric):
"""Information about the percent of free space on a device."""
def __init__(self, mountpoint):
"""Create a new free space metric."""
self.mountpoint = mountpoint
super().__init__("Disk Percent Free Space")
self.name = "{0} ({1})".format(self.name, self.mountpoint)
def _capture(self):
self.value = 100 - psutil.disk_usage(self.mountpoint).percent
self.unit = "%"
class DiskFreeInodesMetric(Metric):
"""Information about the free inodes on a device."""
def __init__(self, mountpoint):
"""Create a new free space metric."""
self.mountpoint = mountpoint
super().__init__("Disk Free Inodes")
self.name = "{0} ({1})".format(self.name, self.mountpoint)
def _capture(self):
self.value = os.statvfs(self.mountpoint).f_ffree
self.unit = "inodes"
class DiskPercentFreeInodesMetric(Metric):
"""Information about the percent of free inodes on a device."""
def __init__(self, mountpoint):
"""Create a new % of inodes that are freee metric."""
self.mountpoint = mountpoint
super().__init__("Disk Percent Free Inodes")
self.name = "{0} ({1})".format(self.name, self.mountpoint)
def _capture(self):
free = os.statvfs(self.mountpoint).f_ffree
total = os.statvfs(self.mountpoint).f_files
self.value = round(100 * (free / total), 1)
self.unit = "%"
class TotalProcessesMetric(Metric):
"""Information about the total number of processes."""
def __init__(self):
"""Create a new metric for the number of total processes."""
super().__init__("Total Processes Metric")
def _capture(self):
self.value = len(psutil.pids())
self.unit = "processes"
class ZombieProcessesMetric(Metric):
"""Information about the number of zombie processes."""
def __init__(self):
"""Create a new metric for the number of zombie processes."""
super().__init__("Zombie Processes Metric")
def _capture(self):
zombies = [p for p in psutil.process_iter()
if p.status() == psutil.STATUS_ZOMBIE]
self.value = len(zombies)
self.unit = "zombie processes"
class OneMinuteLoadAvgMetric(Metric):
"""The 1-minute load average."""
def __init__(self):
"""Create a new metric for the 1-minute load avg."""
super().__init__("1-Minute Load Avg")
def _capture(self):
self.value = os.getloadavg()[0]
class FiveMinuteLoadAvgMetric(Metric):
"""The 5-minute load average."""
def __init__(self):
"""Create a new metric for the 5-minute load avg."""
super().__init__("5-Minute Load Avg")
def _capture(self):
self.value = os.getloadavg()[1]
class FifteenMinuteLoadAvgMetric(Metric):
"""The 15-minute load average."""
def __init__(self):
"""Create a new metric for the 15-minute load avg."""
super().__init__("15-Minute Load Avg")
def _capture(self):
self.value = os.getloadavg()[2]
class CpuPercentageMetric(Metric):
"""The percent of the CPU currently being used."""
def __init__(self):
"""Create a new metric for the percent of the CPU being used."""
super().__init__("CPU Percentage")
def _capture(self):
self.value = psutil.cpu_percent(interval=.1)
class CpuContextSwitchesMetric(Metric):
"""The number of CPU context switches since boot."""
def __init__(self):
"""Create a new metric for the number of CPU context switches since boot."""
super().__init__("CPU Context Switches")
def _capture(self):
self.value = psutil.cpu_stats().ctx_switches
class MemoryAvailableMetric(Metric):
"""The amount of memory 'available'.
This number is calculated in different ways depending on the platform, and
is likely not the same as physically unallocated memory, thanks to
virtual memory.
"""
_UNITS = ('bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')
def __init__(self):
"""Create a new metric for the amount of RAM available."""
super().__init__("Available Memory")
def _capture(self):
self.value = psutil.virtual_memory().available
unit_index = 0
while self.value >= 1024:
self.value = self.value / 1024
unit_index += 1
self.unit = MemoryAvailableMetric._UNITS[unit_index]
class MemoryAvailablePercentageMetric(Metric):
"""The amount of memory 'available' as a percentage of total memory.
This number is calculated in different ways depending on the platform, and
is likely not the same as physically unallocated memory, thanks to
virtual memory.
"""
def __init__(self):
"""Create a new metric for the amount of RAM available."""
super().__init__("Available Memory Percentage")
def _capture(self):
self.value = round(100 - psutil.virtual_memory().percent, 1)
self.unit = '%'
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os, json
from frappe import _
from frappe.modules import scrub, get_module_path
from frappe.utils import flt, cint, get_html_format, cstr
from frappe.translate import send_translations
import frappe.desk.reportview
from frappe.permissions import get_role_permissions
def get_report_doc(report_name):
doc = frappe.get_doc("Report", report_name)
if not doc.has_permission("read"):
frappe.throw(_("You don't have access to Report: {0}").format(report_name), frappe.PermissionError)
if not frappe.has_permission(doc.ref_doctype, "report"):
frappe.throw(_("You don't have permission to get a report on: {0}").format(doc.ref_doctype),
frappe.PermissionError)
if doc.disabled:
frappe.throw(_("Report {0} is disabled").format(report_name))
return doc
@frappe.whitelist()
def get_script(report_name):
report = get_report_doc(report_name)
module = report.module or frappe.db.get_value("DocType", report.ref_doctype, "module")
module_path = get_module_path(module)
report_folder = os.path.join(module_path, "report", scrub(report.name))
script_path = os.path.join(report_folder, scrub(report.name) + ".js")
print_path = os.path.join(report_folder, scrub(report.name) + ".html")
script = None
if os.path.exists(script_path):
with open(script_path, "r") as f:
script = f.read()
html_format = get_html_format(print_path)
if not script and report.javascript:
script = report.javascript
if not script:
script = "frappe.query_reports['%s']={}" % report_name
# load translations
if frappe.lang != "en":
send_translations(frappe.get_lang_dict("report", report_name))
return {
"script": script,
"html_format": html_format
}
@frappe.whitelist()
def run(report_name, filters=None, user=None):
report = get_report_doc(report_name)
if not user:
user = frappe.session.user
if not filters:
filters = []
if filters and isinstance(filters, basestring):
filters = json.loads(filters)
if not frappe.has_permission(report.ref_doctype, "report"):
frappe.msgprint(_("Must have report permission to access this report."),
raise_exception=True)
columns, result, message, chart = [], [], None, None
if report.report_type=="Query Report":
if not report.query:
frappe.msgprint(_("Must specify a Query to run"), raise_exception=True)
if not report.query.lower().startswith("select"):
frappe.msgprint(_("Query must be a SELECT"), raise_exception=True)
result = [list(t) for t in frappe.db.sql(report.query, filters)]
columns = [cstr(c[0]) for c in frappe.db.get_description()]
else:
module = report.module or frappe.db.get_value("DocType", report.ref_doctype, "module")
if report.is_standard=="Yes":
method_name = get_report_module_dotted_path(module, report.name) + ".execute"
res = frappe.get_attr(method_name)(frappe._dict(filters))
columns, result = res[0], res[1]
if len(res) > 2:
message = res[2]
if len(res) > 3:
chart = res[3]
if report.apply_user_permissions and result:
result = get_filtered_data(report.ref_doctype, columns, result, user)
if cint(report.add_total_row) and result:
result = add_total_row(result, columns)
return {
"result": result,
"columns": columns,
"message": message,
"chart": chart
}
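# Hedged illustration (not in the original file): a standard report's
# execute() hook is expected to return a tuple that run() unpacks as
# (columns, result[, message[, chart]]), for example:
#
# def execute(filters=None):
#     columns = ["Customer:Link/Customer", "Total:Currency/currency"]
#     data = [["Acme", 100.0]]
#     return columns, data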
def get_report_module_dotted_path(module, report_name):
return frappe.local.module_app[scrub(module)] + "." + scrub(module) \
+ ".report." + scrub(report_name) + "." + scrub(report_name)
def add_total_row(result, columns, meta = None):
total_row = [""]*len(columns)
has_percent = []
for i, col in enumerate(columns):
fieldtype, options = None, None
if isinstance(col, basestring):
if meta:
# get fieldtype from the meta
field = meta.get_field(col)
if field:
fieldtype = meta.get_field(col).fieldtype
else:
col = col.split(":")
if len(col) > 1:
fieldtype = col[1]
if "/" in fieldtype:
fieldtype, options = fieldtype.split("/")
else:
fieldtype = col.get("fieldtype")
options = col.get("options")
for row in result:
if fieldtype in ["Currency", "Int", "Float", "Percent"] and flt(row[i]):
total_row[i] = flt(total_row[i]) + flt(row[i])
if fieldtype == "Percent" and i not in has_percent:
has_percent.append(i)
if fieldtype=="Link" and options == "Currency":
total_row[i] = result[0][i]
for i in has_percent:
total_row[i] = total_row[i] / len(result)
first_col_fieldtype = None
if isinstance(columns[0], basestring):
first_col = columns[0].split(":")
if len(first_col) > 1:
first_col_fieldtype = first_col[1].split("/")[0]
else:
first_col_fieldtype = columns[0].get("fieldtype")
if first_col_fieldtype not in ["Currency", "Int", "Float", "Percent"]:
if first_col_fieldtype == "Link":
total_row[0] = "'" + _("Total") + "'"
else:
total_row[0] = _("Total")
result.append(total_row)
return result
def get_filtered_data(ref_doctype, columns, data, user):
result = []
linked_doctypes = get_linked_doctypes(columns, data)
match_filters_per_doctype = get_user_match_filters(linked_doctypes, ref_doctype)
shared = frappe.share.get_shared(ref_doctype, user)
columns_dict = get_columns_dict(columns)
role_permissions = get_role_permissions(frappe.get_meta(ref_doctype), user)
if_owner = role_permissions.get("if_owner", {}).get("report")
if match_filters_per_doctype:
for row in data:
# Why linked_doctypes.get(ref_doctype)? because if column is empty, linked_doctypes[ref_doctype] is removed
if linked_doctypes.get(ref_doctype) and shared and row[linked_doctypes[ref_doctype]] in shared:
result.append(row)
elif has_match(row, linked_doctypes, match_filters_per_doctype, ref_doctype, if_owner, columns_dict, user):
result.append(row)
else:
result = list(data)
return result
def has_match(row, linked_doctypes, doctype_match_filters, ref_doctype, if_owner, columns_dict, user):
"""Returns True if after evaluating permissions for each linked doctype
- There is an owner match for the ref_doctype
- `and` There is a user permission match for all linked doctypes
Returns True if the row is empty
Note:
Each doctype could have multiple conflicting user permission doctypes.
Hence, if any one of the sets allows a match, the row is treated as matched.
This behavior is equivalent to the trickling of user permissions of linked doctypes to the ref doctype.
"""
resultant_match = True
if not row:
# allow empty rows :)
return resultant_match
for doctype, filter_list in doctype_match_filters.items():
matched_for_doctype = False
if doctype==ref_doctype and if_owner:
idx = linked_doctypes.get("User")
if (idx is not None
and row[idx]==user
and columns_dict[idx]==columns_dict.get("owner")):
# owner match is true
matched_for_doctype = True
if not matched_for_doctype:
for match_filters in filter_list:
match = True
for dt, idx in linked_doctypes.items():
# case handled above
if dt=="User" and columns_dict[idx]==columns_dict.get("owner"):
continue
if dt in match_filters and row[idx] not in match_filters[dt]:
match = False
break
# each doctype could have multiple conflicting user permission doctypes, hence using OR
# so that even if one of the sets allows a match, it is true
matched_for_doctype = matched_for_doctype or match
if matched_for_doctype:
break
# each doctype's user permissions should match the row! hence using AND
resultant_match = resultant_match and matched_for_doctype
if not resultant_match:
break
return resultant_match
def get_linked_doctypes(columns, data):
linked_doctypes = {}
columns_dict = get_columns_dict(columns)
for idx, col in enumerate(columns):
df = columns_dict[idx]
if df.get("fieldtype")=="Link":
if isinstance(col, basestring):
linked_doctypes[df["options"]] = idx
else:
# dict
linked_doctypes[df["options"]] = df["fieldname"]
# remove doctype if column is empty
columns_with_value = []
for row in data:
if row:
if len(row) != len(columns_with_value):
if isinstance(row, (list, tuple)):
row = enumerate(row)
elif isinstance(row, dict):
row = row.items()
for col, val in row:
if val and col not in columns_with_value:
columns_with_value.append(col)
for doctype, key in list(linked_doctypes.items()):
if key not in columns_with_value:
del linked_doctypes[doctype]
return linked_doctypes
def get_columns_dict(columns):
"""Returns a dict with column docfield values as dict
The keys for the dict are both idx and fieldname,
so either index or fieldname can be used to search for a column's docfield properties
"""
columns_dict = {}
for idx, col in enumerate(columns):
col_dict = {}
# string
if isinstance(col, basestring):
col = col.split(":")
if len(col) > 1:
if "/" in col[1]:
col_dict["fieldtype"], col_dict["options"] = col[1].split("/")
else:
col_dict["fieldtype"] = col[1]
col_dict["fieldname"] = frappe.scrub(col[0])
# dict
else:
col_dict.update(col)
if "fieldname" not in col_dict:
col_dict["fieldname"] = frappe.scrub(col_dict["label"])
columns_dict[idx] = col_dict
columns_dict[col_dict["fieldname"]] = col_dict
return columns_dict
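# Hedged illustration (not in the original file): given the string-style
# column "Amount:Currency/currency", get_columns_dict() produces the entry
# {"fieldtype": "Currency", "options": "currency", "fieldname": "amount"},
# stored under both the integer index and the scrubbed fieldname "amount".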
def get_user_match_filters(doctypes, ref_doctype):
match_filters = {}
for dt in doctypes:
filter_list = frappe.desk.reportview.build_match_conditions(dt, False)
if filter_list:
match_filters[dt] = filter_list
return match_filters
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
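# Each build_*_request helper below assembles a plain azure.core HttpRequest
# for one REST operation on disk accesses: the URL template with path
# parameters substituted, the api-version query parameter, and the
# Accept/Content-Type headers. The DiskAccessesOperations methods further
# down construct these requests and send them through the client pipeline.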
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
disk_access_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
disk_access_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
disk_access_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
disk_access_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_private_link_resources_request(
subscription_id: str,
resource_group_name: str,
disk_access_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_a_private_endpoint_connection_request_initial(
subscription_id: str,
resource_group_name: str,
disk_access_name: str,
private_endpoint_connection_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_a_private_endpoint_connection_request(
subscription_id: str,
resource_group_name: str,
disk_access_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_a_private_endpoint_connection_request_initial(
subscription_id: str,
resource_group_name: str,
disk_access_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_private_endpoint_connections_request(
subscription_id: str,
resource_group_name: str,
disk_access_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class DiskAccessesOperations(object):
"""DiskAccessesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
disk_access_name: str,
disk_access: "_models.DiskAccess",
**kwargs: Any
) -> "_models.DiskAccess":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskAccess"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk_access, 'DiskAccess')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DiskAccess', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DiskAccess', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
disk_access_name: str,
disk_access: "_models.DiskAccess",
**kwargs: Any
) -> LROPoller["_models.DiskAccess"]:
"""Creates or updates a disk access resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk access resource is created. Supported characters for the name
are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.
:type disk_access_name: str
:param disk_access: disk access object supplied in the body of the Put disk access operation.
:type disk_access: ~azure.mgmt.compute.v2021_12_01.models.DiskAccess
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DiskAccess or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_12_01.models.DiskAccess]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskAccess"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
disk_access=disk_access,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DiskAccess', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} # type: ignore
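    # -- Hedged usage sketch (editor's addition, not generated code) --------
    # How a caller might drive this long-running operation. The client class,
    # credential, and every resource name below are illustrative assumptions;
    # only the LROPoller protocol (result()/wait()/done()) comes from
    # azure-core.
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.compute import ComputeManagementClient
    #
    #   client = ComputeManagementClient(DefaultAzureCredential(), "<sub-id>")
    #   poller = client.disk_accesses.begin_create_or_update(
    #       resource_group_name="my-rg",          # hypothetical name
    #       disk_access_name="my-disk-access",    # hypothetical name
    #       disk_access={"location": "westus2"},
    #   )
    #   disk_access = poller.result()  # blocks until the LRO completes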
def _update_initial(
self,
resource_group_name: str,
disk_access_name: str,
disk_access: "_models.DiskAccessUpdate",
**kwargs: Any
) -> "_models.DiskAccess":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskAccess"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk_access, 'DiskAccessUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DiskAccess', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DiskAccess', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
disk_access_name: str,
disk_access: "_models.DiskAccessUpdate",
**kwargs: Any
) -> LROPoller["_models.DiskAccess"]:
"""Updates (patches) a disk access resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk access resource is created. Supported characters for the name
are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.
:type disk_access_name: str
:param disk_access: disk access object supplied in the body of the Patch disk access operation.
:type disk_access: ~azure.mgmt.compute.v2021_12_01.models.DiskAccessUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DiskAccess or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_12_01.models.DiskAccess]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskAccess"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
disk_access=disk_access,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DiskAccess', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} # type: ignore
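    # Hedged sketch (editor's addition): begin_update sends a PATCH-style
    # DiskAccessUpdate body, so only the supplied fields change. Reusing the
    # hypothetical ``client`` from the sketch above:
    #
    #   poller = client.disk_accesses.begin_update(
    #       "my-rg", "my-disk-access",
    #       disk_access={"tags": {"env": "dev"}},  # only tags are patched
    #   )
    #   updated = poller.result()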
@distributed_trace
def get(
self,
resource_group_name: str,
disk_access_name: str,
**kwargs: Any
) -> "_models.DiskAccess":
"""Gets information about a disk access resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk access resource is created. Supported characters for the name
are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.
:type disk_access_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiskAccess, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_12_01.models.DiskAccess
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskAccess"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiskAccess', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} # type: ignore
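    # Hedged sketch (editor's addition): the ``cls`` keyword accepted by these
    # operations is a callback ``cls(pipeline_response, deserialized, headers)``
    # whose return value replaces the deserialized model, e.g. to keep the
    # raw HTTP response alongside the model:
    #
    #   model, raw = client.disk_accesses.get(
    #       "my-rg", "my-disk-access",
    #       cls=lambda pipeline_response, deserialized, headers:
    #           (deserialized, pipeline_response.http_response),
    #   )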
def _delete_initial(
self,
resource_group_name: str,
disk_access_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
disk_access_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a disk access resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk access resource is created. Supported characters for the name
are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.
:type disk_access_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}'} # type: ignore
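    # Hedged sketch (editor's addition): polling can be disabled or resumed.
    # ``polling=False`` returns a poller that never polls, and the
    # continuation token lets a later process re-attach to the same LRO.
    #
    #   poller = client.disk_accesses.begin_delete("my-rg", "my-disk-access")
    #   token = poller.continuation_token()
    #   # ...later, possibly in another process:
    #   resumed = client.disk_accesses.begin_delete(
    #       "my-rg", "my-disk-access", continuation_token=token)
    #   resumed.wait()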
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.DiskAccessList"]:
"""Lists all the disk access resources under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskAccessList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_12_01.models.DiskAccessList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskAccessList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DiskAccessList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses'} # type: ignore
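    # Hedged sketch (editor's addition): the returned ItemPaged fetches pages
    # lazily, following each response's next_link. Iterate items directly, or
    # page by page via by_page():
    #
    #   for disk_access in client.disk_accesses.list_by_resource_group("my-rg"):
    #       print(disk_access.name)
    #
    #   for page in client.disk_accesses.list_by_resource_group("my-rg").by_page():
    #       items = list(page)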
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.DiskAccessList"]:
"""Lists all the disk access resources under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskAccessList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_12_01.models.DiskAccessList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskAccessList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DiskAccessList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses'} # type: ignore
@distributed_trace
def get_private_link_resources(
self,
resource_group_name: str,
disk_access_name: str,
**kwargs: Any
) -> "_models.PrivateLinkResourceListResult":
"""Gets the private link resources possible under disk access resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk access resource is created. Supported characters for the name
are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.
:type disk_access_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourceListResult, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_12_01.models.PrivateLinkResourceListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_private_link_resources_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
template_url=self.get_private_link_resources.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_private_link_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources'} # type: ignore
def _update_a_private_endpoint_connection_initial(
self,
resource_group_name: str,
disk_access_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(private_endpoint_connection, 'PrivateEndpointConnection')
request = build_update_a_private_endpoint_connection_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
private_endpoint_connection_name=private_endpoint_connection_name,
content_type=content_type,
json=_json,
template_url=self._update_a_private_endpoint_connection_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_a_private_endpoint_connection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
@distributed_trace
def begin_update_a_private_endpoint_connection(
self,
resource_group_name: str,
disk_access_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> LROPoller["_models.PrivateEndpointConnection"]:
"""Approve or reject a private endpoint connection under disk access resource, this can't be used
to create a new private endpoint connection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk access resource is created. Supported characters for the name
are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.
:type disk_access_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: private endpoint connection object supplied in the body of
the Put private endpoint connection operation.
:type private_endpoint_connection:
~azure.mgmt.compute.v2021_12_01.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_12_01.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_a_private_endpoint_connection_initial(
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
private_endpoint_connection_name=private_endpoint_connection_name,
private_endpoint_connection=private_endpoint_connection,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_a_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
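    # Hedged sketch (editor's addition): approving a pending connection. The
    # model and field names below are assumptions about the v2021_12_01
    # models package; verify before use.
    #
    #   from azure.mgmt.compute.v2021_12_01 import models
    #
    #   conn = models.PrivateEndpointConnection(
    #       private_link_service_connection_state=(
    #           models.PrivateLinkServiceConnectionState(
    #               status="Approved", description="approved by admin")))
    #   poller = client.disk_accesses.begin_update_a_private_endpoint_connection(
    #       "my-rg", "my-disk-access", "my-pe-conn", conn)
    #   poller.result()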
@distributed_trace
def get_a_private_endpoint_connection(
self,
resource_group_name: str,
disk_access_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Gets information about a private endpoint connection under a disk access resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk access resource is created. Supported characters for the name
are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.
:type disk_access_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_12_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_a_private_endpoint_connection_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
private_endpoint_connection_name=private_endpoint_connection_name,
template_url=self.get_a_private_endpoint_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_a_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def _delete_a_private_endpoint_connection_initial(
self,
resource_group_name: str,
disk_access_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_a_private_endpoint_connection_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
private_endpoint_connection_name=private_endpoint_connection_name,
template_url=self._delete_a_private_endpoint_connection_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_a_private_endpoint_connection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
@distributed_trace
def begin_delete_a_private_endpoint_connection(
self,
resource_group_name: str,
disk_access_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a private endpoint connection under a disk access resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk access resource is created. Supported characters for the name
are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.
:type disk_access_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_a_private_endpoint_connection_initial(
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_a_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
@distributed_trace
def list_private_endpoint_connections(
self,
resource_group_name: str,
disk_access_name: str,
**kwargs: Any
) -> Iterable["_models.PrivateEndpointConnectionListResult"]:
"""List information about private endpoint connections under a disk access resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_access_name: The name of the disk access resource that is being created. The name
     can't be changed after the disk access resource is created. Supported characters for the name
are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters.
:type disk_access_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionListResult or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_12_01.models.PrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_private_endpoint_connections_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
template_url=self.list_private_endpoint_connections.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_private_endpoint_connections_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_access_name=disk_access_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_private_endpoint_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections'} # type: ignore
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
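# Editor's note (hedged): the ``yield check_..., args`` construct above is
# nose-style test parametrization. A minimal pytest equivalent, assuming
# pytest is available (not part of the original suite):
#
#   import pytest
#
#   @pytest.mark.parametrize("presort", ('auto', True, False))
#   @pytest.mark.parametrize("loss", ('deviance', 'exponential'))
#   def test_classification_toy_pytest(presort, loss):
#       check_classification_toy(presort, loss)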
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
    for presort, loss in product(('auto', True, False),
                                 ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
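# Editor's addition (hedged sketch): the ``classes_.take(argmax)`` idiom used
# above is the generic way to map a probability matrix back to labels, and it
# works for arbitrary (non-contiguous, non-integer) class labels. The helper
# name is illustrative.
def _demo_labels_from_proba(clf, X_query):
    """Recompute predict() from predict_proba(); assumes ``clf`` is fitted."""
    proba = clf.predict_proba(X_query)
    # argmax picks the most probable column; classes_ maps it back to a label
    return clf.classes_.take(proba.argmax(axis=1), axis=0)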
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
    # Test that max_features_ is resolved properly for float and str values.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
    # Test whether the staged predictions eventually give
    # the same result as ``predict``.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_almost_equal(y_pred, y)
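# Editor's addition (hedged sketch): staged_predict yields one prediction
# array per boosting stage, which makes after-the-fit model selection cheap.
# The helper name and the validation-split choice are illustrative.
def _demo_best_n_estimators(clf, X_val, y_val):
    """Return the 1-based stage count with the lowest validation MSE.

    Assumes ``clf`` is an already-fitted GradientBoostingRegressor.
    """
    stage_mse = [mean_squared_error(y_val, y_stage)
                 for y_stage in clf.staged_predict(X_val)]
    # stages come out in fit order; +1 turns the 0-based index into a count
    return int(np.argmin(stage_mse)) + 1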
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_almost_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
    y = (4 * X[:, 0]).astype(int) + 1  # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
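# Editor's note (hedged): the equivalence tested above follows from the
# pinball (quantile) loss
#     L_alpha(y, p) = alpha * (y - p)        if y >= p
#                     (1 - alpha) * (p - y)  otherwise,
# which at alpha = 0.5 reduces to 0.5 * |y - p|, a scaled absolute error.
# Up to that constant factor this is the same objective as loss='lad',
# hence the near-identical predictions.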
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
    # Test that a column-vector y of shape (n_samples, 1) is handled.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
    # Test that oob improvement has the correct shape; also a regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
    # hard-coded regression test - update if the OOB computation changes
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
    # Test that oob_improvement_ is unavailable when subsample=1.0.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
    # hard-coded regression test - update if the OOB computation changes
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for 1-10, then one every 10th iteration (20-100)
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
if Cls is GradientBoostingRegressor:
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
else:
# Random state is preserved and hence predict_proba must also be
# same
assert_array_equal(est_ws.predict(X), est.predict(X))
assert_array_almost_equal(est_ws.predict_proba(X),
est.predict_proba(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
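# A sketch of a more practical monitor with the same (i, est, locals)
# signature (the name and patience heuristic are ours, not part of this test
# suite): stop once the OOB improvement has been non-positive for `patience`
# consecutive stages. Requires subsample < 1.0 so est.oob_improvement_ exists.
def oob_patience_monitor(i, est, locals, patience=5):
    if i + 1 < patience:
        return False
    recent = est.oob_improvement_[i - patience + 1:i + 1]
    return bool(np.all(recent <= 0))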
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_min_impurity_split():
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor, GradientBoostingClassifier]
for GBEstimator in all_estimators:
est = GBEstimator(min_impurity_split=0.1)
est = assert_warns_message(DeprecationWarning, "min_impurity_decrease",
est.fit, X, y)
for tree in est.estimators_.flat:
assert_equal(tree.min_impurity_split, 0.1)
def test_min_impurity_decrease():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor, GradientBoostingClassifier]
for GBEstimator in all_estimators:
est = GBEstimator(min_impurity_decrease=0.1)
est.fit(X, y)
for tree in est.estimators_.flat:
# Simply check if the parameter is passed on correctly. Tree tests
# will suffice for the actual working of this param
assert_equal(tree.min_impurity_decrease, 0.1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
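    # For the 'exponential' (AdaBoost-style) loss, class probabilities are
    # recovered as the logistic sigmoid of twice the decision function,
    # which is exactly the relation asserted above.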
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X))
assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X))
    if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
assert_array_almost_equal(sparse.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(dense.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(
np.array(sparse.staged_decision_function(X_sparse)),
np.array(sparse.staged_decision_function(X)))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
def test_gradient_boosting_early_stopping():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=1000,
n_iter_no_change=10,
learning_rate=0.1, max_depth=3,
random_state=42)
gbr = GradientBoostingRegressor(n_estimators=1000, n_iter_no_change=10,
learning_rate=0.1, max_depth=3,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
# Check if early_stopping works as expected
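    # With n_iter_no_change set, a validation_fraction split is held out and
    # training stops once the validation loss fails to improve by more than
    # `tol` for n_iter_no_change consecutive iterations; a looser (larger)
    # tol therefore stops earlier, as the expected counts below reflect.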
for est, tol, early_stop_n_estimators in ((gbc, 1e-1, 24), (gbr, 1e-1, 13),
(gbc, 1e-3, 36),
(gbr, 1e-3, 28)):
est.set_params(tol=tol)
est.fit(X_train, y_train)
assert_equal(est.n_estimators_, early_stop_n_estimators)
assert est.score(X_test, y_test) > 0.7
# Without early stopping
gbc = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
max_depth=3, random_state=42)
gbc.fit(X, y)
gbr = GradientBoostingRegressor(n_estimators=200, learning_rate=0.1,
max_depth=3, random_state=42)
gbr.fit(X, y)
assert gbc.n_estimators_ == 100
assert gbr.n_estimators_ == 200
def test_gradient_boosting_validation_fraction():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=100,
n_iter_no_change=10,
validation_fraction=0.1,
learning_rate=0.1, max_depth=3,
random_state=42)
gbc2 = clone(gbc).set_params(validation_fraction=0.3)
gbc3 = clone(gbc).set_params(n_iter_no_change=20)
gbr = GradientBoostingRegressor(n_estimators=100, n_iter_no_change=10,
learning_rate=0.1, max_depth=3,
validation_fraction=0.1,
random_state=42)
gbr2 = clone(gbr).set_params(validation_fraction=0.3)
gbr3 = clone(gbr).set_params(n_iter_no_change=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Check if validation_fraction has an effect
gbc.fit(X_train, y_train)
gbc2.fit(X_train, y_train)
assert gbc.n_estimators_ != gbc2.n_estimators_
gbr.fit(X_train, y_train)
gbr2.fit(X_train, y_train)
assert gbr.n_estimators_ != gbr2.n_estimators_
# Check if n_estimators_ increase monotonically with n_iter_no_change
# Set validation
gbc3.fit(X_train, y_train)
gbr3.fit(X_train, y_train)
assert gbr.n_estimators_ < gbr3.n_estimators_
assert gbc.n_estimators_ < gbc3.n_estimators_
# Copyright 2014 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_brick.initiator import connector
from oslo_config import cfg
from oslo_utils import units
from nova import exception
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import volumeops
CONF = cfg.CONF
connection_data = {'volume_id': 'fake_vol_id',
'target_lun': mock.sentinel.fake_lun,
'target_iqn': mock.sentinel.fake_iqn,
'target_portal': mock.sentinel.fake_portal,
'auth_method': 'chap',
'auth_username': mock.sentinel.fake_user,
'auth_password': mock.sentinel.fake_pass}
def get_fake_block_dev_info():
return {'block_device_mapping': [
fake_block_device.AnonFakeDbBlockDeviceDict({'source_type': 'volume'})]
}
def get_fake_connection_info(**kwargs):
return {'data': dict(connection_data, **kwargs),
'serial': mock.sentinel.serial}
class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for VolumeOps class."""
def setUp(self):
super(VolumeOpsTestCase, self).setUp()
self._volumeops = volumeops.VolumeOps()
self._volumeops._volutils = mock.MagicMock()
self._volumeops._vmutils = mock.Mock()
def test_get_volume_driver(self):
fake_conn_info = {'driver_volume_type': mock.sentinel.fake_driver_type}
self._volumeops.volume_drivers[mock.sentinel.fake_driver_type] = (
mock.sentinel.fake_driver)
result = self._volumeops._get_volume_driver(
connection_info=fake_conn_info)
self.assertEqual(mock.sentinel.fake_driver, result)
def test_get_volume_driver_exception(self):
fake_conn_info = {'driver_volume_type': 'fake_driver'}
self.assertRaises(exception.VolumeDriverNotFound,
self._volumeops._get_volume_driver,
connection_info=fake_conn_info)
@mock.patch.object(volumeops.VolumeOps, 'attach_volume')
def test_attach_volumes(self, mock_attach_volume):
block_device_info = get_fake_block_dev_info()
self._volumeops.attach_volumes(
block_device_info['block_device_mapping'],
mock.sentinel.instance_name)
mock_attach_volume.assert_called_once_with(
block_device_info['block_device_mapping'][0]['connection_info'],
mock.sentinel.instance_name)
def test_fix_instance_volume_disk_paths_empty_bdm(self):
self._volumeops.fix_instance_volume_disk_paths(
mock.sentinel.instance_name,
block_device_info={})
self.assertFalse(
self._volumeops._vmutils.get_vm_physical_disk_mapping.called)
@mock.patch.object(volumeops.VolumeOps, 'get_disk_path_mapping')
def test_fix_instance_volume_disk_paths(self, mock_get_disk_path_mapping):
block_device_info = get_fake_block_dev_info()
mock_disk1 = {
'mounted_disk_path': mock.sentinel.mounted_disk1_path,
'resource_path': mock.sentinel.resource1_path
}
mock_disk2 = {
'mounted_disk_path': mock.sentinel.mounted_disk2_path,
'resource_path': mock.sentinel.resource2_path
}
mock_vm_disk_mapping = {
mock.sentinel.disk1_serial: mock_disk1,
mock.sentinel.disk2_serial: mock_disk2
}
# In this case, only the first disk needs to be updated.
mock_phys_disk_path_mapping = {
mock.sentinel.disk1_serial: mock.sentinel.actual_disk1_path,
mock.sentinel.disk2_serial: mock.sentinel.mounted_disk2_path
}
vmutils = self._volumeops._vmutils
vmutils.get_vm_physical_disk_mapping.return_value = (
mock_vm_disk_mapping)
mock_get_disk_path_mapping.return_value = mock_phys_disk_path_mapping
self._volumeops.fix_instance_volume_disk_paths(
mock.sentinel.instance_name,
block_device_info)
vmutils.get_vm_physical_disk_mapping.assert_called_once_with(
mock.sentinel.instance_name)
mock_get_disk_path_mapping.assert_called_once_with(
block_device_info)
vmutils.set_disk_host_res.assert_called_once_with(
mock.sentinel.resource1_path,
mock.sentinel.actual_disk1_path)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_disconnect_volumes(self, mock_get_volume_driver):
block_device_info = get_fake_block_dev_info()
block_device_mapping = block_device_info['block_device_mapping']
fake_volume_driver = mock_get_volume_driver.return_value
self._volumeops.disconnect_volumes(block_device_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
block_device_mapping[0]['connection_info'])
@mock.patch('time.sleep')
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def _test_attach_volume(self, mock_get_volume_driver, mock_sleep,
attach_failed):
fake_conn_info = get_fake_connection_info(
qos_specs=mock.sentinel.qos_specs)
fake_volume_driver = mock_get_volume_driver.return_value
expected_try_count = 1
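        # On failure, attach_volume is expected to make one initial attempt
        # plus CONF.hyperv.volume_attach_retry_count retries, sleeping
        # CONF.hyperv.volume_attach_retry_interval between attempts (the
        # sleep assertions below rely on this).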
if attach_failed:
expected_try_count += CONF.hyperv.volume_attach_retry_count
fake_volume_driver.set_disk_qos_specs.side_effect = (
test.TestingException)
self.assertRaises(exception.VolumeAttachFailed,
self._volumeops.attach_volume,
fake_conn_info,
mock.sentinel.inst_name,
mock.sentinel.disk_bus)
else:
self._volumeops.attach_volume(
fake_conn_info,
mock.sentinel.inst_name,
mock.sentinel.disk_bus)
mock_get_volume_driver.assert_any_call(
fake_conn_info)
fake_volume_driver.attach_volume.assert_has_calls(
[mock.call(fake_conn_info,
mock.sentinel.inst_name,
mock.sentinel.disk_bus)] * expected_try_count)
fake_volume_driver.set_disk_qos_specs.assert_has_calls(
[mock.call(fake_conn_info,
mock.sentinel.qos_specs)] * expected_try_count)
if attach_failed:
fake_volume_driver.disconnect_volume.assert_called_once_with(
fake_conn_info)
mock_sleep.assert_has_calls(
[mock.call(CONF.hyperv.volume_attach_retry_interval)] *
CONF.hyperv.volume_attach_retry_count)
else:
mock_sleep.assert_not_called()
def test_attach_volume(self):
self._test_attach_volume(attach_failed=False)
def test_attach_volume_exc(self):
self._test_attach_volume(attach_failed=True)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_disconnect_volume(self, mock_get_volume_driver):
fake_volume_driver = mock_get_volume_driver.return_value
self._volumeops.disconnect_volume(mock.sentinel.conn_info)
mock_get_volume_driver.assert_called_once_with(
mock.sentinel.conn_info)
fake_volume_driver.disconnect_volume.assert_called_once_with(
mock.sentinel.conn_info)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_detach_volume(self, mock_get_volume_driver):
fake_volume_driver = mock_get_volume_driver.return_value
fake_conn_info = {'data': 'fake_conn_info_data'}
self._volumeops.detach_volume(fake_conn_info,
mock.sentinel.inst_name)
mock_get_volume_driver.assert_called_once_with(
fake_conn_info)
fake_volume_driver.detach_volume.assert_called_once_with(
fake_conn_info, mock.sentinel.inst_name)
fake_volume_driver.disconnect_volume.assert_called_once_with(
fake_conn_info)
@mock.patch.object(connector, 'get_connector_properties')
def test_get_volume_connector(self, mock_get_connector):
conn = self._volumeops.get_volume_connector()
mock_get_connector.assert_called_once_with(
root_helper=None,
my_ip=CONF.my_block_storage_ip,
multipath=CONF.hyperv.use_multipath_io,
enforce_multipath=True,
host=CONF.host)
self.assertEqual(mock_get_connector.return_value, conn)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_connect_volumes(self, mock_get_volume_driver):
block_device_info = get_fake_block_dev_info()
self._volumeops.connect_volumes(block_device_info)
init_vol_conn = (
mock_get_volume_driver.return_value.connect_volume)
init_vol_conn.assert_called_once_with(
block_device_info['block_device_mapping'][0]['connection_info'])
@mock.patch.object(volumeops.VolumeOps,
'get_disk_resource_path')
def test_get_disk_path_mapping(self, mock_get_disk_path):
block_device_info = get_fake_block_dev_info()
block_device_mapping = block_device_info['block_device_mapping']
fake_conn_info = get_fake_connection_info()
block_device_mapping[0]['connection_info'] = fake_conn_info
mock_get_disk_path.return_value = mock.sentinel.disk_path
resulted_disk_path_mapping = self._volumeops.get_disk_path_mapping(
block_device_info)
mock_get_disk_path.assert_called_once_with(fake_conn_info)
expected_disk_path_mapping = {
mock.sentinel.serial: mock.sentinel.disk_path
}
self.assertEqual(expected_disk_path_mapping,
resulted_disk_path_mapping)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_get_disk_resource_path(self, mock_get_volume_driver):
fake_conn_info = get_fake_connection_info()
fake_volume_driver = mock_get_volume_driver.return_value
resulted_disk_path = self._volumeops.get_disk_resource_path(
fake_conn_info)
mock_get_volume_driver.assert_called_once_with(fake_conn_info)
get_mounted_disk = fake_volume_driver.get_disk_resource_path
get_mounted_disk.assert_called_once_with(fake_conn_info)
self.assertEqual(get_mounted_disk.return_value,
resulted_disk_path)
def test_bytes_per_sec_to_iops(self):
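        # Hyper-V storage QoS counts "normalized" 8 KB I/O operations, so
        # 15 KiB/s rounds up to ceil(15 Ki / 8 Ki) = 2 IOPS; the expected
        # value below assumes that normalization.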
no_bytes = 15 * units.Ki
expected_iops = 2
resulted_iops = self._volumeops.bytes_per_sec_to_iops(no_bytes)
self.assertEqual(expected_iops, resulted_iops)
@mock.patch.object(volumeops.LOG, 'warning')
def test_validate_qos_specs(self, mock_warning):
supported_qos_specs = [mock.sentinel.spec1, mock.sentinel.spec2]
requested_qos_specs = {mock.sentinel.spec1: mock.sentinel.val,
mock.sentinel.spec3: mock.sentinel.val2}
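        # spec3 is not among the supported specs, so validate_qos_specs is
        # expected to log a warning rather than raise.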
self._volumeops.validate_qos_specs(requested_qos_specs,
supported_qos_specs)
self.assertTrue(mock_warning.called)
class BaseVolumeDriverTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for Hyper-V BaseVolumeDriver class."""
def setUp(self):
super(BaseVolumeDriverTestCase, self).setUp()
self._base_vol_driver = volumeops.BaseVolumeDriver()
self._base_vol_driver._diskutils = mock.Mock()
self._base_vol_driver._vmutils = mock.Mock()
self._base_vol_driver._migrutils = mock.Mock()
self._base_vol_driver._conn = mock.Mock()
self._vmutils = self._base_vol_driver._vmutils
self._migrutils = self._base_vol_driver._migrutils
self._diskutils = self._base_vol_driver._diskutils
self._conn = self._base_vol_driver._conn
@mock.patch.object(connector.InitiatorConnector, 'factory')
def test_connector(self, mock_conn_factory):
self._base_vol_driver._conn = None
self._base_vol_driver._protocol = mock.sentinel.protocol
self._base_vol_driver._extra_connector_args = dict(
fake_conn_arg=mock.sentinel.conn_val)
conn = self._base_vol_driver._connector
self.assertEqual(mock_conn_factory.return_value, conn)
mock_conn_factory.assert_called_once_with(
protocol=mock.sentinel.protocol,
root_helper=None,
use_multipath=CONF.hyperv.use_multipath_io,
device_scan_attempts=CONF.hyperv.mounted_disk_query_retry_count,
device_scan_interval=(
CONF.hyperv.mounted_disk_query_retry_interval),
**self._base_vol_driver._extra_connector_args)
def test_connect_volume(self):
conn_info = get_fake_connection_info()
dev_info = self._base_vol_driver.connect_volume(conn_info)
expected_dev_info = self._conn.connect_volume.return_value
self.assertEqual(expected_dev_info, dev_info)
self._conn.connect_volume.assert_called_once_with(
conn_info['data'])
def test_disconnect_volume(self):
conn_info = get_fake_connection_info()
self._base_vol_driver.disconnect_volume(conn_info)
self._conn.disconnect_volume.assert_called_once_with(
conn_info['data'])
@mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_res_path')
def _test_get_disk_resource_path_by_conn_info(self,
mock_get_disk_res_path,
disk_found=True):
conn_info = get_fake_connection_info()
mock_vol_paths = [mock.sentinel.disk_path] if disk_found else []
self._conn.get_volume_paths.return_value = mock_vol_paths
if disk_found:
disk_res_path = self._base_vol_driver.get_disk_resource_path(
conn_info)
self._conn.get_volume_paths.assert_called_once_with(
conn_info['data'])
self.assertEqual(mock_get_disk_res_path.return_value,
disk_res_path)
mock_get_disk_res_path.assert_called_once_with(
mock.sentinel.disk_path)
else:
self.assertRaises(exception.DiskNotFound,
self._base_vol_driver.get_disk_resource_path,
conn_info)
def test_get_existing_disk_res_path(self):
self._test_get_disk_resource_path_by_conn_info()
def test_get_unfound_disk_res_path(self):
self._test_get_disk_resource_path_by_conn_info(disk_found=False)
def test_get_block_dev_res_path(self):
self._base_vol_driver._is_block_dev = True
mock_get_dev_number = (
self._diskutils.get_device_number_from_device_name)
mock_get_dev_number.return_value = mock.sentinel.dev_number
self._vmutils.get_mounted_disk_by_drive_number.return_value = (
mock.sentinel.disk_path)
disk_path = self._base_vol_driver._get_disk_res_path(
mock.sentinel.dev_name)
mock_get_dev_number.assert_called_once_with(mock.sentinel.dev_name)
self._vmutils.get_mounted_disk_by_drive_number.assert_called_once_with(
mock.sentinel.dev_number)
self.assertEqual(mock.sentinel.disk_path, disk_path)
def test_get_virt_disk_res_path(self):
# For virtual disk images, we expect the resource path to be the
# actual image path, as opposed to passthrough disks, in which case we
# need the Msvm_DiskDrive resource path when attaching it to a VM.
self._base_vol_driver._is_block_dev = False
path = self._base_vol_driver._get_disk_res_path(
mock.sentinel.disk_path)
self.assertEqual(mock.sentinel.disk_path, path)
@mock.patch.object(volumeops.BaseVolumeDriver,
'_get_disk_res_path')
@mock.patch.object(volumeops.BaseVolumeDriver, '_get_disk_ctrl_and_slot')
@mock.patch.object(volumeops.BaseVolumeDriver,
'connect_volume')
def _test_attach_volume(self, mock_connect_volume,
mock_get_disk_ctrl_and_slot,
mock_get_disk_res_path,
is_block_dev=True):
connection_info = get_fake_connection_info()
self._base_vol_driver._is_block_dev = is_block_dev
mock_connect_volume.return_value = dict(path=mock.sentinel.raw_path)
mock_get_disk_res_path.return_value = (
mock.sentinel.disk_path)
mock_get_disk_ctrl_and_slot.return_value = (
mock.sentinel.ctrller_path,
mock.sentinel.slot)
self._base_vol_driver.attach_volume(
connection_info=connection_info,
instance_name=mock.sentinel.instance_name,
disk_bus=mock.sentinel.disk_bus)
if is_block_dev:
self._vmutils.attach_volume_to_controller.assert_called_once_with(
mock.sentinel.instance_name,
mock.sentinel.ctrller_path,
mock.sentinel.slot,
mock.sentinel.disk_path,
serial=connection_info['serial'])
else:
self._vmutils.attach_drive.assert_called_once_with(
mock.sentinel.instance_name,
mock.sentinel.disk_path,
mock.sentinel.ctrller_path,
mock.sentinel.slot)
mock_get_disk_res_path.assert_called_once_with(
mock.sentinel.raw_path)
mock_get_disk_ctrl_and_slot.assert_called_once_with(
mock.sentinel.instance_name, mock.sentinel.disk_bus)
def test_attach_volume_image_file(self):
self._test_attach_volume(is_block_dev=False)
def test_attach_volume_block_dev(self):
self._test_attach_volume(is_block_dev=True)
def test_detach_volume_planned_vm(self):
self._base_vol_driver.detach_volume(mock.sentinel.connection_info,
mock.sentinel.inst_name)
self._vmutils.detach_vm_disk.assert_not_called()
@mock.patch.object(volumeops.BaseVolumeDriver,
'get_disk_resource_path')
def test_detach_volume(self, mock_get_disk_resource_path):
self._migrutils.planned_vm_exists.return_value = False
connection_info = get_fake_connection_info()
self._base_vol_driver.detach_volume(connection_info,
mock.sentinel.instance_name)
mock_get_disk_resource_path.assert_called_once_with(
connection_info)
self._vmutils.detach_vm_disk.assert_called_once_with(
mock.sentinel.instance_name,
mock_get_disk_resource_path.return_value,
is_physical=self._base_vol_driver._is_block_dev)
def test_get_disk_ctrl_and_slot_ide(self):
ctrl, slot = self._base_vol_driver._get_disk_ctrl_and_slot(
mock.sentinel.instance_name,
disk_bus=constants.CTRL_TYPE_IDE)
expected_ctrl = self._vmutils.get_vm_ide_controller.return_value
expected_slot = 0
self._vmutils.get_vm_ide_controller.assert_called_once_with(
mock.sentinel.instance_name, 0)
self.assertEqual(expected_ctrl, ctrl)
self.assertEqual(expected_slot, slot)
def test_get_disk_ctrl_and_slot_scsi(self):
ctrl, slot = self._base_vol_driver._get_disk_ctrl_and_slot(
mock.sentinel.instance_name,
disk_bus=constants.CTRL_TYPE_SCSI)
expected_ctrl = self._vmutils.get_vm_scsi_controller.return_value
expected_slot = (
self._vmutils.get_free_controller_slot.return_value)
self._vmutils.get_vm_scsi_controller.assert_called_once_with(
mock.sentinel.instance_name)
        self._vmutils.get_free_controller_slot.assert_called_once_with(
            self._vmutils.get_vm_scsi_controller.return_value)
self.assertEqual(expected_ctrl, ctrl)
self.assertEqual(expected_slot, slot)
def test_set_disk_qos_specs(self):
# This base method is a noop, we'll just make sure
# it doesn't error out.
self._base_vol_driver.set_disk_qos_specs(
            mock.sentinel.conn_info, mock.sentinel.disk_qos_specs)
class ISCSIVolumeDriverTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for Hyper-V BaseVolumeDriver class."""
def test_extra_conn_args(self):
fake_iscsi_initiator = (
'PCI\\VEN_1077&DEV_2031&SUBSYS_17E8103C&REV_02\\'
'4&257301f0&0&0010_0')
self.flags(iscsi_initiator_list=[fake_iscsi_initiator],
group='hyperv')
expected_extra_conn_args = dict(
initiator_list=[fake_iscsi_initiator])
vol_driver = volumeops.ISCSIVolumeDriver()
self.assertEqual(expected_extra_conn_args,
vol_driver._extra_connector_args)
class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V SMBFSVolumeDriver class."""
_FAKE_EXPORT_PATH = '//ip/share/'
_FAKE_CONN_INFO = get_fake_connection_info(export=_FAKE_EXPORT_PATH)
def setUp(self):
super(SMBFSVolumeDriverTestCase, self).setUp()
self._volume_driver = volumeops.SMBFSVolumeDriver()
self._volume_driver._conn = mock.Mock()
self._conn = self._volume_driver._conn
def test_get_export_path(self):
export_path = self._volume_driver._get_export_path(
self._FAKE_CONN_INFO)
expected_path = self._FAKE_EXPORT_PATH.replace('/', '\\')
self.assertEqual(expected_path, export_path)
@mock.patch.object(volumeops.BaseVolumeDriver, 'attach_volume')
def test_attach_volume(self, mock_attach):
# The tested method will just apply a lock before calling
# the superclass method.
self._volume_driver.attach_volume(
self._FAKE_CONN_INFO,
mock.sentinel.instance_name,
disk_bus=mock.sentinel.disk_bus)
mock_attach.assert_called_once_with(
self._FAKE_CONN_INFO,
mock.sentinel.instance_name,
disk_bus=mock.sentinel.disk_bus)
@mock.patch.object(volumeops.BaseVolumeDriver, 'detach_volume')
def test_detach_volume(self, mock_detach):
self._volume_driver.detach_volume(
self._FAKE_CONN_INFO,
instance_name=mock.sentinel.instance_name)
mock_detach.assert_called_once_with(
self._FAKE_CONN_INFO,
instance_name=mock.sentinel.instance_name)
@mock.patch.object(volumeops.VolumeOps, 'bytes_per_sec_to_iops')
@mock.patch.object(volumeops.VolumeOps, 'validate_qos_specs')
@mock.patch.object(volumeops.BaseVolumeDriver, 'get_disk_resource_path')
def test_set_disk_qos_specs(self, mock_get_disk_path,
mock_validate_qos_specs,
mock_bytes_per_sec_to_iops):
fake_total_bytes_sec = 8
fake_total_iops_sec = 1
storage_qos_specs = {'total_bytes_sec': fake_total_bytes_sec}
expected_supported_specs = ['total_iops_sec', 'total_bytes_sec']
mock_set_qos_specs = self._volume_driver._vmutils.set_disk_qos_specs
mock_bytes_per_sec_to_iops.return_value = fake_total_iops_sec
mock_get_disk_path.return_value = mock.sentinel.disk_path
self._volume_driver.set_disk_qos_specs(self._FAKE_CONN_INFO,
storage_qos_specs)
mock_validate_qos_specs.assert_called_once_with(
storage_qos_specs, expected_supported_specs)
mock_bytes_per_sec_to_iops.assert_called_once_with(
fake_total_bytes_sec)
mock_get_disk_path.assert_called_once_with(self._FAKE_CONN_INFO)
mock_set_qos_specs.assert_called_once_with(
mock.sentinel.disk_path,
fake_total_iops_sec)
class RBDVolumeDriverTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for the Hyper-V RBD volume driver."""
def test_get_vol_driver(self):
self._volumeops = volumeops.VolumeOps()
self._volumeops._volutils = mock.MagicMock()
self._volumeops._vmutils = mock.Mock()
connection_info = get_fake_connection_info()
connection_info['driver_volume_type'] = 'rbd'
drv = self._volumeops._get_volume_driver(connection_info)
# Not much to test here. The Hyper-V driver volume attach code
# is mostly generic and all the RBD related plumbing is handled
# by os-brick.
#
# We'll just ensure that the RBD driver can be retrieved and that it
# has the right fields.
self.assertTrue(drv._is_block_dev)
self.assertEqual('rbd', drv._protocol)
# Hyper-V requires a virtual SCSI disk so we'll ask for a
# local attach.
self.assertEqual(dict(do_local_attach=True),
drv._extra_connector_args)
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
    with tarfile.open('cifar-10-python.tar.gz') as tar:
        # the with statement closes the archive; no explicit close() needed
        tar.extractall()
tests.test_folder_path(cifar10_dataset_folder_path)
# ## Explore the Data
# The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named `data_batch_1`, `data_batch_2`, etc. Each batch contains labels and images, each belonging to one of the following classes:
# * airplane
# * automobile
# * bird
# * cat
# * deer
# * dog
# * frog
# * horse
# * ship
# * truck
#
# Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the `batch_id` and `sample_id`. The `batch_id` is the id for a batch (1-5). The `sample_id` is the id for an image and label pair in the batch.
#
# Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions.
# In[2]:
#get_ipython().magic('matplotlib inline')
#get_ipython().magic("config InlineBackend.figure_format = 'retina'")
import helper
import numpy as np
# Explore the dataset
batch_id = 4
sample_id = 222
#helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
# In[3]:
import pickle
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
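# Note on the reshape above: CIFAR-10 stores each image as a flat row of
# 3072 bytes in channel-major order (1024 red, then 1024 green, then 1024
# blue values, each a row-major 32x32 plane); reshaping to (N, 3, 32, 32)
# and transposing the axes to (0, 2, 3, 1) yields (N, 32, 32, 3) images.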
# In[4]:
# check value range of data
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
print("min value: {:d} max value: {:d}".format(features.min(), features.max()))
# ## Implement Preprocess Functions
# ### Normalize
# In the cell below, implement the `normalize` function to take in image data, `x`, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as `x`.
# In[5]:
#def normalize(x):
#    """
#    Normalize a list of sample image data in the range of 0 to 1
#    : x: List of image data. The image shape is (32, 32, 3)
#    : return: Numpy array of normalized data
#    """
#    normalizer = lambda n: n / 255
#    return np.vectorize(normalizer)(x)
# done in one line instead with a lambda and numpy's vectorize function :)
normalize = np.vectorize(lambda i: i / 255)
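# A faster equivalent (a sketch; np.vectorize loops in Python, while plain
# NumPy broadcasting runs in C). The name `normalize_fast` is ours, not the
# notebook's:
def normalize_fast(x):
    # uint8 pixel values in [0, 255] -> float32 in [0, 1]
    return np.asarray(x, dtype=np.float32) / 255.0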
# ### One-hot encode
# Just like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the `one_hot_encode` function. The input, `x`, is a list of labels. Implement the function to return the list of labels as a one-hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to `one_hot_encode`. Make sure to save the map of encodings outside the function.
#
# Hint: Don't reinvent the wheel.
# In[6]:
def one_hot_encode(x):
"""
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
"""
# the return matrix (shape: [num samples x 10 categories])
ret = np.zeros([len(x), 10], dtype=np.float32)
for i, label in enumerate(x):
ret[i][label] = 1 # only set the label/category-slot to 1, leave all others at 0
return ret
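# Equivalent one-liner using an identity-matrix lookup (the "don't reinvent
# the wheel" hint above); `one_hot_encode_eye` is our name for this sketch:
def one_hot_encode_eye(x):
    return np.eye(10, dtype=np.float32)[np.asarray(x)]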
import pickle
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import importlib
import aiopening as ai
import tensorflow as tf
n_classes = 10
class MyNet(ai.Model):
def __init__(self, name, num_colors=3, **kwargs):
super().__init__(name, **kwargs)
# input depth (the 4th slot of the shape of the input volume (#samples x w x h x in-depth))
self.num_colors = num_colors
# have to implement the build method
# build the entire graph from scratch into the default graph (the call to reset will handle this for us)
def construct(self):
# define our three inputs to this graph
x, y, keep_prob = self.add_feeds((tf.float32, [None, 32, 32, 3], "x"), (tf.float32, [None, n_classes], "y"),
(tf.float32, None, "keep_prob"))
# create the convolutional part
conv = ai.modules.Convolutional2DNN("convolutional_module",
output_channels=16,
kernel_shapes=(8, 8),
strides=(1, 1),
max_pooling=True,
pool_k_sizes=(2, 2),
pool_strides=(2, 2)
)
# now conv is an snt.AbstractModule
conv_out = conv(x)
# add dropout to conv layer
conv_out = tf.nn.dropout(conv_out, keep_prob)
        # conv_out is the output of the convolutional part AND input to the next module (flatten)
flatten = ai.modules.FlattenLayer("flatten_module")
flatten_out = flatten(conv_out)
# flatten_out is the output of the flattening operation AND the input to the next module (fully connected)
fc = ai.modules.FullyConnectedNN("fc_module", [160], activations=[tf.nn.relu])
fc_out = fc(flatten_out)
fc_end = ai.modules.FullyConnectedNN("logits", [10], activations=None)
logits = fc_end(fc_out)
# out are now the logits coming from the last layer
self._outputs["logits"] = logits
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y), name="cost")
train_op = tf.train.AdamOptimizer().minimize(cost, name="train_op") # set this model's train operation
# predictions
predictions = tf.argmax(logits, 1, name="predictions") # 1=axis 1 (0 is the batch)
# Accuracy
correct_predictions = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)) # this will be true or false values
# casting true/false will result in 0.0/1.0, respectively
# the mean of these 0.0 or 1.0 over all samples will give the accuracy over all samples
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name="accuracy")
self.add_outputs(logits, cost, train_op, predictions, accuracy)
    # question: where is the print-out for the validation accuracy? This value is missing from the entire project.
    # How do we know when to stop training?
    # Also, this function is only called on the last piece of the batch that's being worked on (1 to 5),
    # so since there are 9000 training samples in a batch and my batch size is 512, this function is only run on the
    # last 296 samples, which doesn't really make sense. It should be run on the entire training set PLUS on
    # the validation set separately.
def print_stats(self, session, feature_batch, label_batch):
"""
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
"""
cost_out, acc_out = session.run([self.get_output("cost"), self.get_output("accuracy")],
feed_dict={self.get_feed("x"): feature_batch, self.get_feed("y"): label_batch, self.get_feed("keep_prob"): 1.0})
###DEBUG: cost_out, acc_out, logits_out, labels_out = session.run([cost, accuracy, logits, y], feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})
print("Loss: {:.2f} Accuracy: {:.2f}%".format(cost_out, acc_out*100))
#### DEBUG: (had to find out why cost would go down to ~2.3 and accuracy would stay at ~10%, it's because all 10 categories would get 0.1 in the softmax, which is useless)
## list of 10-logits (predicted) for each input sample
#logits_list = tf.unpack(logits_out)
## list of 10-logits (actual label) for each input sample
#labels_list = tf.unpack(labels_out)
## loop over all input samples
#for i, l in enumerate(logits_list):
# print("Sample: "+str(i))
# predicted_cats = tf.unpack(l)
# actual_cats = tf.unpack(labels_list[i])
# print("output-predictions (softmaxed): "+str(session.run(tf.nn.softmax(predicted_cats))))
# print("output-labels: "+str(session.run(actual_cats)))
epochs = 1
batch_size = 2048
keep_probability = 1.0  # making this smaller than 1.0 usually gives me worse results (I'm guessing my model is too simple to need dropout at all)
myModel = MyNet("test_conv_nn", 3)
"""
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
## Initializing the variables
#sess.run(tf.global_variables_initializer())
# Training cycle -> this will be the algorithm
for epoch in range(epochs):
batch_i = 1
i = 0
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
#print("training mini-batch {:d} Len-features={:d} Len-labels={:d}".format(i, len(batch_features), len(batch_labels)))
myModel.train(sess, {"keep_prob": keep_probability, "x": batch_features, "y": batch_labels}, init=True if epoch == 0 and i == 0 else False)
i += 1
print('Epoch {:>2}/{:d}, CIFAR-10 Batch {:d}:'.format(epoch + 1, epochs, batch_i))
print('Training Set: ', end='')
myModel.print_stats(sess, batch_features, batch_labels)
"""
# ### Fully Train the Model
# Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.
# In[ ]:
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all CIFAR-10 batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
sess.run(myModel.get_output("train_op"), feed_dict={
myModel.get_feed("keep_prob"): keep_probability,
myModel.get_feed("x"): batch_features,
myModel.get_feed("y"): batch_labels})
            print('Epoch {:>2}, CIFAR-10 Batch {}:'.format(epoch + 1, batch_i))
            print('Training Set: ', end='')
            myModel.print_stats(sess, batch_features, batch_labels)
# CORRECTION: print out validation loss and accuracy
# also, using the print_stats function now instead of 'custom' code
#print('Validation Set: ', end='')
#print_stats(sess, valid_feature_batch, valid_label_batch, cost, accuracy)
#print("")
# Save Model's variables
myModel.save_state(sess)
"""
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
#Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
"""
# ## Why 50-70% Accuracy?
# You might be wondering why you can't get an accuracy any higher. First things first, 50% isn't bad for a simple CNN. Pure guessing would get you 10% accuracy. However, you might notice people are getting scores [well above 70%](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130). That's because we haven't taught you all there is to know about neural networks. We still need to cover a few more techniques.
# ## Submitting This Project
# When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_image_classification.ipynb" and save it as an HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
# ## Trying my daughter's (7) frog painting :)
#
# In[ ]:
"""import matplotlib.pyplot as plt
from matplotlib.image import imread
import helper
image1 = imread("/home/ubuntu/deep-learning/image-classification/katis_frog.png")
image2 = imread("/home/ubuntu/deep-learning/image-classification/henrik_auto.png")
image3 = imread("/home/ubuntu/deep-learning/image-classification/katis_dog.png")
f, ax = plt.subplots(3, sharey=True)
ax[0].set_title('picture 0')
ax[0].imshow(image1)
ax[1].set_title('picture 1')
ax[1].imshow(image2)
ax[2].set_title('picture 2')
ax[2].imshow(image3)
#plt.xlim(0, 32)
#plt.ylim(32, 0)
# slice away alpha channel
def slice_alpha(image):
return image[:,:,:-1]
image1 = slice_alpha(image1)
image2 = slice_alpha(image2)
image3 = slice_alpha(image3)
# fill up image array (1st dim of input tensor)
images = [image1, image2, image3]
loaded_graph = tf.Graph()
label_names = helper._load_label_names()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
predictions = sess.run(tf.argmax(loaded_logits, 1), feed_dict={loaded_x: images, loaded_keep_prob: 1.0})
#print(predictions)
for i,pred in enumerate(predictions):
print("Picture {:d} is showing a {:s}".format(i,label_names[pred]))
"""
# Stray Model method left dangling here; completed as a minimal runnable sketch.
def predict(self, session, inputs):
    feed = {self.get_feed("x"): inputs, self.get_feed("keep_prob"): 1.0}
    return session.run(self.get_output("predictions"), feed_dict=feed)
# -*- coding: utf-8 -*-
import uuid
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth import authenticate, login
from django.core.mail import EmailMessage
from django.http import Http404
from django.shortcuts import render, redirect
from django.template import loader
from django.utils.translation import ugettext as _
from organizations.backends.tokens import RegistrationTokenGenerator
from organizations.backends.forms import UserRegistrationForm, OrganizationRegistrationForm
from organizations.models import get_user_model
from organizations.utils import create_organization
from organizations.utils import model_field_attr
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
# Backend classes should provide a common interface
class BaseBackend(object):
"""
Base backend class for registering and inviting users to an organization
"""
def __init__(self, *args, **kwargs):
self.user_model = get_user_model()
def get_urls(self):
raise NotImplementedError
def get_success_url(self):
"""Will return the class's `success_url` attribute unless overridden"""
raise NotImplementedError
def get_form(self, **kwargs):
"""Returns the form for registering or inviting a user"""
if not hasattr(self, 'form_class'):
raise AttributeError(_("You must define a form_class"))
return self.form_class(**kwargs)
def get_token(self, user, **kwargs):
"""Returns a unique token for the given user"""
return RegistrationTokenGenerator().make_token(user)
def get_username(self):
"""Returns a UUID based 'random' and unique username"""
return str(uuid.uuid4())[:model_field_attr(self.user_model, 'username', 'max_length')]
def activate_view(self, request, user_id, token):
"""
Activates the given User by setting `is_active` to true if the provided
information is verified.
"""
try:
user = self.user_model.objects.get(id=user_id, is_active=False)
except self.user_model.DoesNotExist:
raise Http404(_("Your URL may have expired."))
if not RegistrationTokenGenerator().check_token(user, token):
raise Http404(_("Your URL may have expired."))
form = self.get_form(data=request.POST or None, instance=user)
if form.is_valid():
form.instance.is_active = True
user = form.save()
user.set_password(form.cleaned_data['password'])
user.save()
for org in user.organization_set.filter(is_active=False):
org.is_active = True
org.save()
user = authenticate(username=form.cleaned_data['username'],
password=form.cleaned_data['password'])
login(request, user)
return redirect(self.get_success_url())
return render(request, 'organizations/register_form.html',
{'form': form})
def send_reminder(self, user, sender=None, **kwargs):
"""Sends a reminder email to the specified user"""
if user.is_active:
return False
token = RegistrationTokenGenerator().make_token(user)
kwargs.update({'token': token})
self._send_email(user, self.reminder_subject, self.reminder_body,
sender, **kwargs)
def _send_email(self, user, subject_template, body_template,
sender=None, **kwargs):
"""Utility method for sending emails to new users"""
if sender:
from_email = "%s %s <%s>" % (sender.first_name, sender.last_name,
settings.DEFAULT_FROM_EMAIL)
reply_to = "%s %s <%s>" % (sender.first_name, sender.last_name,
sender.email)
        else:
            from_email = settings.DEFAULT_FROM_EMAIL
            reply_to = from_email
headers = {'Reply-To': reply_to}
kwargs.update({'sender': sender, 'user': user})
ctx = kwargs
subject_template = loader.get_template(subject_template)
body_template = loader.get_template(body_template)
subject = subject_template.render(ctx).strip() # Remove stray newline characters
body = body_template.render(ctx)
        return EmailMessage(subject, body, from_email, [user.email],
                            headers=headers).send()
class RegistrationBackend(BaseBackend):
"""
A backend for allowing new users to join the site by creating a new user
associated with a new organization.
"""
# NOTE this backend stands to be simplified further, as email verification
# should be beyond the purview of this app
activation_subject = 'organizations/email/activation_subject.txt'
activation_body = 'organizations/email/activation_body.html'
reminder_subject = 'organizations/email/reminder_subject.txt'
reminder_body = 'organizations/email/reminder_body.html'
form_class = UserRegistrationForm
def get_success_url(self):
return reverse('registration_success')
def get_urls(self):
return [
url(r'^complete/$', self.success_view,
name="registration_success"),
url(r'^(?P<user_id>[\d]+)-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
self.activate_view, name="registration_register"),
url(r'^$', self.create_view, name="registration_create"),
]
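    # A minimal wiring sketch (hypothetical urls.py, not part of this module;
    # adjust the import path to wherever this backend class lives):
    #
    #   from django.conf.urls import include, url
    #   from myproject.backends import RegistrationBackend
    #
    #   urlpatterns = [
    #       url(r'^register/', include(RegistrationBackend().get_urls())),
    #   ]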
def register_by_email(self, email, sender=None, request=None, **kwargs):
"""
        Returns an inactive User object filled with dummy data and sends an
        activation email.
"""
try:
user = self.user_model.objects.get(email=email)
except self.user_model.DoesNotExist:
user = self.user_model.objects.create(username=self.get_username(),
email=email, password=self.user_model.objects.make_random_password())
user.is_active = False
user.save()
self.send_activation(user, sender, **kwargs)
return user
def send_activation(self, user, sender=None, **kwargs):
"""
Invites a user to join the site
"""
if user.is_active:
return False
token = self.get_token(user)
kwargs.update({'token': token})
self._send_email(user, self.activation_subject, self.activation_body,
sender, **kwargs)
def create_view(self, request):
"""
Initiates the organization and user account creation process
"""
if callable(request.user.is_authenticated):
is_authenticated = request.user.is_authenticated()
else:
is_authenticated = request.user.is_authenticated
if is_authenticated:
return redirect("organization_add")
form = OrganizationRegistrationForm(request.POST or None)
if form.is_valid():
try:
user = self.user_model.objects.get(email=form.cleaned_data['email'])
except self.user_model.DoesNotExist:
user = self.user_model.objects.create(username=self.get_username(),
email=form.cleaned_data['email'],
password=self.user_model.objects.make_random_password())
user.is_active = False
user.save()
else:
return redirect("organization_add")
organization = create_organization(user, form.cleaned_data['name'],
form.cleaned_data['slug'], is_active=False)
return render(request, 'organizations/register_success.html',
{'user': user, 'organization': organization})
return render(request, 'organizations/register_form.html',
{'form': form})
def success_view(self, request):
return render(request, 'organizations/register_success.html', {})
class InvitationBackend(BaseBackend):
"""
A backend for inviting new users to join the site as members of an
organization.
"""
invitation_subject = 'organizations/email/invitation_subject.txt'
invitation_body = 'organizations/email/invitation_body.html'
reminder_subject = 'organizations/email/reminder_subject.txt'
reminder_body = 'organizations/email/reminder_body.html'
form_class = UserRegistrationForm
def get_success_url(self):
return reverse('organization_list')
def get_urls(self):
return [
url(r'^(?P<user_id>[\d]+)-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
self.activate_view, name="invitations_register"),
]
def invite_by_email(self, email, sender=None, request=None, **kwargs):
"""Creates an inactive user with the information we know and then sends
an invitation email for that user to complete registration.
        If your project uses email in a different way then you should make
        sure to extend this method, as it only checks the `email` attribute
        for Users.
"""
try:
user = self.user_model.objects.get(email=email)
except self.user_model.DoesNotExist:
user = self.user_model.objects.create(username=self.get_username(),
email=email, password=self.user_model.objects.make_random_password())
user.is_active = False
user.save()
self.send_invitation(user, sender, **kwargs)
return user
def send_invitation(self, user, sender=None, **kwargs):
"""An intermediary function for sending an invitation email that
selects the templates, generating the token, and ensuring that the user
has not already joined the site.
"""
if user.is_active:
return False
token = self.get_token(user)
kwargs.update({'token': token})
self._send_email(user, self.invitation_subject, self.invitation_body,
sender, **kwargs)
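    # Usage sketch (hypothetical view code; `request.user` is the inviter and
    # any extra kwargs are passed through to the email template context):
    #
    #   backend = InvitationBackend()
    #   user = backend.invite_by_email('new.member@example.com',
    #                                  sender=request.user,
    #                                  organization=org)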
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# Selenium Framework
A framework built around the Selenium WebDriver to simplify the basic capabilities that are
most frequently used (in my experience anyway).
'''
__title__ = 'Selenium Framework'
__version__ = '0.5.0'
__copyright__ = '2011 - 2020'
__author__ = 'John Dahl'
__date__ = '2020-02-08'
__license__ = '''MIT License
Copyright (c) 2011-2020 John Dahl
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
################################################################################
import re
import time
from datetime import datetime
from selenium import webdriver as WD
from selenium.webdriver.common.action_chains import ActionChains as AC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.remote.webelement import WebElement
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
# from selenium.webdriver.remote.webdriver import WebDriver
# from selenium.common import exceptions as EX
# from selenium.common.exceptions import NoSuchElementException
class FrameworkException(WebDriverException):
'''Framework exception for deliberately thrown exceptions.'''
class Driver():
'''
Main framework object. Everything runs from an instance of this object.
'''
def __init__(self):
self.browser = None
self.close = None
self.web_element = WebElement
self.keys = Keys
############################################################################
# Browser-dependent Methods
############################################################################
def check_alert(self, accept_alert=True):
        '''If a browser alert is present, retrieve its message,
        take the requested action (accept by default), and
        return the message; otherwise return False.'''
try:
alert = self.browser.switch_to.alert
alert_text = alert.text
if accept_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
except NoAlertPresentException:
return False
except WebDriverException:
            self.throw('Unable to check the alert.')
def control_click(self, webelement, container=None):
'''Given a web element,
perform a control-click on the element and
return a boolean confirmation of the success of the action.'''
if container is None:
container = self.browser
try:
AC(container)\
.context_click(webelement)\
.send_keys(Keys.ARROW_DOWN)\
.send_keys(Keys.ENTER)\
.perform()
self.wait(.5)
return True
except WebDriverException:
return False
def double_click(self, webelement, container=None):
'''Given a web element,
perform a double-click on the element and
return a boolean confirmation of the success of the action.'''
if container is None:
container = self.browser
try:
AC(container).double_click(webelement).perform()
self.wait(.5)
return True
except WebDriverException:
return False
def find(self, locator_string, container=None, wait=3):
'''Given a locator in the form of "type=value",
an optional container within which to start a nested search,
and an optional wait time,
locate the elements and return a list of results.
If a result is not found, a second attempt will be made after
the wait time.
If the search returns a single result, that result is returned
from the method as a WebElement.'''
if not container:
container = self.browser
loc_type, loc_value = self._convert_locator(locator_string)
try:
element_list = container.find_elements(by=loc_type, value=loc_value)
if (not element_list and isinstance(wait, int) and wait > 0):
WebDriverWait(self.browser, wait).until(
EC.presence_of_element_located((loc_type, loc_value))
)
element_list = container.find_elements(by=loc_type, value=loc_value)
if len(element_list) == 1:
element_list = element_list[0]
except TimeoutException:
element_list = []
return element_list
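    # Usage sketch (hypothetical page, illustrating the return shapes):
    #
    #   rows = driver.find('css=table tr')        # several matches -> list
    #   logo = driver.find('id=logo')             # single match -> WebElement
    #   missing = driver.find('id=nope', wait=0)  # no match -> empty list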
def goto(self, url):
'''Navigate to the given url.'''
self.browser.get(url)
def is_element_clickable(self, webelement):
        '''Given a web element, determine if it is eligible to be clicked.'''
#if not isinstance(webelement, WebElement):
# return False
if (webelement.is_displayed() and
webelement.is_enabled() and
webelement.size['width'] > 0 and
webelement.size['height'] > 0):
return True
else:
return False
def is_field_set(self, webelement):
'''Given a web element representing a form field,
determine if the field has a value set, and if so,
return the value or None.'''
try:
field_tag = webelement.tag_name
field_type = webelement.get_attribute('type') or None
if field_tag == 'select':
field_value_list = Select(webelement).all_selected_options
field_value = [f.text for f in field_value_list]
elif field_tag == 'input' and field_type in ['checkbox', 'radio']:
field_value = webelement.is_selected()
elif field_tag in ['input', 'textarea']:
field_value = webelement.get_attribute('value')
else:
field_value = None
except WebDriverException:
field_value = None
return field_value
def is_radio_button_group_set(self, button_group):
'''Given a web element representing a radio button group,
determine if the field has a value set, and if so,
return the value or None.'''
field_value = [button.get_attribute('value')
for button in button_group
if button.is_selected()]
return field_value
def open(self, config=None):
'''
Open a new instance of the selected browser and
set the close method.
TODO: Add custom profile support.
TODO: Add remote/grid support.
'''
if not config:
browser = input('Which browser would you like to use: ').capitalize()
headless = True
else:
browser = config['browser']
headless = config['headless']
if browser == 'Chrome':
options = WD.chrome.options.Options()
if headless:
options.add_argument("--headless")
options.add_argument("--window-size=1920x1080")
elif browser == 'Firefox':
options = WD.firefox.options.Options()
options.headless = headless
# Remote
# if config.get('remote_hub', False):
# url, port = config['remote_hub']
# if browser_name == 'Ie':
# browser_name = 'INTERNETEXPLORER'
# else:
# browser_name = browser_name.upper()
# self.browser = WD.Remote(
# command_executor=f'http://{url}:{port}/wd/hub',
# desired_capabilities=getattr(WD.DesiredCapabilities, browser_name),
# browser_profile=browser_options['profile'])
try:
self.browser = getattr(WD, browser)(options=options)
self.close = self.browser.close
        except (AttributeError, NameError):
            # getattr fails for unknown driver names; `options` is undefined
            # for browsers other than Chrome/Firefox
            self.throw('Unknown browser selected.')
# def open_bak(self, browser_name='gc', selenium_hub='local', selenium_port='4444'):
# elif selenium_hub != 'local':
# command_executor = 'http://{0}:{1}/wd/hub'.format(selenium_hub, selenium_port)
# desired_capabilities = getattr(WD.DesiredCapabilities, remote_driver)
# self.browser = WD.Remote(
# command_executor=command_executor,
# desired_capabilities=desired_capabilities,
# browser_profile=None)
# else:
# raise 'Invalid browser selection.'
# # Setup the driver close method
def right_click(self, webelement, container=None):
'''Given a web element, right-click on it.'''
if container is None:
container = self.browser
try:
AC(container).context_click(webelement).perform()
self.wait(.5)
return True
except WebDriverException:
return False
def scroll_into_view(self, webelement):
'''Given a web element on the current page,
scroll the page until the element is visible.'''
self.browser.execute_script("arguments[0].scrollIntoView(true);", webelement)
self.wait(.5)
def set_field(self, webelement, field_value, append=False):
'''Given a form field, set the value of the field.'''
field_tag = webelement.tag_name
if field_tag == 'textarea' or \
field_tag == 'input' and \
webelement.get_attribute('type') not in ['radio', 'checkbox']:
webelement.click()
if not append:
webelement.clear()
webelement.send_keys(field_value)
#self.find('tag=body').click()
elif field_tag == 'input':
webelement.click()
elif field_tag == 'select':
select_element = Select(webelement)
if isinstance(field_value, str):
select_element.select_by_visible_text(field_value)
elif isinstance(field_value, list):
if (len(field_value) > 1 and not append) or not field_value:
select_element.deselect_all()
for item in field_value:
select_element.select_by_visible_text(str(item))
def set_window(self, window_size, window_position):
'''Set the browser window size and position.'''
self.browser.set_window_size(window_size[0], window_size[1])
self.browser.set_window_position(window_position[0], window_position[1])
def switch_to(self, locator_string=None, container=None):
'''Change the browser context to a different window or frame.'''
try:
if container is None:
container = self.browser
if locator_string is not None:
locator_type, locator_value = self._convert_locator(locator_string)
else:
locator_type = None
if locator_type in [None, '', 'top', 'default']:
self.browser.switch_to.default_content()
elif locator_type == 'window':
container.switch_to.window(locator_value)
elif locator_type == 'frame':
container.switch_to.frame(locator_value)
else:
self.throw(f'Invalid switch-to target: {locator_string}')
except WebDriverException:
            self.throw(f'Unable to switch to the target: {locator_string}')
def wait_until_element_clickable(self, locator_string=None, timeout=30):
'''Wait until the condition exists when the element is clickable or timeout.'''
locator_object = self._convert_locator(locator_string)
webelement = WebDriverWait(self.browser, timeout).until(
EC.element_to_be_clickable(locator_object))
return webelement
############################################################################
# Browser-independent Utilities
############################################################################
def _convert_locator(self, locator_string):
'''Given a locator string in the format type=value
return a tuple in the format (valid_type, value)'''
locator_map = {
'id' : By.ID,
'name' : By.NAME,
'css' : By.CSS_SELECTOR,
'class' : By.CLASS_NAME,
'link' : By.LINK_TEXT,
'plink' : By.PARTIAL_LINK_TEXT,
'tag' : By.TAG_NAME,
'xpath' : By.XPATH,
            'window': 'window',
            'frame' : 'frame',
            # pseudo-locators accepted by switch_to(); use the 'type=' form,
            # e.g. 'top=' or 'default='
            'top'    : 'top',
            'default': 'default',
        }
locator_string_separator = locator_string.find('=')
locator_type = locator_string[:locator_string_separator].strip()
locator_value = locator_string[locator_string_separator + 1:].strip()
return (locator_map[locator_type], locator_value)
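    # Examples of the 'type=value' grammar this method accepts:
    #   'id=login'          -> (By.ID, 'login')
    #   'css=div.alert > a' -> (By.CSS_SELECTOR, 'div.alert > a')
    #   'xpath=//tr[2]/td'  -> (By.XPATH, '//tr[2]/td')
    # Only the first '=' splits type from value, so values may contain '='.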
def get_date(self):
'''Return the current date.'''
#ut_test_001
return self.get_timestamp()[:10]
def get_time(self):
'''Return the current time.'''
# ut_test_002
return self.get_timestamp()[-8:]
def get_timestamp(self):
'''Return the current datetime.'''
return str(datetime.now())[:-7]
def make_valid_name(self, invalid_name):
'''Remove bad characters and return a valid name.'''
tmp_name = invalid_name.strip()
tmp_name = re.sub('[ -]', '_', tmp_name)
tmp_name = re.sub('[()]', '', tmp_name)
return tmp_name
def pause(self, message_text=None):
'''Pause the current execution until the Enter key is pressed.'''
if message_text:
print(message_text)
input('Press the ENTER key to continue.')
def wait(self, seconds=0):
'''Pause the current execution for the given number of seconds.'''
time.sleep(seconds)
def throw(self, message):
'''Raise a FrameworkException error.'''
raise FrameworkException(message)
if __name__ == '__main__':
    print('The Selenium Framework module is not intended to run '
          'as a script.')
    print('Add the following line to your scripts or type this in your '
          'Python session:')
    print('\timport selenium_framework as sf')
|
|
from collections import defaultdict
from itertools import tee, chain, combinations
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.factors import factor_product
from pgmpy.inference import Inference, BeliefPropagation
class DBNInference(Inference):
def __init__(self, model):
"""
Class for performing inference using Belief Propagation method
for the input Dynamic Bayesian Network.
For the exact inference implementation, the interface algorithm
is used which is adapted from [1].
Parameters
----------
        model: Dynamic Bayesian Network
            Model for which inference is to be performed
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.inference import DBNInference
>>> dbnet = DBN()
>>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
... (('Z', 0), ('Z', 1))])
>>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5], [0.5]])
>>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
... [0.4, 0.1]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
... [0.8, 0.7]],
... evidence=[('X', 0)],
... evidence_card=[2])
>>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
... [0.6, 0.3]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
>>> dbnet.initialize_initial_state()
>>> dbn_inf = DBNInference(dbnet)
>>> dbn_inf.start_junction_tree.nodes()
NodeView(((('X', 0), ('Y', 0)), (('X', 0), ('Z', 0))))
>>> dbn_inf.one_and_half_junction_tree.nodes()
NodeView(((('Z', 1), ('Z', 0)), (('Y', 1), ('X', 1)), (('Z', 1), ('X', 1))))
References
----------
[1] Dynamic Bayesian Networks: Representation, Inference and Learning
by Kevin Patrick Murphy
http://www.cs.ubc.ca/~murphyk/Thesis/thesis.pdf
"""
super(DBNInference, self).__init__(model)
self._initialize_structures()
self.interface_nodes_0 = model.get_interface_nodes(time_slice=0)
self.interface_nodes_1 = model.get_interface_nodes(time_slice=1)
start_markov_model = self.start_bayesian_model.to_markov_model()
one_and_half_markov_model = self.one_and_half_model.to_markov_model()
combinations_slice_0 = tee(combinations(set(self.interface_nodes_0), 2), 2)
combinations_slice_1 = combinations(set(self.interface_nodes_1), 2)
start_markov_model.add_edges_from(combinations_slice_0[0])
one_and_half_markov_model.add_edges_from(
chain(combinations_slice_0[1], combinations_slice_1)
)
self.one_and_half_junction_tree = one_and_half_markov_model.to_junction_tree()
self.start_junction_tree = start_markov_model.to_junction_tree()
self.start_interface_clique = self._get_clique(
self.start_junction_tree, self.interface_nodes_0
)
self.in_clique = self._get_clique(
self.one_and_half_junction_tree, self.interface_nodes_0
)
self.out_clique = self._get_clique(
self.one_and_half_junction_tree, self.interface_nodes_1
)
def _shift_nodes(self, nodes, time_slice):
"""
Shifting the nodes to a certain required timeslice.
Parameters
----------
nodes: list, array-like
List of node names.
nodes that are to be shifted to some other time slice.
time_slice: int
time slice where to shift the nodes.
"""
return [(node[0], time_slice) for node in nodes]
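    # e.g. self._shift_nodes([('X', 0), ('Z', 0)], 1) == [('X', 1), ('Z', 1)]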
def _get_clique(self, junction_tree, nodes):
"""
Extracting the cliques from the junction tree which are a subset of
the given nodes.
Parameters
----------
junction_tree: Junction tree
from which the nodes are to be extracted.
nodes: iterable container
A container of nodes (list, dict, set, etc.).
"""
return [
clique for clique in junction_tree.nodes() if set(nodes).issubset(clique)
][0]
def _get_evidence(self, evidence_dict, time_slice, shift):
"""
Getting the evidence belonging to a particular timeslice.
Parameters
----------
        evidence_dict: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence
        time_slice: int
            the time slice whose evidence should be extracted
        shift: int
            the time slice to which the extracted evidence is shifted.
"""
if evidence_dict:
return {
(node[0], shift): evidence_dict[node]
for node in evidence_dict
if node[1] == time_slice
}
def _marginalize_factor(self, nodes, factor):
"""
Marginalizing the factor selectively for a set of variables.
Parameters
----------
nodes: list, array-like
A container of nodes (list, dict, set, etc.).
factor: factor
factor which is to be marginalized.
"""
marginalizing_nodes = list(set(factor.scope()).difference(nodes))
return factor.marginalize(marginalizing_nodes, inplace=False)
def _update_belief(self, belief_prop, clique, clique_potential, message=None):
"""
Method for updating the belief.
Parameters
----------
belief_prop: Belief Propagation
Belief Propagation which needs to be updated.
        clique: clique
            The clique whose corresponding factor is to be updated.
        clique_potential: factor
            Potential multiplied into (or, when a message is passed,
            divided out of) the clique's factor.
        message: factor, optional
            When given, the clique factor is multiplied by this message
            and divided by `clique_potential`.
"""
old_factor = belief_prop.junction_tree.get_factors(clique)
belief_prop.junction_tree.remove_factors(old_factor)
if message:
if message.scope() and clique_potential.scope():
new_factor = old_factor * message
new_factor = new_factor / clique_potential
else:
new_factor = old_factor
else:
new_factor = old_factor * clique_potential
belief_prop.junction_tree.add_factors(new_factor)
belief_prop.calibrate()
def _get_factor(self, belief_prop, evidence):
"""
Extracts the required factor from the junction tree.
Parameters
----------
belief_prop: Belief Propagation
Belief Propagation which needs to be updated.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
"""
final_factor = factor_product(*belief_prop.junction_tree.get_factors())
if evidence:
for var in evidence:
if var in final_factor.scope():
final_factor.reduce([(var, evidence[var])])
return final_factor
def _shift_factor(self, factor, shift):
"""
Shifting the factor to a certain required time slice.
Parameters
----------
factor: DiscreteFactor
The factor which needs to be shifted.
shift: int
            The new time slice to which the factor should belong.
"""
new_scope = self._shift_nodes(factor.scope(), shift)
return DiscreteFactor(new_scope, factor.cardinality, factor.values)
def forward_inference(self, variables, evidence=None, args=None):
"""
Forward inference method using belief propagation.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence
        args: str, optional
            if "potential", return the per-time-slice clique potentials
            instead of the queried factors (used by backward_inference)
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.inference import DBNInference
>>> dbnet = DBN()
>>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
... (('Z', 0), ('Z', 1))])
>>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5], [0.5]])
>>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
... [0.4, 0.1]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
... [0.8, 0.7]],
... evidence=[('X', 0)],
... evidence_card=[2])
>>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
... [0.6, 0.3]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
>>> dbnet.initialize_initial_state()
>>> dbn_inf = DBNInference(dbnet)
>>> dbn_inf.forward_inference([('X', 2)], {('Y', 0):1, ('Y', 1):0, ('Y', 2):1})[('X', 2)].values
array([0.76738736, 0.23261264])
"""
variable_dict = defaultdict(list)
for var in variables:
variable_dict[var[1]].append(var)
time_range = max(variable_dict)
if evidence:
evid_time_range = max([time_slice for var, time_slice in evidence.keys()])
time_range = max(time_range, evid_time_range)
start_bp = BeliefPropagation(self.start_junction_tree)
mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
evidence_0 = self._get_evidence(evidence, 0, 0)
interface_nodes_dict = {}
potential_dict = {}
if evidence:
interface_nodes_dict = {
k: v for k, v in evidence_0.items() if k in self.interface_nodes_0
}
initial_factor = self._get_factor(start_bp, evidence_0)
marginalized_factor = self._marginalize_factor(
self.interface_nodes_0, initial_factor
)
potential_dict[0] = marginalized_factor
self._update_belief(mid_bp, self.in_clique, marginalized_factor)
if variable_dict[0]:
factor_values = start_bp.query(
variable_dict[0], evidence=evidence_0, joint=False
)
else:
factor_values = {}
for time_slice in range(1, time_range + 1):
evidence_time = self._get_evidence(evidence, time_slice, 1)
if interface_nodes_dict:
evidence_time.update(interface_nodes_dict)
if variable_dict[time_slice]:
variable_time = self._shift_nodes(variable_dict[time_slice], 1)
new_values = mid_bp.query(
variable_time, evidence=evidence_time, joint=False
)
changed_values = {}
for key in new_values.keys():
new_key = (key[0], time_slice)
new_factor = DiscreteFactor(
[new_key], new_values[key].cardinality, new_values[key].values
)
changed_values[new_key] = new_factor
factor_values.update(changed_values)
clique_phi = self._get_factor(mid_bp, evidence_time)
out_clique_phi = self._marginalize_factor(
self.interface_nodes_1, clique_phi
)
new_factor = self._shift_factor(out_clique_phi, 0)
potential_dict[time_slice] = new_factor
mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
self._update_belief(mid_bp, self.in_clique, new_factor)
if evidence_time:
interface_nodes_dict = {
(k[0], 0): v
for k, v in evidence_time.items()
if k in self.interface_nodes_1
}
else:
interface_nodes_dict = {}
if args == "potential":
return potential_dict
return factor_values
def backward_inference(self, variables, evidence=None):
"""
Backward inference method using belief propagation.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.inference import DBNInference
>>> dbnet = DBN()
>>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
... (('Z', 0), ('Z', 1))])
>>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5], [0.5]])
>>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
... [0.4, 0.1]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
... [0.8, 0.7]],
... evidence=[('X', 0)],
... evidence_card=[2])
>>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
... [0.6, 0.3]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
>>> dbnet.initialize_initial_state()
>>> dbn_inf = DBNInference(dbnet)
>>> dbn_inf.backward_inference([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
array([0.66594382, 0.33405618])
"""
variable_dict = defaultdict(list)
for var in variables:
variable_dict[var[1]].append(var)
time_range = max(variable_dict)
interface_nodes_dict = {}
if evidence:
evid_time_range = max([time_slice for var, time_slice in evidence.keys()])
time_range = max(time_range, evid_time_range)
end_bp = BeliefPropagation(self.start_junction_tree)
potential_dict = self.forward_inference(variables, evidence, "potential")
update_factor = self._shift_factor(potential_dict[time_range], 1)
factor_values = {}
for time_slice in range(time_range, 0, -1):
evidence_time = self._get_evidence(evidence, time_slice, 1)
evidence_prev_time = self._get_evidence(evidence, time_slice - 1, 0)
if evidence_prev_time:
interface_nodes_dict = {
k: v
for k, v in evidence_prev_time.items()
if k in self.interface_nodes_0
}
if evidence_time:
evidence_time.update(interface_nodes_dict)
mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
self._update_belief(mid_bp, self.in_clique, potential_dict[time_slice - 1])
forward_factor = self._shift_factor(potential_dict[time_slice], 1)
self._update_belief(mid_bp, self.out_clique, forward_factor, update_factor)
if variable_dict[time_slice]:
variable_time = self._shift_nodes(variable_dict[time_slice], 1)
new_values = mid_bp.query(
variable_time, evidence=evidence_time, joint=False
)
changed_values = {}
for key in new_values.keys():
new_key = (key[0], time_slice)
new_factor = DiscreteFactor(
[new_key], new_values[key].cardinality, new_values[key].values
)
changed_values[new_key] = new_factor
factor_values.update(changed_values)
clique_phi = self._get_factor(mid_bp, evidence_time)
in_clique_phi = self._marginalize_factor(self.interface_nodes_0, clique_phi)
update_factor = self._shift_factor(in_clique_phi, 1)
out_clique_phi = self._shift_factor(update_factor, 0)
self._update_belief(
end_bp, self.start_interface_clique, potential_dict[0], out_clique_phi
)
evidence_0 = self._get_evidence(evidence, 0, 0)
if variable_dict[0]:
factor_values.update(
end_bp.query(variable_dict[0], evidence_0, joint=False)
)
return factor_values
def query(self, variables, evidence=None, args="exact"):
"""
Query method for Dynamic Bayesian Network using Interface Algorithm.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence
        args: str
            only "exact" inference is implemented; it is performed via
            backward_inference
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.inference import DBNInference
>>> dbnet = DBN()
>>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
... (('Z', 0), ('Z', 1))])
>>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5], [0.5]])
>>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
... [0.4, 0.1]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
... [0.8, 0.7]],
... evidence=[('X', 0)],
... evidence_card=[2])
>>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
... [0.6, 0.3]],
... evidence=[('Z', 0)],
... evidence_card=[2])
>>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
>>> dbnet.initialize_initial_state()
>>> dbn_inf = DBNInference(dbnet)
>>> dbn_inf.query([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
array([0.66594382, 0.33405618])
"""
if args == "exact":
return self.backward_inference(variables, evidence)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance
=====================================================================
.. automodule:: nexenta.volume
.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
"""
from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import jsonrpc
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
nexenta_opts = [
cfg.StrOpt('nexenta_host',
default='',
help='IP address of Nexenta SA'),
cfg.IntOpt('nexenta_rest_port',
default=2000,
help='HTTP port to connect to Nexenta REST API server'),
cfg.StrOpt('nexenta_rest_protocol',
default='auto',
help='Use http or https for REST connection (default auto)'),
cfg.StrOpt('nexenta_user',
default='admin',
help='User name to connect to Nexenta SA'),
cfg.StrOpt('nexenta_password',
default='nexenta',
help='Password to connect to Nexenta SA',
secret=True),
cfg.IntOpt('nexenta_iscsi_target_portal_port',
default=3260,
help='Nexenta target portal port'),
cfg.StrOpt('nexenta_volume',
default='cinder',
help='pool on SA that will hold all volumes'),
cfg.StrOpt('nexenta_target_prefix',
default='iqn.1986-03.com.sun:02:cinder-',
help='IQN prefix for iSCSI targets'),
cfg.StrOpt('nexenta_target_group_prefix',
default='cinder/',
help='prefix for iSCSI target groups on SA'),
    cfg.StrOpt('nexenta_blocksize',
               default='',
               help='block size for volumes (blank means appliance default, 8KB)'),
cfg.BoolOpt('nexenta_sparse',
default=False,
help='flag to create sparse volumes'),
]
FLAGS.register_opts(nexenta_opts)
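# Example cinder.conf snippet wiring this driver (values are illustrative;
# adjust the volume_driver path to where this module is installed):
#
#   [DEFAULT]
#   volume_driver = cinder.volume.drivers.nexenta.volume.NexentaDriver
#   nexenta_host = 10.0.0.5
#   nexenta_user = admin
#   nexenta_password = nexenta
#   nexenta_volume = cinder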
class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921
"""Executes volume driver commands on Nexenta Appliance."""
def __init__(self):
super(NexentaDriver, self).__init__()
def do_setup(self, context):
protocol = FLAGS.nexenta_rest_protocol
auto = protocol == 'auto'
if auto:
protocol = 'http'
self.nms = jsonrpc.NexentaJSONProxy(
'%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host,
FLAGS.nexenta_rest_port),
FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto)
def check_for_setup_error(self):
"""Verify that the volume for our zvols exists.
:raise: :py:exc:`LookupError`
"""
        if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
            raise LookupError(_("Volume %s does not exist in Nexenta SA") %
                              FLAGS.nexenta_volume)
@staticmethod
def _get_zvol_name(volume_name):
"""Return zvol name that corresponds given volume name."""
return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
@staticmethod
def _get_target_name(volume_name):
"""Return iSCSI target name to access volume."""
return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
@staticmethod
def _get_target_group_name(volume_name):
"""Return Nexenta iSCSI target group name for volume."""
return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
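    # Naming scheme for a volume named 'volume-0001' with the default option
    # values above:
    #   zvol:         cinder/volume-0001
    #   iSCSI target: iqn.1986-03.com.sun:02:cinder-volume-0001
    #   target group: cinder/volume-0001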
def create_volume(self, volume):
"""Create a zvol on appliance.
:param volume: volume reference
"""
self.nms.zvol.create(
self._get_zvol_name(volume['name']),
'%sG' % (volume['size'],),
FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
def delete_volume(self, volume):
"""Destroy a zvol on appliance.
:param volume: volume reference
"""
try:
self.nms.zvol.destroy(self._get_zvol_name(volume['name']), '')
except nexenta.NexentaException as exc:
if "zvol has children" in exc.args[1]:
raise exception.VolumeIsBusy(volume_name=volume['name'])
else:
raise
def create_snapshot(self, snapshot):
"""Create snapshot of existing zvol on appliance.
        :param snapshot: snapshot reference
"""
self.nms.zvol.create_snapshot(
self._get_zvol_name(snapshot['volume_name']),
snapshot['name'], '')
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self.nms.zvol.clone(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
self._get_zvol_name(volume['name']))
def delete_snapshot(self, snapshot):
"""Delete volume's snapshot on appliance.
        :param snapshot: snapshot reference
"""
try:
self.nms.snapshot.destroy(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
'')
except nexenta.NexentaException as exc:
if "snapshot has dependent clones" in exc.args[1]:
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
else:
raise
def local_path(self, volume):
"""Return local path to existing local volume.
We never have local volumes, so it raises NotImplementedError.
:raise: :py:exc:`NotImplementedError`
"""
raise NotImplementedError
def _do_export(self, _ctx, volume, ensure=False):
"""Do all steps to get zvol exported as LUN 0 at separate target.
:param volume: reference of volume to be exported
:param ensure: if True, ignore errors caused by already existing
resources
:return: iscsiadm-formatted provider location string
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
try:
self.nms.iscsitarget.create_target({'target_name': target_name})
except nexenta.NexentaException as exc:
if not ensure or 'already configured' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.stmf.create_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
if not ensure or 'already exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target group creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.stmf.add_targetgroup_member(target_group_name,
target_name)
except nexenta.NexentaException as exc:
if not ensure or 'already exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target group member addition error "%s"'
' while ensuring export'), exc)
try:
self.nms.scsidisk.create_lu(zvol_name, {})
except nexenta.NexentaException as exc:
if not ensure or 'in use' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored LU creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
'target_group': target_group_name,
'lun': '0'})
except nexenta.NexentaException as exc:
if not ensure or 'view entry exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored LUN mapping entry addition error "%s"'
' while ensuring export'), exc)
return '%s:%s,1 %s 0' % (FLAGS.nexenta_host,
FLAGS.nexenta_iscsi_target_portal_port,
target_name)
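    # The returned provider location follows the iscsiadm convention
    # '<portal_ip>:<port>,<tpgt> <target_iqn> <lun>', e.g. (illustrative):
    #   '10.0.0.5:3260,1 iqn.1986-03.com.sun:02:cinder-volume-0001 0'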
def create_export(self, _ctx, volume):
"""Create new export for zvol.
:param volume: reference of volume to be exported
:return: iscsiadm-formatted provider location string
"""
loc = self._do_export(_ctx, volume, ensure=False)
return {'provider_location': loc}
def ensure_export(self, _ctx, volume):
"""Recreate parts of export if necessary.
:param volume: reference of volume to be exported
"""
self._do_export(_ctx, volume, ensure=True)
def remove_export(self, _ctx, volume):
"""Destroy all resources created to export zvol.
:param volume: reference of volume to be unexported
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
self.nms.scsidisk.delete_lu(zvol_name)
try:
self.nms.stmf.destroy_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
# We assume that target group is already gone
LOG.warn(_('Got error trying to destroy target group'
' %(target_group)s, assuming it is '
'already gone: %(exc)s'),
{'target_group': target_group_name, 'exc': exc})
try:
self.nms.iscsitarget.delete_target(target_name)
except nexenta.NexentaException as exc:
# We assume that target is gone as well
LOG.warn(_('Got error trying to delete target %(target)s,'
' assuming it is already gone: %(exc)s'),
{'target': target_name, 'exc': exc})
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
raise NotImplementedError()
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
raise NotImplementedError()
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
raise NotImplementedError()
|
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django_auth_ldap.backend import _LDAPUser
from django.test.client import RequestFactory
from typing import Any, Callable, Dict, List, Optional, Text
from builtins import object
from oauth2client.crypt import AppIdentityError
from django.core import signing
from django.core.urlresolvers import reverse
import jwt
import mock
import re
from zerver.forms import HomepageForm
from zerver.lib.actions import do_deactivate_realm, do_deactivate_user, \
do_reactivate_realm, do_reactivate_user
from zerver.lib.initial_password import initial_password
from zerver.lib.session_user import get_session_dict_user
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import \
get_realm, get_user_profile_by_email, email_to_username, UserProfile, \
PreregistrationUser, Realm
from confirmation.models import Confirmation
from zproject.backends import ZulipDummyBackend, EmailAuthBackend, \
GoogleMobileOauth2Backend, ZulipRemoteUserBackend, ZulipLDAPAuthBackend, \
ZulipLDAPUserPopulator, DevAuthBackend, GitHubAuthBackend, ZulipAuthMixin, \
dev_auth_enabled, password_auth_enabled, github_auth_enabled, \
SocialAuthMixin, AUTH_BACKEND_NAME_MAP
from zerver.views.auth import maybe_send_to_registration
from social_core.exceptions import AuthFailed
from social_django.strategy import DjangoStrategy
from social_django.storage import BaseDjangoStorage
from social_core.backends.github import GithubOrganizationOAuth2, GithubTeamOAuth2, \
GithubOAuth2
from six.moves import urllib
from six.moves.http_cookies import SimpleCookie
import ujson
from zerver.lib.test_helpers import MockLDAP
class AuthBackendTest(TestCase):
def verify_backend(self, backend, good_args=None,
good_kwargs=None, bad_kwargs=None,
email_to_username=None):
# type: (Any, List[Any], Dict[str, Any], Dict[str, Any], Callable[[Text], Text]) -> None
if good_args is None:
good_args = []
if good_kwargs is None:
good_kwargs = {}
email = u"hamlet@zulip.com"
user_profile = get_user_profile_by_email(email)
username = email
if email_to_username is not None:
username = email_to_username(email)
# If bad_kwargs was specified, verify auth fails in that case
if bad_kwargs is not None:
self.assertIsNone(backend.authenticate(username, **bad_kwargs))
# Verify auth works
result = backend.authenticate(username, *good_args, **good_kwargs)
self.assertEqual(user_profile, result)
# Verify auth fails with a deactivated user
do_deactivate_user(user_profile)
self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))
# Reactivate the user and verify auth works again
do_reactivate_user(user_profile)
result = backend.authenticate(username, *good_args, **good_kwargs)
self.assertEqual(user_profile, result)
# Verify auth fails with a deactivated realm
do_deactivate_realm(user_profile.realm)
self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))
# Verify auth works again after reactivating the realm
do_reactivate_realm(user_profile.realm)
result = backend.authenticate(username, *good_args, **good_kwargs)
self.assertEqual(user_profile, result)
# ZulipDummyBackend isn't a real backend so the remainder
# doesn't make sense for it
if isinstance(backend, ZulipDummyBackend):
return
# Verify auth fails if the auth backend is disabled on server
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipDummyBackend',)):
self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))
# Verify auth fails if the auth backend is disabled for the realm
for backend_name in AUTH_BACKEND_NAME_MAP.keys():
if isinstance(backend, AUTH_BACKEND_NAME_MAP[backend_name]):
break
index = getattr(user_profile.realm.authentication_methods, backend_name).number
user_profile.realm.authentication_methods.set_bit(index, False)
user_profile.realm.save()
self.assertIsNone(backend.authenticate(username, *good_args, **good_kwargs))
user_profile.realm.authentication_methods.set_bit(index, True)
user_profile.realm.save()
def test_dummy_backend(self):
# type: () -> None
self.verify_backend(ZulipDummyBackend(),
good_kwargs=dict(use_dummy_backend=True),
bad_kwargs=dict(use_dummy_backend=False))
def setup_subdomain(self, user_profile):
# type: (UserProfile) -> None
realm = user_profile.realm
realm.string_id = 'zulip'
realm.save()
def test_email_auth_backend(self):
# type: () -> None
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email(email)
password = "testpassword"
user_profile.set_password(password)
user_profile.save()
self.setup_subdomain(user_profile)
self.verify_backend(EmailAuthBackend(),
bad_kwargs=dict(password=''),
good_kwargs=dict(password=password))
# Subdomain is ignored when feature is not enabled
self.verify_backend(EmailAuthBackend(),
good_kwargs=dict(password=password,
realm_subdomain='acme',
return_data=dict()))
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
# With subdomains, authenticating with the right subdomain
# works; using the wrong subdomain doesn't
self.verify_backend(EmailAuthBackend(),
good_kwargs=dict(password=password,
realm_subdomain='zulip',
return_data=dict()),
bad_kwargs=dict(password=password,
realm_subdomain='acme',
return_data=dict()))
# Things work normally in the event that we're using a
# non-subdomain login page, even if subdomains are enabled
self.verify_backend(EmailAuthBackend(),
bad_kwargs=dict(password="wrong"),
good_kwargs=dict(password=password))
def test_email_auth_backend_disabled_password_auth(self):
# type: () -> None
email = u"hamlet@zulip.com"
user_profile = get_user_profile_by_email(email)
password = "testpassword"
user_profile.set_password(password)
user_profile.save()
# Verify if a realm has password auth disabled, correct password is rejected
with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
self.assertIsNone(EmailAuthBackend().authenticate(email, password))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleMobileOauth2Backend',))
def test_google_backend(self):
# type: () -> None
email = "hamlet@zulip.com"
backend = GoogleMobileOauth2Backend()
payload = dict(email_verified=True,
email=email)
user_profile = get_user_profile_by_email(email)
self.setup_subdomain(user_profile)
with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
self.verify_backend(backend)
# With REALMS_HAVE_SUBDOMAINS off, subdomain is ignored
with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
self.verify_backend(backend,
good_kwargs=dict(realm_subdomain='acme'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
# With subdomains, authenticating with the right subdomain
# works; using the wrong subdomain doesn't
with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
self.verify_backend(backend,
good_kwargs=dict(realm_subdomain="zulip"),
bad_kwargs=dict(realm_subdomain='acme'))
# Verify valid_attestation parameter is set correctly
unverified_payload = dict(email_verified=False)
with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=unverified_payload):
ret = dict() # type: Dict[str, str]
result = backend.authenticate(return_data=ret)
self.assertIsNone(result)
self.assertFalse(ret["valid_attestation"])
nonexistent_user_payload = dict(email_verified=True, email="invalid@zulip.com")
with mock.patch('apiclient.sample_tools.client.verify_id_token',
return_value=nonexistent_user_payload):
ret = dict()
result = backend.authenticate(return_data=ret)
self.assertIsNone(result)
self.assertTrue(ret["valid_attestation"])
with mock.patch('apiclient.sample_tools.client.verify_id_token',
side_effect=AppIdentityError):
ret = dict()
result = backend.authenticate(return_data=ret)
self.assertIsNone(result)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_backend(self):
# type: () -> None
email = "hamlet@zulip.com"
password = "test_password"
user_profile = get_user_profile_by_email(email)
self.setup_subdomain(user_profile)
backend = ZulipLDAPAuthBackend()
# Test LDAP auth fails when LDAP server rejects password
with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn',
side_effect=_LDAPUser.AuthenticationFailed("Failed")), (
mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
return_value=dict(full_name=['Hamlet']))):
self.assertIsNone(backend.authenticate(email, password))
# For this backend, we mock the internals of django_auth_ldap
with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
return_value=dict(full_name=['Hamlet']))):
self.verify_backend(backend, good_kwargs=dict(password=password))
with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
return_value=dict(full_name=['Hamlet']))):
self.verify_backend(backend, good_kwargs=dict(password=password,
realm_subdomain='acme'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
# With subdomains, authenticating with the right subdomain
# works; using the wrong subdomain doesn't
with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
return_value=dict(full_name=['Hamlet']))):
self.verify_backend(backend,
bad_kwargs=dict(password=password,
realm_subdomain='acme'),
good_kwargs=dict(password=password,
realm_subdomain='zulip'))
def test_devauth_backend(self):
# type: () -> None
self.verify_backend(DevAuthBackend())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
def test_remote_user_backend(self):
# type: () -> None
self.setup_subdomain(get_user_profile_by_email(u'hamlet@zulip.com'))
self.verify_backend(ZulipRemoteUserBackend(),
good_kwargs=dict(realm_subdomain='acme'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
# With subdomains, authenticating with the right subdomain
# works; using the wrong subdomain doesn't
self.verify_backend(ZulipRemoteUserBackend(),
good_kwargs=dict(realm_subdomain='zulip'),
bad_kwargs=dict(realm_subdomain='acme'))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
def test_remote_user_backend_sso_append_domain(self):
# type: () -> None
self.setup_subdomain(get_user_profile_by_email(u'hamlet@zulip.com'))
with self.settings(SSO_APPEND_DOMAIN='zulip.com'):
self.verify_backend(ZulipRemoteUserBackend(),
email_to_username=email_to_username,
good_kwargs=dict(realm_subdomain='acme'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
# With subdomains, authenticating with the right subdomain
# works; using the wrong subdomain doesn't
with self.settings(SSO_APPEND_DOMAIN='zulip.com'):
self.verify_backend(ZulipRemoteUserBackend(),
email_to_username=email_to_username,
good_kwargs=dict(realm_subdomain='zulip'),
bad_kwargs=dict(realm_subdomain='acme'))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',))
def test_github_backend(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.setup_subdomain(get_user_profile_by_email(email))
good_kwargs = dict(response=dict(email=email), return_data=dict(),
realm_subdomain='acme')
self.verify_backend(GitHubAuthBackend(),
good_kwargs=good_kwargs,
bad_kwargs=dict())
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
# With subdomains, authenticating with the right subdomain
# works; using the wrong subdomain doesn't
good_kwargs = dict(response=dict(email=email), return_data=dict(),
realm_subdomain='zulip')
bad_kwargs = dict(response=dict(email=email), return_data=dict(),
realm_subdomain='acme')
self.verify_backend(GitHubAuthBackend(),
good_kwargs=good_kwargs,
bad_kwargs=bad_kwargs)
class SocialAuthMixinTest(ZulipTestCase):
def test_social_auth_mixing(self):
# type: () -> None
mixin = SocialAuthMixin()
with self.assertRaises(NotImplementedError):
mixin.get_email_address()
with self.assertRaises(NotImplementedError):
mixin.get_full_name()
class GitHubAuthBackendTest(ZulipTestCase):
def setUp(self):
# type: () -> None
self.email = 'hamlet@zulip.com'
self.name = 'Hamlet'
self.backend = GitHubAuthBackend()
self.backend.strategy = DjangoStrategy(storage=BaseDjangoStorage())
self.user_profile = get_user_profile_by_email(self.email)
self.user_profile.backend = self.backend
rf = RequestFactory()
request = rf.get('/complete')
request.session = {}
request.get_host = lambda: 'acme.testserver'
request.user = self.user_profile
self.backend.strategy.request = request
def do_auth(self, *args, **kwargs):
# type: (*Any, **Any) -> UserProfile
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)):
return self.backend.authenticate(*args, **kwargs)
def test_github_auth_enabled(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)):
self.assertTrue(github_auth_enabled())
def test_full_name_with_missing_key(self):
# type: () -> None
self.assertEqual(self.backend.get_full_name(), '')
def test_github_backend_do_auth_without_subdomains(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=self.do_auth), \
mock.patch('zerver.views.auth.login'):
response = dict(email=self.email, name=self.name)
result = self.backend.do_auth(response=response)
self.assertNotIn('subdomain=1', result.url)
def test_github_backend_do_auth_with_non_existing_subdomain(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=self.do_auth):
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
self.backend.strategy.session_set('subdomain', 'test')
response = dict(email=self.email, name=self.name)
result = self.backend.do_auth(response=response)
self.assertIn('subdomain=1', result.url)
def test_github_backend_do_auth_with_subdomains(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=self.do_auth):
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
self.backend.strategy.session_set('subdomain', 'zulip')
response = dict(email=self.email, name=self.name)
result = self.backend.do_auth(response=response)
self.assertEqual('http://zulip.testserver/accounts/login/subdomain/', result.url)
def test_github_backend_do_auth_for_default(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=self.do_auth), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
response = dict(email=self.email, name=self.name)
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'acme',
'response': response,
'return_data': {}}
result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)
def test_github_backend_do_auth_for_team(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubTeamOAuth2.do_auth',
side_effect=self.do_auth), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
response = dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'):
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'acme',
'response': response,
'return_data': {}}
result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)
def test_github_backend_do_auth_for_team_auth_failed(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubTeamOAuth2.do_auth',
side_effect=AuthFailed('Not found')), \
mock.patch('logging.info'), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
response = dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'):
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'acme',
'response': response,
'return_data': {}}
result.assert_called_with(None, 'fake-access-token', **kwargs)
def test_github_backend_do_auth_for_org(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.do_auth',
side_effect=self.do_auth), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
response = dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'):
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'acme',
'response': response,
'return_data': {}}
result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)
def test_github_backend_do_auth_for_org_auth_failed(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.do_auth',
side_effect=AuthFailed('Not found')), \
mock.patch('logging.info'), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
response = dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'):
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'acme',
'response': response,
'return_data': {}}
result.assert_called_with(None, 'fake-access-token', **kwargs)
def test_github_backend_authenticate_nonexisting_user(self):
# type: () -> None
with mock.patch('zproject.backends.get_user_profile_by_email',
side_effect=UserProfile.DoesNotExist("Do not exist")):
response = dict(email=self.email, name=self.name)
return_data = dict() # type: Dict[str, Any]
user = self.backend.authenticate(return_data=return_data, response=response)
self.assertIs(user, None)
self.assertTrue(return_data['valid_attestation'])
def test_github_backend_inactive_user(self):
# type: () -> None
def do_auth_inactive(*args, **kwargs):
# type: (*Any, **Any) -> UserProfile
return_data = kwargs['return_data']
return_data['inactive_user'] = True
return self.user_profile
with mock.patch('zerver.views.auth.login_or_register_remote_user') as result, \
mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=do_auth_inactive):
response = dict(email=self.email, name=self.name)
user = self.backend.do_auth(response=response)
result.assert_not_called()
self.assertIs(user, None)
def test_github_backend_new_user(self):
# type: () -> None
rf = RequestFactory()
request = rf.get('/complete')
request.session = {}
request.user = self.user_profile
self.backend.strategy.request = request
def do_auth(*args, **kwargs):
# type: (*Any, **Any) -> UserProfile
return_data = kwargs['return_data']
return_data['valid_attestation'] = True
return None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=do_auth):
response = dict(email='nonexisting@phantom.com', name='Ghost')
result = self.backend.do_auth(response=response)
self.assert_in_response('action="/register/"', result)
self.assert_in_response('Your email address does not correspond to any '
'existing organization.', result)
def test_login_url(self):
# type: () -> None
result = self.client_get('/accounts/login/social/github')
self.assertIn(reverse('social:begin', args=['github']), result.url)
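# ResponseMock is a minimal stand-in for the objects returned by
# requests.get/requests.post: the Google OAuth tests below only read
# .status_code, .json() and .text from it. A sketch of how it is wired in
# (the token value is arbitrary):
#
#   token_response = ResponseMock(200, {'access_token': "unique_token"})
#   with mock.patch("requests.post", return_value=token_response):
#       ...  # drive /accounts/login/google/done/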
class ResponseMock(object):
def __init__(self, status_code, data):
# type: (int, Any) -> None
self.status_code = status_code
self.data = data
def json(self):
        # type: () -> Any
return self.data
@property
def text(self):
# type: () -> str
return "Response text"
class GoogleOAuthTest(ZulipTestCase):
def google_oauth2_test(self, token_response, account_response, subdomain=None):
# type: (ResponseMock, ResponseMock, Optional[str]) -> HttpResponse
url = "/accounts/login/google/send/"
if subdomain is not None:
url += "?subdomain=" + subdomain
result = self.client_get(url)
self.assertEqual(result.status_code, 302)
if 'google' not in result.url:
return result
self.client.cookies = result.cookies
# Now extract the CSRF token from the redirect URL
parsed_url = urllib.parse.urlparse(result.url)
csrf_state = urllib.parse.parse_qs(parsed_url.query)['state']
with mock.patch("requests.post", return_value=token_response), (
mock.patch("requests.get", return_value=account_response)):
result = self.client_get("/accounts/login/google/done/",
dict(state=csrf_state))
return result
class GoogleSubdomainLoginTest(GoogleOAuthTest):
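    # These helpers mirror the signed-cookie handoff in zerver.views.auth:
    # the auth flow stores {'email', 'name', 'subdomain'} in a cookie signed
    # with Django's cookie signer (salted with the cookie name plus the
    # module path), and /accounts/login/subdomain/ unsigns it with a short
    # max_age. get_signed_subdomain_cookie() forges such a cookie for tests;
    # unsign_subdomain_cookie() decodes the one the server set.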
def get_signed_subdomain_cookie(self, data):
# type: (Dict[str, str]) -> Dict[str, str]
key = 'subdomain.signature'
salt = key + 'zerver.views.auth'
value = ujson.dumps(data)
return {key: signing.get_cookie_signer(salt=salt).sign(value)}
def unsign_subdomain_cookie(self, result):
# type: (HttpResponse) -> Dict[str, Any]
key = 'subdomain.signature'
salt = key + 'zerver.views.auth'
cookie = result.cookies.get(key)
value = signing.get_cookie_signer(salt=salt).unsign(cookie.value, max_age=15)
return ujson.loads(value)
def test_google_oauth2_start(self):
# type: () -> None
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
result = self.client_get('/accounts/login/google/')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
subdomain = urllib.parse.parse_qs(parsed_url.query)['subdomain']
self.assertEqual(subdomain, ['zulip'])
def test_google_oauth2_success(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value="hamlet@zulip.com")])
account_response = ResponseMock(200, account_data)
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
result = self.google_oauth2_test(token_response, account_response, 'zulip')
data = self.unsign_subdomain_cookie(result)
self.assertEqual(data['email'], 'hamlet@zulip.com')
self.assertEqual(data['name'], 'Full Name')
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = "{}://{}{}".format(parsed_url.scheme, parsed_url.netloc,
parsed_url.path)
self.assertEqual(uri, 'http://zulip.testserver/accounts/login/subdomain/')
def test_log_into_subdomain(self):
# type: () -> None
data = {'name': 'Full Name',
'email': 'hamlet@zulip.com',
'subdomain': 'zulip'}
self.client.cookies = SimpleCookie(self.get_signed_subdomain_cookie(data))
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
result = self.client_get('/accounts/login/subdomain/')
self.assertEqual(result.status_code, 302)
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_user_cannot_log_into_nonexisting_realm(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value="hamlet@zulip.com")])
account_response = ResponseMock(200, account_data)
result = self.google_oauth2_test(token_response, account_response, 'acme')
self.assertEqual(result.status_code, 302)
self.assertIn('subdomain=1', result.url)
def test_user_cannot_log_into_wrong_subdomain(self):
# type: () -> None
data = {'name': 'Full Name',
'email': 'hamlet@zulip.com',
'subdomain': 'acme'}
self.client.cookies = SimpleCookie(self.get_signed_subdomain_cookie(data))
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
result = self.client_get('/accounts/login/subdomain/')
self.assertEqual(result.status_code, 400)
def test_log_into_subdomain_when_signature_is_bad(self):
# type: () -> None
        self.client.cookies = SimpleCookie({'subdomain.signature': 'invalid'})
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
result = self.client_get('/accounts/login/subdomain/')
self.assertEqual(result.status_code, 400)
def test_log_into_subdomain_when_state_is_not_passed(self):
# type: () -> None
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
result = self.client_get('/accounts/login/subdomain/')
self.assertEqual(result.status_code, 400)
def test_google_oauth2_registration(self):
# type: () -> None
"""If the user doesn't exist yet, Google auth can be used to register an account"""
with self.settings(REALMS_HAVE_SUBDOMAINS=True), (
mock.patch('zerver.views.auth.get_subdomain', return_value='zulip')), (
mock.patch('zerver.views.registration.get_subdomain', return_value='zulip')):
email = "newuser@zulip.com"
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value=email)])
account_response = ResponseMock(200, account_data)
result = self.google_oauth2_test(token_response, account_response, 'zulip')
data = self.unsign_subdomain_cookie(result)
self.assertEqual(data['email'], email)
self.assertEqual(data['name'], 'Full Name')
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = "{}://{}{}".format(parsed_url.scheme, parsed_url.netloc,
parsed_url.path)
self.assertEqual(uri, 'http://zulip.testserver/accounts/login/subdomain/')
result = self.client_get(result.url)
result = self.client_get(result.url) # Call the confirmation url.
key_match = re.search('value="(?P<key>[0-9a-f]+)" name="key"', result.content.decode("utf-8"))
name_match = re.search('value="(?P<name>[^"]+)" name="full_name"', result.content.decode("utf-8"))
# This goes through a brief stop on a page that auto-submits via JS
result = self.client_post('/accounts/register/',
{'full_name': name_match.group("name"),
'key': key_match.group("key"),
'from_confirmation': "1"})
self.assertEqual(result.status_code, 200)
result = self.client_post('/accounts/register/',
{'full_name': "New User",
'password': 'test_password',
'key': key_match.group("key"),
'terms': True})
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "http://zulip.testserver/")
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class GoogleLoginTest(GoogleOAuthTest):
def test_google_oauth2_success(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value="hamlet@zulip.com")])
account_response = ResponseMock(200, account_data)
self.google_oauth2_test(token_response, account_response)
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_google_oauth2_registration(self):
# type: () -> None
"""If the user doesn't exist yet, Google auth can be used to register an account"""
email = "newuser@zulip.com"
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value=email)])
account_response = ResponseMock(200, account_data)
result = self.google_oauth2_test(token_response, account_response)
self.assertEqual(result.status_code, 302)
result = self.client_get(result.url)
key_match = re.search('value="(?P<key>[0-9a-f]+)" name="key"', result.content.decode("utf-8"))
name_match = re.search('value="(?P<name>[^"]+)" name="full_name"', result.content.decode("utf-8"))
# This goes through a brief stop on a page that auto-submits via JS
result = self.client_post('/accounts/register/',
{'full_name': name_match.group("name"),
'key': key_match.group("key"),
'from_confirmation': "1"})
self.assertEqual(result.status_code, 200)
result = self.client_post('/accounts/register/',
{'full_name': "New User",
'password': 'test_password',
'key': key_match.group("key"),
'terms': True})
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "http://testserver/")
def test_google_oauth2_wrong_subdomain(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value="hamlet@zulip.com")])
account_response = ResponseMock(200, account_data)
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
result = self.google_oauth2_test(token_response, account_response)
self.assertIn('subdomain=1', result.url)
def test_google_oauth2_400_token_response(self):
# type: () -> None
token_response = ResponseMock(400, {})
with mock.patch("logging.warning") as m:
result = self.google_oauth2_test(token_response, None)
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"User error converting Google oauth2 login to token: Response text")
def test_google_oauth2_500_token_response(self):
# type: () -> None
token_response = ResponseMock(500, {})
with mock.patch("logging.error") as m:
result = self.google_oauth2_test(token_response, None)
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"Could not convert google oauth2 code to access_token: Response text")
def test_google_oauth2_400_account_response(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_response = ResponseMock(400, {})
with mock.patch("logging.warning") as m:
result = self.google_oauth2_test(token_response, account_response)
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"Google login failed making info API call: Response text")
def test_google_oauth2_500_account_response(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_response = ResponseMock(500, {})
with mock.patch("logging.error") as m:
result = self.google_oauth2_test(token_response, account_response)
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"Google login failed making API call: Response text")
def test_google_oauth2_no_fullname(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(givenName="Test", familyName="User"),
emails=[dict(type="account",
value="hamlet@zulip.com")])
account_response = ResponseMock(200, account_data)
self.google_oauth2_test(token_response, account_response)
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_google_oauth2_account_response_no_email(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[])
account_response = ResponseMock(200, account_data)
with mock.patch("logging.error") as m:
result = self.google_oauth2_test(token_response, account_response)
self.assertEqual(result.status_code, 400)
self.assertIn("Google oauth2 account email not found:", m.call_args_list[0][0][0])
def test_google_oauth2_error_access_denied(self):
# type: () -> None
result = self.client_get("/accounts/login/google/done/?error=access_denied")
self.assertEqual(result.status_code, 302)
path = urllib.parse.urlparse(result.url).path
self.assertEqual(path, "/")
def test_google_oauth2_error_other(self):
# type: () -> None
with mock.patch("logging.warning") as m:
result = self.client_get("/accounts/login/google/done/?error=some_other_error")
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"Error from google oauth2 login: some_other_error")
def test_google_oauth2_missing_csrf(self):
# type: () -> None
with mock.patch("logging.warning") as m:
result = self.client_get("/accounts/login/google/done/")
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
'Missing Google oauth2 CSRF state')
def test_google_oauth2_csrf_malformed(self):
# type: () -> None
with mock.patch("logging.warning") as m:
result = self.client_get("/accounts/login/google/done/?state=badstate")
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
'Missing Google oauth2 CSRF state')
def test_google_oauth2_csrf_badstate(self):
# type: () -> None
with mock.patch("logging.warning") as m:
result = self.client_get("/accounts/login/google/done/?state=badstate:otherbadstate:")
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
'Google oauth2 CSRF error')
class FetchAPIKeyTest(ZulipTestCase):
def setUp(self):
# type: () -> None
self.email = "hamlet@zulip.com"
self.user_profile = get_user_profile_by_email(self.email)
def test_success(self):
# type: () -> None
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password=initial_password(self.email)))
self.assert_json_success(result)
def test_wrong_password(self):
# type: () -> None
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password="wrong"))
self.assert_json_error(result, "Your username or password is incorrect.", 403)
def test_password_auth_disabled(self):
# type: () -> None
with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password=initial_password(self.email)))
self.assert_json_error_contains(result, "Password auth is disabled", 403)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_auth_email_auth_disabled_success(self):
# type: () -> None
ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
self.mock_initialize = ldap_patcher.start()
self.mock_ldap = MockLDAP()
self.mock_initialize.return_value = self.mock_ldap
self.backend = ZulipLDAPAuthBackend()
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password="testing"))
self.assert_json_success(result)
self.mock_ldap.reset()
self.mock_initialize.stop()
def test_inactive_user(self):
# type: () -> None
do_deactivate_user(self.user_profile)
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password=initial_password(self.email)))
self.assert_json_error_contains(result, "Your account has been disabled", 403)
def test_deactivated_realm(self):
# type: () -> None
do_deactivate_realm(self.user_profile.realm)
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password=initial_password(self.email)))
self.assert_json_error_contains(result, "Your realm has been deactivated", 403)
class DevFetchAPIKeyTest(ZulipTestCase):
def setUp(self):
# type: () -> None
self.email = "hamlet@zulip.com"
self.user_profile = get_user_profile_by_email(self.email)
def test_success(self):
# type: () -> None
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=self.email))
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data["email"], self.email)
self.assertEqual(data['api_key'], self.user_profile.api_key)
def test_inactive_user(self):
# type: () -> None
do_deactivate_user(self.user_profile)
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=self.email))
self.assert_json_error_contains(result, "Your account has been disabled", 403)
def test_deactivated_realm(self):
# type: () -> None
do_deactivate_realm(self.user_profile.realm)
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=self.email))
self.assert_json_error_contains(result, "Your realm has been deactivated", 403)
def test_dev_auth_disabled(self):
# type: () -> None
with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=self.email))
self.assert_json_error_contains(result, "Dev environment not enabled.", 400)
class DevGetEmailsTest(ZulipTestCase):
def test_success(self):
# type: () -> None
result = self.client_get("/api/v1/dev_get_emails")
self.assert_json_success(result)
self.assert_in_response("direct_admins", result)
self.assert_in_response("direct_users", result)
def test_dev_auth_disabled(self):
# type: () -> None
with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
result = self.client_get("/api/v1/dev_get_emails")
self.assert_json_error_contains(result, "Dev environment not enabled.", 400)
class FetchAuthBackends(ZulipTestCase):
def test_fetch_auth_backend_format(self):
# type: () -> None
result = self.client_get("/api/v1/get_auth_backends")
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(set(data.keys()),
{'msg', 'password', 'google', 'dev', 'result'})
for backend in set(data.keys()) - {'msg', 'result'}:
self.assertTrue(isinstance(data[backend], bool))
def test_fetch_auth_backend(self):
# type: () -> None
backends = [GoogleMobileOauth2Backend(), DevAuthBackend()]
with mock.patch('django.contrib.auth.get_backends', return_value=backends):
result = self.client_get("/api/v1/get_auth_backends")
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data, {
'msg': '',
'password': False,
'google': True,
'dev': True,
'result': 'success',
})
class TestDevAuthBackend(ZulipTestCase):
def test_login_success(self):
# type: () -> None
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
data = {'direct_email': email}
result = self.client_post('/accounts/login/local/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_failure(self):
# type: () -> None
email = 'hamlet@zulip.com'
data = {'direct_email': email}
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',)):
with self.assertRaisesRegex(Exception, 'Direct login not supported.'):
try:
with mock.patch('django.core.handlers.exception.logger'):
self.client_post('/accounts/login/local/', data)
except ImportError:
with mock.patch('django.core.handlers.base.logger'):
self.client_post('/accounts/login/local/', data)
def test_login_failure_due_to_nonexistent_user(self):
# type: () -> None
email = 'nonexisting@zulip.com'
data = {'direct_email': email}
with self.assertRaisesRegex(Exception, 'User cannot login'):
try:
with mock.patch('django.core.handlers.exception.logger'):
self.client_post('/accounts/login/local/', data)
except ImportError:
with mock.patch('django.core.handlers.base.logger'):
self.client_post('/accounts/login/local/', data)
class TestZulipRemoteUserBackend(ZulipTestCase):
def test_login_success(self):
# type: () -> None
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_authenticate_with_missing_user(self):
# type: () -> None
backend = ZulipRemoteUserBackend()
self.assertIs(backend.authenticate(None), None)
def test_login_success_with_sso_append_domain(self):
# type: () -> None
username = 'hamlet'
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',),
SSO_APPEND_DOMAIN='zulip.com'):
result = self.client_post('/accounts/login/sso/', REMOTE_USER=username)
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_failure(self):
# type: () -> None
email = 'hamlet@zulip.com'
result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 200) # This should ideally be not 200.
self.assertIs(get_session_dict_user(self.client.session), None)
def test_login_failure_due_to_nonexisting_user(self):
# type: () -> None
email = 'nonexisting@zulip.com'
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 302)
self.assertIs(get_session_dict_user(self.client.session), None)
def test_login_failure_due_to_missing_field(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_post('/accounts/login/sso/')
self.assert_json_error_contains(result, "No REMOTE_USER set.", 400)
def test_login_failure_due_to_wrong_subdomain(self):
# type: () -> None
email = 'hamlet@zulip.com'
with self.settings(REALMS_HAVE_SUBDOMAINS=True,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'):
result = self.client_post('http://testserver:9080/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 200)
self.assertIs(get_session_dict_user(self.client.session), None)
self.assertIn(b"Let's get started", result.content)
def test_login_failure_due_to_empty_subdomain(self):
# type: () -> None
email = 'hamlet@zulip.com'
with self.settings(REALMS_HAVE_SUBDOMAINS=True,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
with mock.patch('zerver.views.auth.get_subdomain', return_value=''):
result = self.client_post('http://testserver:9080/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 200)
self.assertIs(get_session_dict_user(self.client.session), None)
self.assertIn(b"Let's get started", result.content)
def test_login_success_under_subdomains(self):
# type: () -> None
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 302)
self.assertIs(get_session_dict_user(self.client.session), user_profile.id)
class TestJWTLogin(ZulipTestCase):
"""
JWT uses ZulipDummyBackend.
"""
def test_login_success(self):
# type: () -> None
payload = {'user': 'hamlet', 'realm': 'zulip.com'}
with self.settings(JWT_AUTH_KEYS={'': 'key'}):
email = 'hamlet@zulip.com'
auth_key = settings.JWT_AUTH_KEYS['']
web_token = jwt.encode(payload, auth_key).decode('utf8')
user_profile = get_user_profile_by_email(email)
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_failure_when_user_is_missing(self):
# type: () -> None
payload = {'realm': 'zulip.com'}
with self.settings(JWT_AUTH_KEYS={'': 'key'}):
auth_key = settings.JWT_AUTH_KEYS['']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "No user specified in JSON web token claims", 400)
def test_login_failure_when_realm_is_missing(self):
# type: () -> None
payload = {'user': 'hamlet'}
with self.settings(JWT_AUTH_KEYS={'': 'key'}):
auth_key = settings.JWT_AUTH_KEYS['']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "No realm specified in JSON web token claims", 400)
def test_login_failure_when_key_does_not_exist(self):
# type: () -> None
data = {'json_web_token': 'not relevant'}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "Auth key for this subdomain not found.", 400)
def test_login_failure_when_key_is_missing(self):
# type: () -> None
with self.settings(JWT_AUTH_KEYS={'': 'key'}):
result = self.client_post('/accounts/login/jwt/')
self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
def test_login_failure_when_bad_token_is_passed(self):
# type: () -> None
with self.settings(JWT_AUTH_KEYS={'': 'key'}):
result = self.client_post('/accounts/login/jwt/')
self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
data = {'json_web_token': 'bad token'}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "Bad JSON web token", 400)
def test_login_failure_when_user_does_not_exist(self):
# type: () -> None
payload = {'user': 'nonexisting', 'realm': 'zulip.com'}
with self.settings(JWT_AUTH_KEYS={'': 'key'}):
auth_key = settings.JWT_AUTH_KEYS['']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
            self.assertEqual(result.status_code, 302)  # This should ideally fail rather than redirect.
self.assertIs(get_session_dict_user(self.client.session), None)
def test_login_failure_due_to_wrong_subdomain(self):
# type: () -> None
payload = {'user': 'hamlet', 'realm': 'zulip.com'}
with self.settings(REALMS_HAVE_SUBDOMAINS=True, JWT_AUTH_KEYS={'acme': 'key'}):
with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'):
auth_key = settings.JWT_AUTH_KEYS['acme']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "Wrong subdomain", 400)
self.assertEqual(get_session_dict_user(self.client.session), None)
def test_login_failure_due_to_empty_subdomain(self):
# type: () -> None
payload = {'user': 'hamlet', 'realm': 'zulip.com'}
with self.settings(REALMS_HAVE_SUBDOMAINS=True, JWT_AUTH_KEYS={'': 'key'}):
with mock.patch('zerver.views.auth.get_subdomain', return_value=''):
auth_key = settings.JWT_AUTH_KEYS['']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "Wrong subdomain", 400)
self.assertEqual(get_session_dict_user(self.client.session), None)
def test_login_success_under_subdomains(self):
# type: () -> None
payload = {'user': 'hamlet', 'realm': 'zulip.com'}
with self.settings(REALMS_HAVE_SUBDOMAINS=True, JWT_AUTH_KEYS={'zulip': 'key'}):
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
email = 'hamlet@zulip.com'
auth_key = settings.JWT_AUTH_KEYS['zulip']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assertEqual(result.status_code, 302)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class TestLDAP(ZulipTestCase):
def setUp(self):
# type: () -> None
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email(email)
self.setup_subdomain(user_profile)
ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
self.mock_initialize = ldap_patcher.start()
self.mock_ldap = MockLDAP()
self.mock_initialize.return_value = self.mock_ldap
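        # MockLDAP serves entries out of an in-memory dict keyed by DN (see
        # the `directory` assignments in the tests below), so django_auth_ldap
        # binds against this fake rather than a real LDAP server.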
self.backend = ZulipLDAPAuthBackend()
# Internally `_realm` attribute is automatically set by the
# `authenticate()` method. But for testing the `get_or_create_user()`
# method separately, we need to set it manually.
self.backend._realm = get_realm('zulip')
def tearDown(self):
# type: () -> None
self.mock_ldap.reset()
self.mock_initialize.stop()
def setup_subdomain(self, user_profile):
# type: (UserProfile) -> None
realm = user_profile.realm
realm.string_id = 'zulip'
realm.save()
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing')
self.assertEqual(user_profile.email, 'hamlet@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_wrong_password(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user = self.backend.authenticate('hamlet@zulip.com', 'wrong')
self.assertIs(user, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_nonexistent_user(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user = self.backend.authenticate('nonexistent@zulip.com', 'testing')
self.assertIs(user, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_permissions(self):
# type: () -> None
backend = self.backend
self.assertFalse(backend.has_perm(None, None))
self.assertFalse(backend.has_module_perms(None, None))
self.assertTrue(backend.get_all_permissions(None, None) == set())
self.assertTrue(backend.get_group_permissions(None, None) == set())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_django_to_ldap_username(self):
# type: () -> None
backend = self.backend
with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
username = backend.django_to_ldap_username('"hamlet@test"@zulip.com')
self.assertEqual(username, '"hamlet@test"')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_to_django_username(self):
# type: () -> None
backend = self.backend
with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
username = backend.ldap_to_django_username('"hamlet@test"')
self.assertEqual(username, '"hamlet@test"@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_user_exists(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
backend = self.backend
email = 'hamlet@zulip.com'
user_profile, created = backend.get_or_create_user(email, _LDAPUser())
self.assertFalse(created)
self.assertEqual(user_profile.email, email)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_user_does_not_exist(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = 'nonexisting@zulip.com'
user_profile, created = backend.get_or_create_user(email, _LDAPUser())
self.assertTrue(created)
self.assertEqual(user_profile.email, email)
self.assertEqual(user_profile.full_name, 'Full Name')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_user_has_invalid_name(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['<invalid name>'], 'sn': ['Short Name']}
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = 'nonexisting@zulip.com'
with self.assertRaisesRegex(Exception, "Invalid characters in name!"):
backend.get_or_create_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_realm_is_deactivated(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = 'nonexisting@zulip.com'
do_deactivate_realm(backend._realm)
with self.assertRaisesRegex(Exception, 'Realm has been deactivated'):
backend.get_or_create_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_django_to_ldap_username_when_domain_does_not_match(self):
# type: () -> None
backend = self.backend
email = 'hamlet@zulip.com'
with self.assertRaisesRegex(Exception, 'Username does not match LDAP domain.'):
with self.settings(LDAP_APPEND_DOMAIN='acme.com'):
backend.django_to_ldap_username(email)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_wrong_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
realm_subdomain='acme')
self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_empty_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
realm_subdomain='')
self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_when_subdomain_is_none(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
realm_subdomain=None)
self.assertEqual(user_profile.email, 'hamlet@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_with_valid_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate('hamlet@zulip.com', 'testing',
realm_subdomain='zulip')
self.assertEqual(user_profile.email, 'hamlet@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_when_user_does_not_exist_with_valid_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=nonexisting,ou=users,dc=acme,dc=com': {
'cn': ['NonExisting', ],
'userPassword': 'testing'
}
}
with self.settings(
REALMS_HAVE_SUBDOMAINS=True,
LDAP_APPEND_DOMAIN='acme.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=acme,dc=com'):
user_profile = self.backend.authenticate('nonexisting@acme.com', 'testing',
realm_subdomain='zulip')
self.assertEqual(user_profile.email, 'nonexisting@acme.com')
self.assertEqual(user_profile.full_name, 'NonExisting')
self.assertEqual(user_profile.realm.string_id, 'zulip')
class TestZulipLDAPUserPopulator(ZulipTestCase):
def test_authenticate(self):
# type: () -> None
backend = ZulipLDAPUserPopulator()
result = backend.authenticate('hamlet@zulip.com', 'testing') # type: ignore # complains that the function does not return any value!
self.assertIs(result, None)
class TestZulipAuthMixin(ZulipTestCase):
def test_get_user(self):
# type: () -> None
backend = ZulipAuthMixin()
result = backend.get_user(11111)
self.assertIs(result, None)
class TestPasswordAuthEnabled(ZulipTestCase):
def test_password_auth_enabled_for_ldap(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',)):
realm = Realm.objects.get(string_id='zulip')
self.assertTrue(password_auth_enabled(realm))
class TestMaybeSendToRegistration(ZulipTestCase):
def test_sso_only_when_preregistration_user_does_not_exist(self):
# type: () -> None
rf = RequestFactory()
request = rf.get('/')
request.session = {}
request.user = None
# Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the create_homepage_form function
# and will always be valid so that the code that we want to test
# actually runs.
class Form(object):
def is_valid(self):
# type: () -> bool
return True
with self.settings(ONLY_SSO=True):
with mock.patch('zerver.views.auth.HomepageForm', return_value=Form()):
self.assertEqual(PreregistrationUser.objects.all().count(), 0)
result = maybe_send_to_registration(request, 'hamlet@zulip.com')
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn('do_confirm/' + confirmation_key, result.url)
self.assertEqual(PreregistrationUser.objects.all().count(), 1)
result = self.client_get(result.url)
self.assert_in_response('action="/accounts/register/"', result)
self.assert_in_response('value="{0}" name="key"'.format(confirmation_key), result)
def test_sso_only_when_preregistration_user_exists(self):
# type: () -> None
rf = RequestFactory()
request = rf.get('/')
request.session = {}
request.user = None
# Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the create_homepage_form function
# and will always be valid so that the code that we want to test
# actually runs.
class Form(object):
def is_valid(self):
# type: () -> bool
return True
email = 'hamlet@zulip.com'
user = PreregistrationUser(email=email)
user.save()
with self.settings(ONLY_SSO=True):
with mock.patch('zerver.views.auth.HomepageForm', return_value=Form()):
self.assertEqual(PreregistrationUser.objects.all().count(), 1)
result = maybe_send_to_registration(request, email)
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn('do_confirm/' + confirmation_key, result.url)
self.assertEqual(PreregistrationUser.objects.all().count(), 1)
class TestAdminSetBackends(ZulipTestCase):
def test_change_enabled_backends(self):
# type: () -> None
# Log in as admin
self.login("iago@zulip.com")
result = self.client_patch("/json/realm", {
'authentication_methods': ujson.dumps({u'Email': False, u'Dev': True})})
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertFalse(password_auth_enabled(realm))
self.assertTrue(dev_auth_enabled(realm))
def test_disable_all_backends(self):
# type: () -> None
# Log in as admin
self.login("iago@zulip.com")
result = self.client_patch("/json/realm", {
'authentication_methods': ujson.dumps({u'Email': False, u'Dev': False})})
self.assert_json_error(result, 'At least one authentication method must be enabled.', status_code=403)
realm = get_realm('zulip')
self.assertTrue(password_auth_enabled(realm))
self.assertTrue(dev_auth_enabled(realm))
def test_supported_backends_only_updated(self):
# type: () -> None
# Log in as admin
self.login("iago@zulip.com")
# Set some supported and unsupported backends
result = self.client_patch("/json/realm", {
'authentication_methods': ujson.dumps({u'Email': False, u'Dev': True, u'GitHub': False})})
self.assert_json_success(result)
realm = get_realm('zulip')
# Check that unsupported backend is not enabled
self.assertFalse(github_auth_enabled(realm))
self.assertTrue(dev_auth_enabled(realm))
self.assertFalse(password_auth_enabled(realm))
|
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.elb.listener import Listener
from boto.ec2.elb.listelement import ListElement
from boto.ec2.elb.policies import Policies, OtherPolicy
from boto.ec2.elb.securitygroup import SecurityGroup
from boto.ec2.instanceinfo import InstanceInfo
from boto.resultset import ResultSet
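# The classes in this module implement boto's SAX-style XML parsing
# contract: while walking the response, the parser calls startElement() for
# each opening tag (returning a child object redirects subsequent events to
# it) and endElement() for each closing tag, which is where scalar fields
# are assigned. A minimal sketch of the shape these classes share:
#
#   class Example(object):
#       def startElement(self, name, attrs, connection):
#           return None  # or a child object to delegate to
#
#       def endElement(self, name, value, connection):
#           setattr(self, name, value)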
class Backend(object):
"""Backend server description"""
def __init__(self, connection=None):
self.connection = connection
self.instance_port = None
self.policies = None
def __repr__(self):
return 'Backend(%r:%r)' % (self.instance_port, self.policies)
def startElement(self, name, attrs, connection):
if name == 'PolicyNames':
self.policies = ResultSet([('member', OtherPolicy)])
return self.policies
def endElement(self, name, value, connection):
if name == 'InstancePort':
self.instance_port = int(value)
return
class LoadBalancerZones(object):
"""
Used to collect the zones for a Load Balancer when enable_zones
or disable_zones are called.
"""
def __init__(self, connection=None):
self.connection = connection
self.zones = ListElement()
def startElement(self, name, attrs, connection):
if name == 'AvailabilityZones':
return self.zones
def endElement(self, name, value, connection):
pass
class LoadBalancer(object):
"""
Represents an EC2 Load Balancer.
"""
def __init__(self, connection=None, name=None, endpoints=None):
"""
        :ivar boto.ec2.elb.ELBConnection connection: The connection this load
            balancer instance was instantiated from.
:ivar list listeners: A list of tuples in the form of
``(<Inbound port>, <Outbound port>, <Protocol>)``
:ivar boto.ec2.elb.healthcheck.HealthCheck health_check: The health
check policy for this load balancer.
:ivar boto.ec2.elb.policies.Policies policies: Cookie stickiness and
other policies.
:ivar str dns_name: The external DNS name for the balancer.
:ivar str created_time: A date+time string showing when the
load balancer was created.
:ivar list instances: A list of :py:class:`boto.ec2.instanceinfo.InstanceInfo`
instances, representing the EC2 instances this load balancer is
distributing requests to.
:ivar list availability_zones: The availability zones this balancer
covers.
:ivar str canonical_hosted_zone_name: Current CNAME for the balancer.
:ivar str canonical_hosted_zone_name_id: The Route 53 hosted zone
ID of this balancer. Needed when creating an Alias record in a
Route 53 hosted zone.
:ivar boto.ec2.elb.securitygroup.SecurityGroup source_security_group:
The security group that you can use as part of your inbound rules
for your load balancer back-end instances to disallow traffic
from sources other than your load balancer.
:ivar list subnets: A list of subnets this balancer is on.
:ivar list security_groups: A list of additional security groups that
have been applied.
:ivar str vpc_id: The ID of the VPC that this ELB resides within.
        :ivar list backends: A list of :py:class:`boto.ec2.elb.loadbalancer.Backend`
            back-end server descriptions.
"""
self.connection = connection
self.name = name
self.listeners = None
self.health_check = None
self.policies = None
self.dns_name = None
self.created_time = None
self.instances = None
self.availability_zones = ListElement()
self.canonical_hosted_zone_name = None
self.canonical_hosted_zone_name_id = None
self.source_security_group = None
self.subnets = ListElement()
self.security_groups = ListElement()
self.vpc_id = None
self.scheme = None
self.backends = None
def __repr__(self):
return 'LoadBalancer:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'HealthCheck':
self.health_check = HealthCheck(self)
return self.health_check
elif name == 'ListenerDescriptions':
self.listeners = ResultSet([('member', Listener)])
return self.listeners
elif name == 'AvailabilityZones':
return self.availability_zones
elif name == 'Instances':
self.instances = ResultSet([('member', InstanceInfo)])
return self.instances
elif name == 'Policies':
self.policies = Policies(self)
return self.policies
elif name == 'SourceSecurityGroup':
self.source_security_group = SecurityGroup()
return self.source_security_group
elif name == 'Subnets':
return self.subnets
elif name == 'SecurityGroups':
return self.security_groups
elif name == 'VPCId':
pass
elif name == "BackendServerDescriptions":
self.backends = ResultSet([('member', Backend)])
return self.backends
else:
return None
def endElement(self, name, value, connection):
if name == 'LoadBalancerName':
self.name = value
elif name == 'DNSName':
self.dns_name = value
elif name == 'CreatedTime':
self.created_time = value
elif name == 'InstanceId':
self.instances.append(value)
elif name == 'CanonicalHostedZoneName':
self.canonical_hosted_zone_name = value
elif name == 'CanonicalHostedZoneNameID':
self.canonical_hosted_zone_name_id = value
elif name == 'VPCId':
self.vpc_id = value
elif name == 'Scheme':
self.scheme = value
else:
setattr(self, name, value)
def enable_zones(self, zones):
"""
        Enable availability zones for this Access Point.
All zones must be in the same region as the Access Point.
:type zones: string or List of strings
:param zones: The name of the zone(s) to add.
"""
        if isinstance(zones, (str, unicode)):
            zones = [zones]
new_zones = self.connection.enable_availability_zones(self.name, zones)
self.availability_zones = new_zones
def disable_zones(self, zones):
"""
Disable availability zones from this Access Point.
:type zones: string or List of strings
        :param zones: The name of the zone(s) to remove.
"""
        if isinstance(zones, (str, unicode)):
            zones = [zones]
new_zones = self.connection.disable_availability_zones(self.name, zones)
self.availability_zones = new_zones
def register_instances(self, instances):
"""
Adds instances to this load balancer. All instances must be in the same
region as the load balancer. Adding endpoints that are already
registered with the load balancer has no effect.
:param list instances: List of instance IDs (strings) that you'd like
to add to this load balancer.
"""
        if isinstance(instances, (str, unicode)):
            instances = [instances]
new_instances = self.connection.register_instances(self.name,
instances)
self.instances = new_instances
def deregister_instances(self, instances):
"""
Remove instances from this load balancer. Removing instances that are
not registered with the load balancer has no effect.
:param list instances: List of instance IDs (strings) that you'd like
to remove from this load balancer.
"""
        if isinstance(instances, (str, unicode)):
            instances = [instances]
new_instances = self.connection.deregister_instances(self.name,
instances)
self.instances = new_instances
def delete(self):
"""
Delete this load balancer.
"""
return self.connection.delete_load_balancer(self.name)
def configure_health_check(self, health_check):
"""
Configures the health check behavior for the instances behind this
load balancer. See :ref:`elb-configuring-a-health-check` for a
walkthrough.
:param boto.ec2.elb.healthcheck.HealthCheck health_check: A
HealthCheck instance that tells the load balancer how to check
its instances for health.
"""
return self.connection.configure_health_check(self.name, health_check)
def get_instance_health(self, instances=None):
"""
Returns a list of :py:class:`boto.ec2.elb.instancestate.InstanceState`
objects, which show the health of the instances attached to this
load balancer.
:rtype: list
:returns: A list of
:py:class:`InstanceState <boto.ec2.elb.instancestate.InstanceState>`
instances, representing the instances
attached to this load balancer.
"""
return self.connection.describe_instance_health(self.name, instances)
def create_listeners(self, listeners):
return self.connection.create_load_balancer_listeners(self.name,
listeners)
def create_listener(self, inPort, outPort=None, proto="tcp"):
        if outPort is None:
            outPort = inPort
return self.create_listeners([(inPort, outPort, proto)])
def delete_listeners(self, listeners):
return self.connection.delete_load_balancer_listeners(self.name,
listeners)
def delete_listener(self, inPort):
return self.delete_listeners([inPort])
def delete_policy(self, policy_name):
"""
Deletes a policy from the LoadBalancer. The specified policy must not
be enabled for any listeners.
"""
return self.connection.delete_lb_policy(self.name, policy_name)
def set_policies_of_listener(self, lb_port, policies):
return self.connection.set_lb_policies_of_listener(self.name,
lb_port,
policies)
def set_policies_of_backend_server(self, instance_port, policies):
return self.connection.set_lb_policies_of_backend_server(self.name,
instance_port,
policies)
def create_cookie_stickiness_policy(self, cookie_expiration_period,
policy_name):
return self.connection.create_lb_cookie_stickiness_policy(cookie_expiration_period, self.name, policy_name)
def create_app_cookie_stickiness_policy(self, name, policy_name):
return self.connection.create_app_cookie_stickiness_policy(name,
self.name,
policy_name)
def set_listener_SSL_certificate(self, lb_port, ssl_certificate_id):
return self.connection.set_lb_listener_SSL_certificate(self.name,
lb_port,
ssl_certificate_id)
def create_lb_policy(self, policy_name, policy_type, policy_attribute):
return self.connection.create_lb_policy(self.name, policy_name, policy_type, policy_attribute)
def attach_subnets(self, subnets):
"""
Attaches load balancer to one or more subnets.
Attaching subnets that are already registered with the
Load Balancer has no effect.
:type subnets: string or List of strings
:param subnets: The name of the subnet(s) to add.
"""
        if isinstance(subnets, (str, unicode)):
            subnets = [subnets]
new_subnets = self.connection.attach_lb_to_subnets(self.name, subnets)
self.subnets = new_subnets
def detach_subnets(self, subnets):
"""
Detaches load balancer from one or more subnets.
:type subnets: string or List of strings
:param subnets: The name of the subnet(s) to detach.
"""
        if isinstance(subnets, (str, unicode)):
            subnets = [subnets]
new_subnets = self.connection.detach_lb_from_subnets(self.name, subnets)
self.subnets = new_subnets
def apply_security_groups(self, security_groups):
"""
Applies security groups to the load balancer.
Applying security groups that are already registered with the
Load Balancer has no effect.
:type security_groups: string or List of strings
:param security_groups: The name of the security group(s) to add.
"""
        if isinstance(security_groups, (str, unicode)):
            security_groups = [security_groups]
new_sgs = self.connection.apply_security_groups_to_lb(
self.name, security_groups)
self.security_groups = new_sgs
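# Hedged usage sketch (instance and resource names below are illustrative,
# not from this module): the mutating helpers above normalize a bare string
# into a one-element list before calling the connection layer, so both forms
# are equivalent:
#
#     lb.attach_subnets('subnet-12345678')
#     lb.attach_subnets(['subnet-12345678'])
#
# A typical teardown pairs deregistration with deletion:
#
#     lb.deregister_instances(['i-0123abcd'])
#     lb.delete()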
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MultiModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.models import slicenet
from tensor2tensor.utils import expert_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
def conv_res_step(x, hparams, padding, mask):
"""One step of convolutions and mid-residual."""
k = (hparams.kernel_height, hparams.kernel_width)
k2 = (hparams.large_kernel_size, 1)
dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)]
dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)]
with tf.variable_scope("conv_res_step"):
y = common_layers.subseparable_conv_block(
x,
hparams.filter_size,
dilations_and_kernels1,
padding=padding,
mask=mask,
separabilities=0,
name="residual1")
y = tf.nn.dropout(y, 1.0 - hparams.dropout)
return common_layers.subseparable_conv_block(
y,
hparams.hidden_size,
dilations_and_kernels2,
padding=padding,
mask=mask,
separabilities=0,
name="residual2")
def residual_fn2(x, y, hparams):
y = tf.nn.dropout(y, 1.0 - hparams.dropout)
return common_layers.layer_norm(x + y)
def residual_fn3(x, y, z, hparams):
y = tf.nn.dropout(y, 1.0 - hparams.dropout)
z = tf.nn.dropout(z, 1.0 - hparams.dropout)
return common_layers.layer_norm(x + y + z)
def conv_experts(xs, hparams, dp, ps, padding, mask, layer_id):
"""Convolutions + Mixture-of-Experts layer."""
del layer_id # Unused.
  train = hparams.mode == tf.estimator.ModeKeys.TRAIN
conv_out = dp(conv_res_step, xs, hparams, padding, mask)
loss = 0.0
moe_hidden_sizes = [hparams.filter_size]
expert_fn = expert_utils.ffn_expert_fn(hparams.hidden_size, moe_hidden_sizes,
hparams.hidden_size)
moe_out, loss = expert_utils.distributed_moe(
dp,
ps,
xs,
train,
input_size=hparams.hidden_size,
expert_fn=expert_fn,
num_experts=hparams.moe_num_experts,
k=hparams.moe_k,
loss_coef=1.0)
return dp(residual_fn3, xs, moe_out, conv_out, hparams), loss
def prepare_decoder(targets, target_space_emb):
"""Prepare decoder."""
decoder_self_attention_bias = (
common_attention.attention_bias_lower_triangle(tf.shape(targets)[1]))
target_space_emb = tf.reshape(target_space_emb, [1, 1, -1])
target_space_emb = tf.tile(target_space_emb, [tf.shape(targets)[0], 1, 1])
decoder_input = common_layers.shift_right_3d(
targets, pad_value=target_space_emb)
decoder_input = common_attention.add_timing_signal_1d(decoder_input)
return (decoder_input, decoder_self_attention_bias)
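# Hedged illustration: the causal bias built by
# common_attention.attention_bias_lower_triangle above keeps each position
# from attending to the future. The numpy sketch below mirrors its effect
# (0.0 on and below the diagonal, a large negative value above it), not its
# exact implementation:
#
#   import numpy as np
#
#   def causal_bias(length, neg=-1e9):
#       # Ones strictly above the diagonal mark "future" positions.
#       mask = np.triu(np.ones((length, length)), k=1)
#       return (mask * neg).reshape(1, 1, length, length)
#
#   causal_bias(3)[0, 0]
#   # array([[ 0.e+00, -1.e+09, -1.e+09],
#   #        [ 0.e+00,  0.e+00, -1.e+09],
#   #        [ 0.e+00,  0.e+00,  0.e+00]])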
@registry.register_model
class MultiModel(t2t_model.T2TModel):
"""Model to train on multiple tasks simultaneously."""
@property
def use_body_sharded(self):
return True
def body_sharded(self, sharded_features):
train = self._hparams.mode == tf.estimator.ModeKeys.TRAIN
dp = self._data_parallelism
hparams = self._hparams
def project_to_hidden(inputs):
return common_layers.conv_block(
inputs,
hparams.hidden_size, [((1, 1), (3, 3))],
first_relu=False,
padding="SAME",
force2d=True)
def flatten(inputs):
return tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
# Project to hidden size if necessary
if (sharded_features["inputs"][0].get_shape().as_list()[-1] !=
hparams.hidden_size):
inputs = dp(project_to_hidden, sharded_features["inputs"])
inputs = dp(flatten, inputs)
inputs_pad = dp(slicenet.embedding_to_padding, inputs)
inputs_mask = dp(lambda x: 1.0 - x, inputs_pad)
inputs_encoded = dp(common_layers.add_timing_signal, inputs)
expert_loss = 0.0
for i in range(hparams.num_hidden_layers):
with tf.variable_scope("enc_layer_%d" % i):
inputs_encoded, moe_loss = conv_experts(inputs_encoded, hparams, dp,
self._ps_devices, "SAME",
inputs_mask, i)
expert_loss += tf.reduce_mean(moe_loss) * hparams.moe_loss_coef
    # If we're just predicting a class, there is no use for a decoder; return.
if isinstance(self._problem_hparams.target_modality,
modalities.ClassLabelModality):
return inputs_encoded, tf.reduce_mean(expert_loss)
# Decoder.
inputs3d = dp(tf.squeeze, inputs, 2)
inputs_encoded3d = dp(tf.squeeze, inputs_encoded, 2)
encoder_padding = dp(common_attention.embedding_to_padding, inputs3d)
encoder_attention_bias = dp(common_attention.attention_bias_ignore_padding,
encoder_padding)
targets = dp(common_layers.flatten4d3d, sharded_features["targets"])
target_space_emb = dp(slicenet.embed_target_space,
sharded_features["target_space_id"],
hparams.hidden_size)
(decoder_input, decoder_self_attention_bias) = dp(prepare_decoder, targets,
target_space_emb)
moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")]
expert_fn = expert_utils.ffn_expert_fn(
hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size)
x = dp(tf.nn.dropout, decoder_input, 1.0 - hparams.dropout)
for layer in range(hparams.num_hidden_layers):
with tf.variable_scope("dec_layer_%d" % layer):
with tf.variable_scope("attention"):
y = dp(
common_attention.multihead_attention,
x,
None,
decoder_self_attention_bias,
hparams.hidden_size,
hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
name="decoder_self_attention")
z = dp(
common_attention.multihead_attention,
y,
inputs_encoded3d,
encoder_attention_bias,
hparams.hidden_size,
hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
name="encdec_attention")
x = dp(residual_fn3, x, y, z, hparams)
with tf.variable_scope("ffn"):
if str(layer) in hparams.moe_layers.split(","):
y, moe_loss = expert_utils.distributed_moe(
dp,
self._ps_devices,
x,
train,
input_size=hparams.hidden_size,
expert_fn=expert_fn,
num_experts=hparams.moe_num_experts,
k=hparams.moe_k,
loss_coef=hparams.moe_loss_coef)
expert_loss += tf.reduce_mean(moe_loss)
else:
y = dp(
common_layers.conv_hidden_relu,
x,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.dropout)
x = dp(residual_fn2, x, y, hparams)
x = dp(tf.expand_dims, x, 2)
return x, tf.reduce_mean(expert_loss)
@registry.register_hparams
def multimodel_base():
"""Base parameters for MultiModel."""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 2048
hparams.num_hidden_layers = 4
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 1.0
hparams.dropout = 0.1
hparams.add_hparam("filter_size", 2048) # Add new ones like this.
hparams.add_hparam("large_kernel_size", 15)
hparams.add_hparam("attention_dropout", 0.1)
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("moe_layers", "2")
hparams.moe_num_experts = 30
return hparams
@registry.register_hparams
def multimodel_tiny():
"""Tiny parameters for MultiModel."""
hparams = multimodel_base()
hparams.hidden_size = 128
hparams.filter_size = 512
hparams.batch_size = 512
hparams.num_hidden_layers = 2
  hparams.moe_num_experts = 10
hparams.moe_layers = "0"
return hparams
|
|
from collections import OrderedDict
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth import views as auth_views
from django.db import transaction
from django.forms import Media
from django.http import Http404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy, override
from django.views.decorators.debug import sensitive_post_parameters
from wagtail.admin.forms.account import (
AvatarPreferencesForm, LocalePreferencesForm, NameEmailForm, NotificationPreferencesForm)
from wagtail.admin.forms.auth import LoginForm, PasswordChangeForm, PasswordResetForm
from wagtail.admin.localization import get_available_admin_languages, get_available_admin_time_zones
from wagtail.core import hooks
from wagtail.core.log_actions import log
from wagtail.core.models import UserPagePermissionsProxy
from wagtail.users.models import UserProfile
from wagtail.utils.loading import get_custom_form
def get_user_login_form():
form_setting = 'WAGTAILADMIN_USER_LOGIN_FORM'
if hasattr(settings, form_setting):
return get_custom_form(form_setting)
else:
return LoginForm
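# Hedged example of wiring the setting checked above; the dotted path and
# form class are hypothetical:
#
#   # settings.py
#   WAGTAILADMIN_USER_LOGIN_FORM = 'myapp.forms.CustomLoginForm'
#
#   # myapp/forms.py
#   from wagtail.admin.forms.auth import LoginForm
#
#   class CustomLoginForm(LoginForm):
#       # extend with extra fields or validation as needed
#       pass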
# Helper functions to check password management settings to enable/disable views as appropriate.
# These are functions rather than class-level constants so that they can be overridden in tests
# by override_settings
def password_management_enabled():
return getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True)
def email_management_enabled():
return getattr(settings, 'WAGTAIL_EMAIL_MANAGEMENT_ENABLED', True)
def password_reset_enabled():
return getattr(settings, 'WAGTAIL_PASSWORD_RESET_ENABLED', password_management_enabled())
# Tabs
class SettingsTab:
def __init__(self, name, title, order=0):
self.name = name
self.title = title
self.order = order
profile_tab = SettingsTab('profile', gettext_lazy("Profile"), order=100)
notifications_tab = SettingsTab('notifications', gettext_lazy("Notifications"), order=200)
# Panels
class BaseSettingsPanel:
name = ''
title = ''
tab = profile_tab
help_text = None
template_name = 'wagtailadmin/account/settings_panels/base.html'
form_class = None
form_object = 'user'
def __init__(self, request, user, profile):
self.request = request
self.user = user
self.profile = profile
def is_active(self):
"""
Returns True to display the panel.
"""
return True
def get_form(self):
"""
Returns an initialised form.
"""
kwargs = {
'instance': self.profile if self.form_object == 'profile' else self.user,
'prefix': self.name
}
if self.request.method == 'POST':
return self.form_class(self.request.POST, self.request.FILES, **kwargs)
else:
return self.form_class(**kwargs)
def get_context_data(self):
"""
Returns the template context to use when rendering the template.
"""
return {
'form': self.get_form()
}
def render(self):
"""
Renders the panel using the template specified in .template_name and context from .get_context_data()
"""
return render_to_string(self.template_name, self.get_context_data(), request=self.request)
class NameEmailSettingsPanel(BaseSettingsPanel):
name = 'name_email'
title = gettext_lazy('Name and Email')
order = 100
form_class = NameEmailForm
class AvatarSettingsPanel(BaseSettingsPanel):
name = 'avatar'
title = gettext_lazy('Profile picture')
order = 300
template_name = 'wagtailadmin/account/settings_panels/avatar.html'
form_class = AvatarPreferencesForm
form_object = 'profile'
class NotificationsSettingsPanel(BaseSettingsPanel):
name = 'notifications'
title = gettext_lazy('Notifications')
tab = notifications_tab
order = 100
form_class = NotificationPreferencesForm
form_object = 'profile'
def is_active(self):
# Hide the panel if the user can't edit or publish pages
user_perms = UserPagePermissionsProxy(self.request.user)
if not user_perms.can_edit_pages() and not user_perms.can_publish_pages():
return False
# Hide the panel if there are no notification preferences
return self.get_form().fields
class LocaleSettingsPanel(BaseSettingsPanel):
name = 'locale'
title = gettext_lazy('Locale')
order = 400
form_class = LocalePreferencesForm
form_object = 'profile'
def is_active(self):
return len(get_available_admin_languages()) > 1 or len(get_available_admin_time_zones()) > 1
class ChangePasswordPanel(BaseSettingsPanel):
name = 'password'
title = gettext_lazy('Password')
order = 500
form_class = PasswordChangeForm
def is_active(self):
return password_management_enabled() and self.user.has_usable_password()
def get_form(self):
        # Note: don't bind the form unless a password field is filled in.
        # This prevents a validation error from displaying when the user
        # simply wants to leave the password unchanged.
bind_form = False
if self.request.method == 'POST':
bind_form = any([
self.request.POST.get(self.name + '-old_password'),
self.request.POST.get(self.name + '-new_password1'),
self.request.POST.get(self.name + '-new_password2'),
])
if bind_form:
return self.form_class(self.user, self.request.POST, prefix=self.name)
else:
return self.form_class(self.user, prefix=self.name)
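# Hedged sketch of a custom panel: any object exposing the BaseSettingsPanel
# interface can be returned from the 'register_account_settings_panel' hook
# consumed in account() below. ThemeForm is hypothetical.
#
#   class ThemeSettingsPanel(BaseSettingsPanel):
#       name = 'theme'
#       title = gettext_lazy('Theme')
#       order = 450
#       form_class = ThemeForm  # hypothetical form over UserProfile
#       form_object = 'profile'
#
#   @hooks.register('register_account_settings_panel')
#   def register_theme_panel(request, user, profile):
#       return ThemeSettingsPanel(request, user, profile)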
# Views
@sensitive_post_parameters()
def account(request):
# Fetch the user and profile objects once and pass into each panel
# We need to use the same instances for all forms so they don't overwrite each other
user = request.user
profile = UserProfile.get_for_user(user)
# Panels
panels = [
NameEmailSettingsPanel(request, user, profile),
AvatarSettingsPanel(request, user, profile),
NotificationsSettingsPanel(request, user, profile),
LocaleSettingsPanel(request, user, profile),
ChangePasswordPanel(request, user, profile),
]
for fn in hooks.get_hooks('register_account_settings_panel'):
panel = fn(request, user, profile)
if panel and panel.is_active():
panels.append(panel)
panels = [panel for panel in panels if panel.is_active()]
# Get tabs and order them
tabs = list(set(panel.tab for panel in panels))
tabs.sort(key=lambda tab: tab.order)
# Get dict of tabs to ordered panels
panels_by_tab = OrderedDict([(tab, []) for tab in tabs])
for panel in panels:
panels_by_tab[panel.tab].append(panel)
for tab, tab_panels in panels_by_tab.items():
tab_panels.sort(key=lambda panel: panel.order)
panel_forms = [panel.get_form() for panel in panels]
if request.method == 'POST':
if all(form.is_valid() or not form.is_bound for form in panel_forms):
with transaction.atomic():
for form in panel_forms:
if form.is_bound:
form.save()
log(user, 'wagtail.edit')
# Prevent a password change from logging this user out
update_session_auth_hash(request, user)
# Override the language when creating the success message
# If the user has changed their language in this request, the message should
# be in the new language, not the existing one
with override(profile.get_preferred_language()):
messages.success(request, _("Your account settings have been changed successfully!"))
return redirect('wagtailadmin_account')
media = Media()
for form in panel_forms:
media += form.media
# Menu items
menu_items = []
for fn in hooks.get_hooks('register_account_menu_item'):
item = fn(request)
if item:
menu_items.append(item)
return TemplateResponse(request, 'wagtailadmin/account/account.html', {
'panels_by_tab': panels_by_tab,
'menu_items': menu_items,
'media': media,
})
class PasswordResetEnabledViewMixin:
"""
Class based view mixin that disables the view if password reset is disabled by one of the following settings:
- WAGTAIL_PASSWORD_RESET_ENABLED
- WAGTAIL_PASSWORD_MANAGEMENT_ENABLED
"""
def dispatch(self, *args, **kwargs):
if not password_reset_enabled():
raise Http404
return super().dispatch(*args, **kwargs)
class PasswordResetView(PasswordResetEnabledViewMixin, auth_views.PasswordResetView):
template_name = 'wagtailadmin/account/password_reset/form.html'
email_template_name = 'wagtailadmin/account/password_reset/email.txt'
subject_template_name = 'wagtailadmin/account/password_reset/email_subject.txt'
form_class = PasswordResetForm
success_url = reverse_lazy('wagtailadmin_password_reset_done')
class PasswordResetDoneView(PasswordResetEnabledViewMixin, auth_views.PasswordResetDoneView):
template_name = 'wagtailadmin/account/password_reset/done.html'
class PasswordResetConfirmView(PasswordResetEnabledViewMixin, auth_views.PasswordResetConfirmView):
template_name = 'wagtailadmin/account/password_reset/confirm.html'
success_url = reverse_lazy('wagtailadmin_password_reset_complete')
class PasswordResetCompleteView(PasswordResetEnabledViewMixin, auth_views.PasswordResetCompleteView):
template_name = 'wagtailadmin/account/password_reset/complete.html'
class LoginView(auth_views.LoginView):
template_name = 'wagtailadmin/login.html'
def get_success_url(self):
return self.get_redirect_url() or reverse('wagtailadmin_home')
def get(self, *args, **kwargs):
# If user is already logged in, redirect them to the dashboard
if self.request.user.is_authenticated and self.request.user.has_perm('wagtailadmin.access_admin'):
return redirect(self.get_success_url())
return super().get(*args, **kwargs)
def get_form_class(self):
return get_user_login_form()
def form_valid(self, form):
response = super().form_valid(form)
remember = form.cleaned_data.get('remember')
if remember:
self.request.session.set_expiry(settings.SESSION_COOKIE_AGE)
else:
self.request.session.set_expiry(0)
return response
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['show_password_reset'] = password_reset_enabled()
from django.contrib.auth import get_user_model
User = get_user_model()
context['username_field'] = User._meta.get_field(User.USERNAME_FIELD).verbose_name
return context
class LogoutView(auth_views.LogoutView):
next_page = 'wagtailadmin_login'
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
messages.success(self.request, _('You have been successfully logged out.'))
# By default, logging out will generate a fresh sessionid cookie. We want to use the
# absence of sessionid as an indication that front-end pages are being viewed by a
# non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.
response.delete_cookie(
settings.SESSION_COOKIE_NAME,
domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH
)
# HACK: pretend that the session hasn't been modified, so that SessionMiddleware
# won't override the above and write a new cookie.
self.request.session.modified = False
return response
|
|
#!/usr/bin/env python3
from auto.config import ARM, GOPTS, RV32, SBT, TOOLS, X86
from auto.utils import cat, path, unique
class ArchAndMode:
def __init__(self, farch, narch, mode=None):
self.farch = farch
self.narch = narch
self.mode = mode
# get arch prefix string
def prefix(self):
if not self.farch:
return self.narch.prefix
elif not self.narch:
return self.farch.prefix
else:
return self.farch.add_prefix(self.narch.prefix)
# get output file name for this ArchAndMode
def bin(self, name):
if not self.prefix():
raise Exception("No prefix")
if not name:
raise Exception("No name")
return (self.prefix() + "-" + name +
("-" + self.mode if self.mode else ""));
@staticmethod
def sfmt(templ, prefix, mode):
return templ.format(**{
"prefix": prefix,
"mode": mode,
})
def fmt(self, templ):
prefix = self.prefix()
if prefix:
prefix = prefix + '-'
else:
prefix = ''
mode = self.mode
if mode:
mode = '-' + mode
else:
mode = ''
return self.sfmt(templ, prefix, mode)
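    # Hedged example (prefix and mode strings are illustrative): for an
    # x86-translated RV32 binary in "globals" mode,
    #   ArchAndMode.sfmt("{prefix}hello{mode}", "rv32-x86-", "-globals")
    # yields "rv32-x86-hello-globals"; fmt() derives the same prefix/mode
    # strings from the farch/narch/mode fields before delegating to sfmt().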
# arguments for a given run
class Run:
def __init__(self, args, id=None, rflags=None, outidx=None, stdin=None):
self.args = args
self.rflags_arg = rflags
self.id = id
self.outidx = outidx
self.stdin = stdin
@staticmethod
def out_suffix():
return ".out"
@staticmethod
def build_name(am, base, id, suffix):
if am:
r = am.bin(base)
else:
r = base
if id:
r = r + '-' + id
if suffix:
r = r + suffix
return r
def bin(self, am, name):
return am.bin(name)
def out(self, am, name):
        if self.outidx is None:
if am:
return self.build_name(am, name, self.id, self.out_suffix())
else:
return None
return am.fmt(self.args[self.outidx])
def build(self, am):
if am:
return [am.fmt(arg) for arg in self.args]
return [ArchAndMode.sfmt(arg, '', '') for arg in self.args]
@staticmethod
def _escape(args):
args2 = []
for arg in args:
if len(arg) > 0 and arg[0] == '-':
arg = '" {}"'.format(arg)
args2.append(arg)
return args2
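    # Note: _escape() wraps leading-dash arguments as '" -flag"' so the run
    # tool does not parse them as its own options, e.g.
    #   Run._escape(["-n", "10"])  ->  ['" -n"', '10']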
def str(self, am):
args = self.build(am)
if not args:
return ''
args2 = ["--args"]
args2.extend(self._escape(args))
return " ".join(args2)
def rflags(self, am, name):
args_str = self.str(am)
out = self.out(am, name)
        if self.outidx is None:
args_str = cat('-o', out, args_str)
if self.stdin:
args_str = cat(args_str, "<", self.stdin)
return cat(self.rflags_arg, args_str)
class Runs:
    def __init__(self, runs=None, name=None):
        self.runs = runs if runs is not None else []
self.name = name
def add(self, run):
self.runs.append(run)
def __iter__(self):
return self.runs.__iter__()
class GenMake:
def __init__(self, narchs, xarchs,
srcdir, dstdir, name,
xflags, bflags, mflags, sbtflags=[],
cc=None, rvcc=None, modes=None):
self.narchs = narchs
self.xarchs = xarchs
self.srcdir = srcdir
self.dstdir = dstdir
self.name = name
self.xflags = xflags
self.bflags = bflags
self.mflags = mflags
self.sbtflags = sbtflags
self.cc = cc
self.rvcc = rvcc
self.modes = modes if modes else SBT.modes
#
self.out_filter = None
#
self.txt = "### {} ###\n\n".format(name)
def append(self, txt):
self.txt = self.txt + txt
def append_cc(self, arch, flags):
s = "--cc="
if arch.is_rv32():
s = s + (self.rvcc if self.rvcc else GOPTS.rvcc)
else:
s = s + (self.cc if self.cc else GOPTS.cc)
return cat(flags, s)
def bld(self, arch, ins, out):
out_is_obj = out.endswith(".o")
objs = []
aobjs = []
if len(ins) == 1:
if not out_is_obj:
objs = [arch.out2objname(out)]
aobjs = [path(self.dstdir, objs[0])]
else:
for src in ins:
obj = arch.src2objname(src)
if not out_is_obj or obj != out:
objs.append(obj)
aobjs = [path(self.dstdir, obj) for obj in objs]
ains = [path(self.srcdir, i) for i in ins]
bflags = self.append_cc(arch, self.bflags)
fmtdata = {
"arch": arch.name,
"aobjs": " ".join(aobjs),
"srcdir": self.srcdir,
"dstdir": self.dstdir,
"ins": " ".join(ins),
"ains": " ".join(ains),
"out": out,
"bflags": " " + bflags if bflags else "",
"build": TOOLS.build,
}
self.append("""\
.PHONY: {out}
{out}: {dstdir}/{out}
{dstdir}/{out} {aobjs}: {ains}
\t{build} --arch {arch} --srcdir {srcdir} --dstdir {dstdir} {ins} -o {out}{bflags}
""".format(**fmtdata))
def _ssh_copy(self, fmtdata):
self.append("""\
.PHONY: {tgt}
{tgt}: {out}
\tscp {src} {rem}:{dst}
""".format(**fmtdata))
def _adb_copy(self, fmtdata):
self.append("""\
.PHONY: {tgt}
{tgt}: {out}
\t{rem} push {src} {dst}
""".format(**fmtdata))
def copy(self, am, name):
# don't copy if we're not on an x86 host OR
# if 'out' is a native binary OR
# if 'out' is a RISC-V binary (we can emulate it)
if not X86.is_native() or am.narch.is_native() or am.narch.is_rv32():
return ''
out = am.bin(name)
tgt = out + self.copy_suffix()
srcdir = self.dstdir
src = path(srcdir, out)
dstdir = am.narch.get_remote_path(srcdir)
dst = path(dstdir, out)
fmtdata = {
"out": out,
"tgt": tgt,
"src": src,
"rem": am.narch.rem_host,
"dst": dst,
}
if GOPTS.ssh_copy():
self._ssh_copy(fmtdata)
else:
self._adb_copy(fmtdata)
return tgt
@staticmethod
def mk_arm_dstdir_static(dstdir):
return ("ssh {} mkdir -p {}" if GOPTS.ssh_copy()
else "{} shell mkdir -p {}").format(
ARM.rem_host, ARM.get_remote_path(dstdir))
def mk_arm_dstdir(self, name):
tgt = name + "-arm-dstdir"
self.append("""\
.PHONY: {0}
{0}:
\t{1}
""".format( tgt,
self.mk_arm_dstdir_static(self.dstdir)))
return tgt
def run(self, name, robj, am, dep_bin=True):
dir = self.dstdir
bin = robj.bin(am, name)
suffix = "-" + robj.id if robj.id else ""
rflags = robj.rflags(am, name)
narch = am.narch
fmtdata = {
"arch": narch.name,
"dir": dir,
"bin": bin,
"suffix": suffix,
"rflags": " " + rflags if rflags else "",
"run": TOOLS.run,
"dep": " " + bin if dep_bin else "",
}
self.append("""\
.PHONY: {bin}{suffix}-run
{bin}{suffix}-run:{dep}
\t{run} --arch {arch} --dir {dir} {bin}{rflags}
""".format(**fmtdata))
def xlate(self, am, _in, out):
flags = '--sbtflags " -regs={}"'.format(am.mode)
for flag in self.sbtflags:
flags = flags + ' " {}"'.format(flag)
xflags = self.append_cc(am.narch, self.xflags)
fmtdata = {
"arch": am.narch.name,
"srcdir": self.srcdir,
"dstdir": self.dstdir,
"in": _in,
"out": out,
"xflags": " " + xflags if xflags else "",
"flags": flags,
"xlate": TOOLS.xlate,
}
self.append("""\
.PHONY: {out}
{out}: {dstdir}/{out}
{dstdir}/{out}: {dstdir}/{in}
\t{xlate} --arch {arch} --srcdir {srcdir} --dstdir {dstdir} {in} -o {out}{xflags} {flags}
""".format(**fmtdata))
def _diff(self, f0, f1):
if self.out_filter:
return (
"\tcat {0} | {2} >{0}.filt\n" +
"\tcat {1} | {2} >{1}.filt\n" +
"\tdiff {0}.filt {1}.filt").format(
f0, f1, self.out_filter)
else:
return "\tdiff {0} {1}".format(f0, f1)
def test1(self, run):
id = run.id
        if run.outidx is not None:
name = lambda am: run.out(am, name=self.name)
else:
name = lambda am: path(self.dstdir,
Run.build_name(am, self.name, id, Run.out_suffix()))
# gen diffs
diffs = []
def diff(f0, f1):
diffs.append(self._diff(f0, f1))
xams = self._xams()
for xam in xams:
if not xam.narch.is_native():
continue
xout = name(xam)
# foreign
# skip rv32 if on arm
if not xam.narch.is_arm():
fam = ArchAndMode(None, xam.farch)
fout = name(fam)
diff(fout, xout)
# native
if xam.narch in self.narchs:
nam = ArchAndMode(None, xam.narch)
nout = name(nam)
diff(nout, xout)
if GOPTS.rv32 == "rv8":
fam = ArchAndMode(RV32, None)
nam = ArchAndMode(None, X86)
fout = name(fam)
nout = name(nam)
diff(fout, nout)
tname = Run.build_name(None, self.name, id, None)
fmtdata = {
"name": tname,
"runs": " ".join(self.get_all_runs(Runs([run]), self.name)),
"diffs": "\n".join(diffs)
}
self.append("""\
.PHONY: {name}-test
{name}-test: {runs}
{diffs}
""".format(**fmtdata))
return tname + self.test_suffix()
def test(self, runs):
tests = []
for run in runs:
tests.append(self.test1(run))
if len(tests) > 1:
tsuf = self.test_suffix()
self.alias(self.name + tsuf, tests)
def measure(self, robj, dep_bin=True, rv32=False):
args_str = robj.str(None)
suffix = robj.id
mflags = self.mflags
if suffix:
if not mflags:
mflags = []
mflags.extend(["--id", self.name + '-' + suffix])
fmtdata = {
"measure": TOOLS.measure,
"dstdir": self.dstdir,
"name": self.name,
"rv32": " --rv32" if rv32 else "",
"suffix": "-" + suffix if suffix else "",
"args": " " + args_str if args_str else "",
"stdin": " --stdin=" + robj.stdin if robj.stdin else "",
"mflags": " " + " ".join(mflags) if mflags else "",
"dep": " " + self.name if dep_bin else "",
}
self.append("""\
.PHONY: {name}{suffix}-measure
{name}{suffix}-measure:{dep}
\t{measure} {dstdir} {name}{rv32}{args}{stdin}{mflags}
""".format(**fmtdata))
def alias(self, name, aliasees):
fmtdata = {
"name": name,
"aliasees": " ".join(aliasees),
}
self.append("""\
.PHONY: {name}
{name}: {aliasees}
""".format(**fmtdata))
def alias_build_all(self):
mod = self.name
nmods = [arch.add_prefix(mod) for arch in self.narchs]
xmods = [farch.add_prefix(narch.add_prefix(mod)) + "-" + mode
for (farch, narch) in self.xarchs
for mode in self.modes]
fmtdata = {
"mod": mod,
"nmods": " ".join(nmods),
"xmods": " ".join(xmods)
}
self.append("""\
.PHONY: {mod}
{mod}: {nmods} {xmods}
""".format(**fmtdata))
def _farchs(self):
return unique([farch for (farch, narch) in self.xarchs])
def _nams(self):
return [ArchAndMode(None, narch) for narch in self.narchs]
def _xams(self):
return [ArchAndMode(farch, narch, mode)
for (farch, narch) in self.xarchs
for mode in self.modes]
def _ufams(self):
farchs = unique([am.farch for am in self._xams() if am.farch])
return [ArchAndMode(farch, None) for farch in farchs]
def _unams(self):
narchs = unique([narch for narch in self.narchs])
return [ArchAndMode(None, narch) for narch in narchs]
def ams(self):
return self._nams() + self._xams()
def _ntgts(self, name):
return [am.bin(name) for am in self._nams()]
def _xtgts(self, name):
return [am.bin(name) for am in self._xams()]
def tgts(self, name):
return self._ntgts(name) + self._xtgts(name)
def apply_suffixes(self, tgts, suffixes, gsuf=None):
a = []
gsuf = gsuf if gsuf else ''
for tgt in tgts:
for suffix in suffixes:
suf = "-" + suffix if suffix else ""
a.append(tgt + suf + gsuf)
return a
@staticmethod
def run_suffix():
return "-run"
@staticmethod
def test_suffix():
return "-test"
@staticmethod
def copy_suffix():
return "-copy"
@staticmethod
def run_suffixes(runs):
return [r.id if r.id else '' for r in runs]
def get_runs(self, runs, am, name):
return self.apply_suffixes([am.bin(name)],
self.run_suffixes(runs),
self.run_suffix())
def get_all_runs(self, runs, name):
return self.apply_suffixes(self.tgts(name),
self.run_suffixes(runs),
self.run_suffix())
def get_all_tests(self, runs, name):
return self.apply_suffixes(self.tgts(name),
self.run_suffixes(runs),
self.test_suffix())
def alias_run_all(self, runs):
fmtdata = {
"name": self.name,
"runs": " ".join(self.get_all_runs(runs, self.name)),
}
self.append("""\
.PHONY: {name}-run
{name}-run: {runs}
""".format(**fmtdata))
def clean(self):
self.append("""\
.PHONY: {name}-clean
{name}-clean:
\trm -rf {dstdir}
""".format(**{
"name": self.name,
"dstdir": self.dstdir}))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import six
import testscenarios
import webob
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import versioned_method
from nova.api.openstack import wsgi
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
from nova.tests.unit import utils
class MicroversionedTest(testscenarios.WithScenarios, test.NoDBTestCase):
scenarios = [
('legacy-microversion', {
'header_name': 'X-OpenStack-Nova-API-Version',
}),
('modern-microversion', {
'header_name': 'OpenStack-API-Version',
})
]
def _make_microversion_header(self, value):
if 'nova' in self.header_name.lower():
return {self.header_name: value}
else:
return {self.header_name: 'compute %s' % value}
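    # Illustration of the two header shapes produced above:
    #   legacy:  {'X-OpenStack-Nova-API-Version': '2.14'}
    #   modern:  {'OpenStack-API-Version': 'compute 2.14'}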
class RequestTest(MicroversionedTest):
def setUp(self):
super(RequestTest, self).setUp()
self.stub_out('nova.i18n.get_available_languages',
lambda *args, **kwargs:
['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US'])
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = b"<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = b"asdf<br />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_from_request(self):
request = wsgi.Request.blank('/')
accepted = 'bogus;q=1, en-gb;q=0.7,en-us,en;q=0.5,*;q=0.7'
request.headers = {'Accept-Language': accepted}
self.assertEqual(request.best_match_language(), 'en_US')
def test_asterisk(self):
# asterisk should match first available if there
# are not any other available matches
request = wsgi.Request.blank('/')
accepted = '*,es;q=0.5'
request.headers = {'Accept-Language': accepted}
self.assertEqual(request.best_match_language(), 'en_GB')
def test_prefix(self):
request = wsgi.Request.blank('/')
accepted = 'zh'
request.headers = {'Accept-Language': accepted}
self.assertEqual(request.best_match_language(), 'zh_CN')
def test_secondary(self):
request = wsgi.Request.blank('/')
accepted = 'nn,en-gb;q=0.5'
request.headers = {'Accept-Language': accepted}
self.assertEqual(request.best_match_language(), 'en_GB')
def test_none_found(self):
request = wsgi.Request.blank('/')
accepted = 'nb-no'
request.headers = {'Accept-Language': accepted}
self.assertIsNone(request.best_match_language())
def test_no_lang_header(self):
request = wsgi.Request.blank('/')
accepted = ''
request.headers = {'Accept-Language': accepted}
self.assertIsNone(request.best_match_language())
def test_api_version_request_header_none(self):
request = wsgi.Request.blank('/')
request.set_api_version_request()
self.assertEqual(api_version.APIVersionRequest(
api_version.DEFAULT_API_VERSION), request.api_version_request)
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_api_version_request_header(self, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("2.14")
request = wsgi.Request.blank('/')
request.headers = self._make_microversion_header('2.14')
request.set_api_version_request()
self.assertEqual(api_version.APIVersionRequest("2.14"),
request.api_version_request)
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_api_version_request_header_latest(self, mock_maxver):
mock_maxver.return_value = api_version.APIVersionRequest("3.5")
request = wsgi.Request.blank('/')
request.headers = self._make_microversion_header('latest')
request.set_api_version_request()
self.assertEqual(api_version.APIVersionRequest("3.5"),
request.api_version_request)
def test_api_version_request_header_invalid(self):
request = wsgi.Request.blank('/')
request.headers = self._make_microversion_header('2.1.3')
self.assertRaises(exception.InvalidAPIVersionString,
request.set_api_version_request)
class ActionDispatcherTest(test.NoDBTestCase):
def test_dispatch(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
def test_dispatch_action_None(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
class JSONDictSerializerTest(test.NoDBTestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_json)
class JSONDeserializerTest(test.NoDBTestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(deserializer.deserialize(data), as_dict)
def test_json_valid_utf8(self):
data = b"""{"server": {"min_count": 1, "flavorRef": "1",
"name": "\xe6\xa6\x82\xe5\xbf\xb5",
"imageRef": "10bab10c-1304-47d",
"max_count": 1}} """
as_dict = {
'body': {
u'server': {
u'min_count': 1, u'flavorRef': u'1',
u'name': u'\u6982\u5ff5',
u'imageRef': u'10bab10c-1304-47d',
u'max_count': 1
}
}
}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(deserializer.deserialize(data), as_dict)
def test_json_invalid_utf8(self):
"""Send invalid utf-8 to JSONDeserializer."""
data = b"""{"server": {"min_count": 1, "flavorRef": "1",
"name": "\xf0\x28\x8c\x28",
"imageRef": "10bab10c-1304-47d",
"max_count": 1}} """
deserializer = wsgi.JSONDeserializer()
self.assertRaises(exception.MalformedRequestBody,
deserializer.deserialize, data)
class ResourceTest(MicroversionedTest):
def get_req_id_header_name(self, request):
header_name = 'x-openstack-request-id'
if utils.get_api_version(request) < 3:
header_name = 'x-compute-request-id'
return header_name
def test_resource_receives_api_version_request_default(self):
class Controller(object):
def index(self, req):
if req.api_version_request != \
api_version.APIVersionRequest(
api_version.DEFAULT_API_VERSION):
raise webob.exc.HTTPInternalServerError()
return 'success'
app = fakes.TestRouter(Controller())
req = webob.Request.blank('/tests')
response = req.get_response(app)
self.assertEqual(b'success', response.body)
self.assertEqual(response.status_int, 200)
@mock.patch("nova.api.openstack.api_version_request.max_api_version")
def test_resource_receives_api_version_request(self, mock_maxver):
version = "2.5"
mock_maxver.return_value = api_version.APIVersionRequest(version)
class Controller(object):
def index(self, req):
if req.api_version_request != \
api_version.APIVersionRequest(version):
raise webob.exc.HTTPInternalServerError()
return 'success'
app = fakes.TestRouter(Controller())
req = webob.Request.blank('/tests')
req.headers = self._make_microversion_header(version)
response = req.get_response(app)
self.assertEqual(b'success', response.body)
self.assertEqual(response.status_int, 200)
def test_resource_receives_api_version_request_invalid(self):
invalid_version = "2.5.3"
class Controller(object):
def index(self, req):
return 'success'
app = fakes.TestRouter(Controller())
req = webob.Request.blank('/tests')
req.headers = self._make_microversion_header(invalid_version)
response = req.get_response(app)
self.assertEqual(400, response.status_int)
def test_resource_call_with_method_get(self):
class Controller(object):
def index(self, req):
return 'success'
app = fakes.TestRouter(Controller())
# the default method is GET
req = webob.Request.blank('/tests')
response = req.get_response(app)
self.assertEqual(b'success', response.body)
self.assertEqual(response.status_int, 200)
req.body = b'{"body": {"key": "value"}}'
response = req.get_response(app)
self.assertEqual(b'success', response.body)
self.assertEqual(response.status_int, 200)
req.content_type = 'application/json'
response = req.get_response(app)
self.assertEqual(b'success', response.body)
self.assertEqual(response.status_int, 200)
def test_resource_call_with_method_post(self):
class Controller(object):
@wsgi.expected_errors(400)
def create(self, req, body):
if expected_body != body:
msg = "The request body invalid"
raise webob.exc.HTTPBadRequest(explanation=msg)
return "success"
# verify the method: POST
app = fakes.TestRouter(Controller())
req = webob.Request.blank('/tests', method="POST",
content_type='application/json')
req.body = b'{"body": {"key": "value"}}'
expected_body = {'body': {
"key": "value"
}
}
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
self.assertEqual(b'success', response.body)
# verify without body
expected_body = None
req.body = None
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
self.assertEqual(b'success', response.body)
# the body is validated in the controller
expected_body = {'body': None}
response = req.get_response(app)
expected_unsupported_type_body = {'badRequest':
            {'message': 'The request body is invalid', 'code': 400}}
self.assertEqual(response.status_int, 400)
self.assertEqual(expected_unsupported_type_body,
jsonutils.loads(response.body))
def test_resource_call_with_method_put(self):
class Controller(object):
def update(self, req, id, body):
if expected_body != body:
msg = "The request body invalid"
raise webob.exc.HTTPBadRequest(explanation=msg)
return "success"
# verify the method: PUT
app = fakes.TestRouter(Controller())
req = webob.Request.blank('/tests/test_id', method="PUT",
content_type='application/json')
req.body = b'{"body": {"key": "value"}}'
expected_body = {'body': {
"key": "value"
}
}
response = req.get_response(app)
self.assertEqual(b'success', response.body)
self.assertEqual(response.status_int, 200)
req.body = None
expected_body = None
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
# verify no content_type is contained in the request
req = webob.Request.blank('/tests/test_id', method="PUT",
content_type='application/xml')
req.content_type = 'application/xml'
req.body = b'{"body": {"key": "value"}}'
response = req.get_response(app)
expected_unsupported_type_body = {'badMediaType':
{'message': 'Unsupported Content-Type', 'code': 415}}
self.assertEqual(response.status_int, 415)
self.assertEqual(expected_unsupported_type_body,
jsonutils.loads(response.body))
def test_resource_call_with_method_delete(self):
class Controller(object):
def delete(self, req, id):
return "success"
# verify the method: DELETE
app = fakes.TestRouter(Controller())
req = webob.Request.blank('/tests/test_id', method="DELETE")
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
self.assertEqual(b'success', response.body)
# ignore the body
req.body = b'{"body": {"key": "value"}}'
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
self.assertEqual(b'success', response.body)
def test_resource_forbidden(self):
class Controller(object):
def index(self, req):
raise exception.Forbidden()
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual(response.status_int, 403)
def test_resource_not_authorized(self):
class Controller(object):
def index(self, req):
raise exception.Unauthorized()
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
self.assertRaises(
exception.Unauthorized, req.get_response, app)
def test_dispatch(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method = resource.get_method(None, 'index', None, '')
actual = resource.dispatch(method, None, {'pants': 'off'})
expected = 'off'
self.assertEqual(actual, expected)
def test_get_method_unknown_controller_method(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(AttributeError, resource.get_method,
None, 'create', None, '')
def test_get_method_action_json(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
method = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(controller._action_foo, method)
def test_get_method_action_bad_body(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(exception.MalformedRequestBody, resource.get_method,
None, 'action', 'application/json', '{}')
def test_get_method_unknown_controller_action(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(KeyError, resource.get_method,
None, 'action', 'application/json',
'{"barAction": true}')
def test_get_method_action_method(self):
class Controller(object):
def action(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method = resource.get_method(None, 'action',
'application/xml',
'<fooAction>true</fooAction')
self.assertEqual(controller.action, method)
def test_get_action_args(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12,
}],
}
expected = {'action': 'update', 'id': 12}
self.assertEqual(resource.get_action_args(env), expected)
def test_get_body_bad_content(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/none'
request.body = b'foo'
self.assertRaises(exception.InvalidContentType,
resource.get_body, request)
def test_get_body_no_content_type(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.body = b'foo'
content_type, body = resource.get_body(request)
self.assertIsNone(content_type)
self.assertEqual(b'foo', body)
def test_get_body_no_content_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = b''
content_type, body = resource.get_body(request)
self.assertEqual('application/json', content_type)
self.assertEqual(b'', body)
def test_get_body_content_body_none(self):
resource = wsgi.Resource(None)
request = wsgi.Request.blank('/', method='PUT')
body = None
contents = resource._get_request_content(body, request)
self.assertIn('body', contents)
self.assertIsNone(contents['body'])
def test_get_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = b'foo'
content_type, body = resource.get_body(request)
self.assertEqual(content_type, 'application/json')
self.assertEqual(b'foo', body)
def test_get_request_id_with_dict_response_body(self):
class Controller(wsgi.Controller):
def index(self, req):
return {'foo': 'bar'}
req = fakes.HTTPRequest.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertIn('nova.context', req.environ)
self.assertEqual(b'{"foo": "bar"}', response.body)
self.assertEqual(response.status_int, 200)
def test_no_request_id_with_str_response_body(self):
class Controller(wsgi.Controller):
def index(self, req):
return 'foo'
req = fakes.HTTPRequest.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
# NOTE(alaski): This test is really to ensure that a str response
# doesn't error. Not having a request_id header is a side effect of
# our wsgi setup, ideally it would be there.
expected_header = self.get_req_id_header_name(req)
self.assertFalse(hasattr(response.headers, expected_header))
self.assertEqual(b'foo', response.body)
self.assertEqual(response.status_int, 200)
def test_get_request_id_no_response_body(self):
class Controller(object):
def index(self, req):
pass
req = fakes.HTTPRequest.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertIn('nova.context', req.environ)
self.assertEqual(b'', response.body)
self.assertEqual(response.status_int, 200)
def test_deserialize_default(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
obj = resource.deserialize('["foo"]')
self.assertEqual(obj, {'body': ['foo']})
def test_register_actions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
@wsgi.action('barAction')
def _action_bar(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertEqual({}, resource.wsgi_actions)
extended = ControllerExtended()
resource.register_actions(extended)
self.assertEqual({
'fooAction': extended._action_foo,
'barAction': extended._action_bar,
}, resource.wsgi_actions)
def test_get_method(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method = resource.get_method(None, 'index', None, '')
self.assertEqual(method, controller.index)
def test_get_method_action(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
method = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(method, controller._action_foo)
def test_get_method_action_whitelist(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('create')
def _create(self, req, body):
pass
@wsgi.action('delete')
def _delete(self, req, id):
pass
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_actions(extended)
method = resource.get_method(None, 'create',
'application/json',
'{"create": true}')
self.assertEqual(method, extended._create)
method = resource.get_method(None, 'delete', None, None)
self.assertEqual(method, extended._delete)
def test_resource_exception_handler_type_error(self):
# A TypeError should be translated to a Fault/HTTP 400.
def foo(a,):
return a
try:
with wsgi.ResourceExceptionHandler():
foo() # generate a TypeError
self.fail("Should have raised a Fault (HTTP 400)")
except wsgi.Fault as fault:
self.assertEqual(400, fault.status_int)
def test_resource_headers_are_utf8(self):
resp = webob.Response(status_int=202)
resp.headers['x-header1'] = 1
resp.headers['x-header2'] = u'header2'
resp.headers['x-header3'] = u'header3'
class Controller(object):
def index(self, req):
return resp
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
for val in six.itervalues(response.headers):
# All headers must be utf8
self.assertThat(val, matchers.EncodedByUTF8())
self.assertEqual('1', response.headers['x-header1'])
self.assertEqual('header2', response.headers['x-header2'])
self.assertEqual('header3', response.headers['x-header3'])
def test_resource_valid_utf8_body(self):
class Controller(object):
def update(self, req, id, body):
return body
req = webob.Request.blank('/tests/test_id', method="PUT")
body = b""" {"name": "\xe6\xa6\x82\xe5\xbf\xb5" } """
expected_body = b'{"name": "\\u6982\\u5ff5"}'
req.body = body
req.headers['Content-Type'] = 'application/json'
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual(response.body, expected_body)
self.assertEqual(response.status_int, 200)
def test_resource_invalid_utf8(self):
class Controller(object):
def update(self, req, id, body):
return body
req = webob.Request.blank('/tests/test_id', method="PUT")
body = b""" {"name": "\xf0\x28\x8c\x28" } """
req.body = body
req.headers['Content-Type'] = 'application/json'
app = fakes.TestRouter(Controller())
self.assertRaises(UnicodeDecodeError, req.get_response, app)
class ResponseObjectTest(test.NoDBTestCase):
def test_default_code(self):
robj = wsgi.ResponseObject({})
self.assertEqual(robj.code, 200)
def test_modified_code(self):
robj = wsgi.ResponseObject({})
robj._default_code = 202
self.assertEqual(robj.code, 202)
def test_override_default_code(self):
robj = wsgi.ResponseObject({}, code=404)
self.assertEqual(robj.code, 404)
def test_override_modified_code(self):
robj = wsgi.ResponseObject({}, code=404)
robj._default_code = 202
self.assertEqual(robj.code, 404)
def test_set_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual(robj.headers, {'header': 'foo'})
def test_get_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual(robj['hEADER'], 'foo')
def test_del_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
del robj['hEADER']
self.assertNotIn('header', robj.headers)
def test_header_isolation(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
hdrs = robj.headers
hdrs['hEADER'] = 'bar'
self.assertEqual(robj['hEADER'], 'foo')
class ValidBodyTest(test.NoDBTestCase):
def setUp(self):
super(ValidBodyTest, self).setUp()
self.controller = wsgi.Controller()
def test_is_valid_body(self):
body = {'foo': {}}
self.assertTrue(self.controller.is_valid_body(body, 'foo'))
def test_is_valid_body_none(self):
wsgi.Resource(controller=None)
self.assertFalse(self.controller.is_valid_body(None, 'foo'))
def test_is_valid_body_empty(self):
wsgi.Resource(controller=None)
self.assertFalse(self.controller.is_valid_body({}, 'foo'))
def test_is_valid_body_no_entity(self):
wsgi.Resource(controller=None)
body = {'bar': {}}
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
def test_is_valid_body_malformed_entity(self):
wsgi.Resource(controller=None)
body = {'foo': 'bar'}
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
class TestController(test.NoDBTestCase):
def test_check_for_versions_intersection_negative(self):
func_list = \
[versioned_method.VersionedMethod('foo',
api_version.APIVersionRequest(
'2.1'),
api_version.APIVersionRequest(
'2.4'),
None),
versioned_method.VersionedMethod('foo',
api_version.APIVersionRequest(
'2.11'),
api_version.APIVersionRequest(
'3.1'),
None),
versioned_method.VersionedMethod('foo',
api_version.APIVersionRequest(
'2.8'),
api_version.APIVersionRequest(
'2.9'),
None),
]
result = wsgi.Controller.check_for_versions_intersection(func_list=
func_list)
self.assertFalse(result)
func_list = \
[versioned_method.VersionedMethod('foo',
api_version.APIVersionRequest(
'2.12'),
api_version.APIVersionRequest(
'2.14'),
None),
versioned_method.VersionedMethod('foo',
api_version.APIVersionRequest(
'3.0'),
api_version.APIVersionRequest(
'3.4'),
None)
]
result = wsgi.Controller.check_for_versions_intersection(func_list=
func_list)
self.assertFalse(result)
def test_check_for_versions_intersection_positive(self):
func_list = \
[versioned_method.VersionedMethod('foo',
api_version.APIVersionRequest(
'2.1'),
api_version.APIVersionRequest(
'2.4'),
None),
versioned_method.VersionedMethod('foo',
api_version.APIVersionRequest(
'2.3'),
api_version.APIVersionRequest(
'3.0'),
None),
versioned_method.VersionedMethod('foo',
api_version.APIVersionRequest(
'2.8'),
api_version.APIVersionRequest(
'2.9'),
None),
]
result = wsgi.Controller.check_for_versions_intersection(func_list=
func_list)
self.assertTrue(result)
class ExpectedErrorTestCase(test.NoDBTestCase):
def test_expected_error(self):
@wsgi.expected_errors(404)
def fake_func():
raise webob.exc.HTTPNotFound()
self.assertRaises(webob.exc.HTTPNotFound, fake_func)
def test_expected_error_from_list(self):
@wsgi.expected_errors((404, 403))
def fake_func():
raise webob.exc.HTTPNotFound()
self.assertRaises(webob.exc.HTTPNotFound, fake_func)
def test_unexpected_error(self):
@wsgi.expected_errors(404)
def fake_func():
raise webob.exc.HTTPConflict()
self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
def test_unexpected_error_from_list(self):
@wsgi.expected_errors((404, 413))
def fake_func():
raise webob.exc.HTTPConflict()
self.assertRaises(webob.exc.HTTPInternalServerError, fake_func)
def test_unexpected_policy_not_authorized_error(self):
@wsgi.expected_errors(404)
def fake_func():
raise exception.PolicyNotAuthorized(action="foo")
self.assertRaises(exception.PolicyNotAuthorized, fake_func)
|
|
"""
The rechunk module defines:
intersect_chunks: a function for
converting chunks to new dimensions
rechunk: a function to convert the blocks
of an existing dask array to new chunks or blockshape
"""
from __future__ import absolute_import, division, print_function
import heapq
from itertools import product, chain, count
from operator import getitem, add, mul, itemgetter
import numpy as np
import toolz
from toolz import accumulate, reduce
from ..base import tokenize
from .core import concatenate3, Array, normalize_chunks
from .wrap import empty
from .. import sharedict
def cumdims_label(chunks, const):
""" Internal utility for cumulative sum with label.
>>> cumdims_label(((5, 3, 3), (2, 2, 1)), 'n') # doctest: +NORMALIZE_WHITESPACE
[(('n', 0), ('n', 5), ('n', 8), ('n', 11)),
(('n', 0), ('n', 2), ('n', 4), ('n', 5))]
"""
return [tuple(zip((const,) * (1 + len(bds)),
accumulate(add, (0,) + bds)))
for bds in chunks]
def _breakpoints(cumold, cumnew):
"""
>>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')
>>> old = cumdims_label(((2, 2, 1), (5,)), 'o')
>>> _breakpoints(new[0], old[0])
(('n', 0), ('o', 0), ('n', 2), ('o', 2), ('o', 4), ('n', 5), ('o', 5))
>>> _breakpoints(new[1], old[1])
(('n', 0), ('o', 0), ('n', 2), ('n', 4), ('n', 5), ('o', 5))
"""
return tuple(sorted(cumold + cumnew, key=itemgetter(1)))
def _intersect_1d(breaks):
"""
Internal utility to intersect chunks for 1d after preprocessing.
>>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')
>>> old = cumdims_label(((2, 2, 1), (5,)), 'o')
>>> _intersect_1d(_breakpoints(old[0], new[0])) # doctest: +NORMALIZE_WHITESPACE
[[(0, slice(0, 2, None))],
[(1, slice(0, 2, None)), (2, slice(0, 1, None))]]
>>> _intersect_1d(_breakpoints(old[1], new[1])) # doctest: +NORMALIZE_WHITESPACE
[[(0, slice(0, 2, None))],
[(0, slice(2, 4, None))],
[(0, slice(4, 5, None))]]
Parameters
----------
breaks: list of tuples
Each tuple is ('o', 8) or ('n', 8)
These are pairs of 'o' old or new 'n'
indicator with a corresponding cumulative sum.
Uses 'o' and 'n' to make new tuples of slices for
the new block crosswalk to old blocks.
"""
start = 0
last_end = 0
old_idx = 0
ret = []
ret_next = []
for idx in range(1, len(breaks)):
label, br = breaks[idx]
last_label, last_br = breaks[idx - 1]
if last_label == 'n':
if ret_next:
ret.append(ret_next)
ret_next = []
if last_label == 'o':
start = 0
else:
start = last_end
end = br - last_br + start
last_end = end
if br == last_br:
continue
ret_next.append((old_idx, slice(start, end)))
if label == 'o':
old_idx += 1
start = 0
if ret_next:
ret.append(ret_next)
return ret
def intersect_chunks(old_chunks, new_chunks):
"""
Make dask.array slices as intersection of old and new chunks.
>>> intersections = intersect_chunks(((4, 4), (2,)),
... ((8,), (1, 1)))
>>> list(intersections) # doctest: +NORMALIZE_WHITESPACE
[(((0, slice(0, 4, None)), (0, slice(0, 1, None))),
((1, slice(0, 4, None)), (0, slice(0, 1, None)))),
(((0, slice(0, 4, None)), (0, slice(1, 2, None))),
((1, slice(0, 4, None)), (0, slice(1, 2, None))))]
Parameters
----------
old_chunks : iterable of tuples
block sizes along each dimension (convert from old_chunks)
new_chunks: iterable of tuples
block sizes along each dimension (converts to new_chunks)
"""
cmo = cumdims_label(old_chunks, 'o')
cmn = cumdims_label(new_chunks, 'n')
sums = [sum(o) for o in old_chunks]
    sums2 = [sum(n) for n in new_chunks]
    if sums != sums2:
        raise ValueError('Cannot change dimensions from %r to %r'
                         % (sums, sums2))
old_to_new = [_intersect_1d(_breakpoints(cm[0], cm[1]))
for cm in zip(cmo, cmn)]
cross1 = product(*old_to_new)
cross = chain(tuple(product(*cr)) for cr in cross1)
return cross
def blockdims_dict_to_tuple(old, new):
"""
>>> blockdims_dict_to_tuple((4, 5, 6), {1: 10})
(4, 10, 6)
"""
newlist = list(old)
for k, v in new.items():
newlist[k] = v
return tuple(newlist)
def blockshape_dict_to_tuple(old_chunks, d):
"""
>>> blockshape_dict_to_tuple(((4, 4), (5, 5)), {1: 3})
((4, 4), (3, 3, 3, 1))
"""
shape = tuple(map(sum, old_chunks))
new_chunks = list(old_chunks)
for k, v in d.items():
div = shape[k] // v
mod = shape[k] % v
new_chunks[k] = (v,) * div + ((mod,) if mod else ())
return tuple(new_chunks)
DEFAULT_THRESHOLD = 4
DEFAULT_BLOCK_SIZE_LIMIT = 1e8
def rechunk(x, chunks, threshold=DEFAULT_THRESHOLD,
block_size_limit=DEFAULT_BLOCK_SIZE_LIMIT):
"""
Convert blocks in dask array x for new chunks.
>>> import dask.array as da
>>> a = np.random.uniform(0, 1, 7**4).reshape((7,) * 4)
>>> x = da.from_array(a, chunks=((2, 3, 2),)*4)
>>> x.chunks
((2, 3, 2), (2, 3, 2), (2, 3, 2), (2, 3, 2))
>>> y = rechunk(x, chunks=((2, 4, 1), (4, 2, 1), (4, 3), (7,)))
>>> y.chunks
((2, 4, 1), (4, 2, 1), (4, 3), (7,))
chunks also accept dict arguments mapping axis to blockshape
>>> y = rechunk(x, chunks={1: 2}) # rechunk axis 1 with blockshape 2
Parameters
----------
x: dask array
chunks: tuple
The new block dimensions to create
threshold: int
The graph growth factor under which we don't bother
introducing an intermediate step
block_size_limit: int
The maximum block size (in bytes) we want to produce during an
intermediate step
"""
threshold = threshold or DEFAULT_THRESHOLD
block_size_limit = block_size_limit or DEFAULT_BLOCK_SIZE_LIMIT
if isinstance(chunks, dict):
if not chunks or isinstance(next(iter(chunks.values())), int):
chunks = blockshape_dict_to_tuple(x.chunks, chunks)
else:
chunks = blockdims_dict_to_tuple(x.chunks, chunks)
if isinstance(chunks, (tuple, list)):
chunks = tuple(lc if lc is not None else rc
for lc, rc in zip(chunks, x.chunks))
chunks = normalize_chunks(chunks, x.shape)
if chunks == x.chunks:
return x
ndim = x.ndim
if not len(chunks) == ndim or tuple(map(sum, chunks)) != x.shape:
raise ValueError("Provided chunks are not consistent with shape")
steps = plan_rechunk(x.chunks, chunks, x.dtype.itemsize,
threshold, block_size_limit)
for c in steps:
x = _compute_rechunk(x, c)
return x
def _number_of_blocks(chunks):
return reduce(mul, map(len, chunks))
def _largest_block_size(chunks):
return reduce(mul, map(max, chunks))
def estimate_graph_size(old_chunks, new_chunks):
""" Estimate the graph size during a rechunk computation.
"""
# Estimate the number of intermediate blocks that will be produced
# (we don't use intersect_chunks() which is much more expensive)
crossed_size = reduce(mul, (len(oc) + len(nc)
for oc, nc in zip(old_chunks, new_chunks)))
return crossed_size
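# Illustrative example (comment only, not part of the library): for
# old_chunks=((4, 4), (2,)) and new_chunks=((8,), (1, 1)) the estimate is
# (2 + 1) * (1 + 2) == 9 intermediate blocks.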
def divide_to_width(desired_chunks, max_width):
""" Minimally divide the given chunks so as to make the largest chunk
    width less than or equal to *max_width*.
"""
chunks = []
for c in desired_chunks:
nb_divides = int(np.ceil(c / max_width))
for i in range(nb_divides):
n = c // (nb_divides - i)
chunks.append(n)
c -= n
assert c == 0
return tuple(chunks)
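# Illustrative example (comment only): divide_to_width((10, 3), 4) returns
# (3, 3, 4, 3) -- the width-10 chunk is split into three near-equal pieces no
# wider than 4, while the width-3 chunk is already narrow enough to keep.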
def merge_to_number(desired_chunks, max_number):
""" Minimally merge the given chunks so as to drop the number of
chunks below *max_number*, while minimizing the largest width.
"""
if len(desired_chunks) <= max_number:
return desired_chunks
distinct = set(desired_chunks)
if len(distinct) == 1:
# Fast path for homogeneous target, also ensuring a regular result
w = distinct.pop()
n = len(desired_chunks)
total = n * w
desired_width = total // max_number
width = w * (desired_width // w)
adjust = (total - max_number * width) // w
return (width + w,) * adjust + (width,) * (max_number - adjust)
desired_width = sum(desired_chunks) // max_number
nmerges = len(desired_chunks) - max_number
heap = [(desired_chunks[i] + desired_chunks[i + 1], i, i + 1)
for i in range(len(desired_chunks) - 1)]
heapq.heapify(heap)
chunks = list(desired_chunks)
while nmerges > 0:
# Find smallest interval to merge
width, i, j = heapq.heappop(heap)
# If interval was made invalid by another merge, recompute
# it, re-insert it and retry.
if chunks[j] == 0:
j += 1
while chunks[j] == 0:
j += 1
heapq.heappush(heap, (chunks[i] + chunks[j], i, j))
continue
elif chunks[i] + chunks[j] != width:
heapq.heappush(heap, (chunks[i] + chunks[j], i, j))
continue
# Merge
assert chunks[i] != 0
chunks[i] = 0 # mark deleted
chunks[j] = width
nmerges -= 1
return tuple(filter(None, chunks))
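# Illustrative examples (comment only): merge_to_number((1, 1, 1, 1, 1, 1), 3)
# takes the homogeneous fast path and returns (2, 2, 2), while
# merge_to_number((5, 1, 1, 5), 3) merges the two smallest neighbours via the
# heap-based greedy loop and returns (5, 2, 5).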
def find_merge_rechunk(old_chunks, new_chunks, block_size_limit):
"""
Find an intermediate rechunk that would merge some adjacent blocks
together in order to get us nearer the *new_chunks* target, without
violating the *block_size_limit* (in number of elements).
"""
ndim = len(old_chunks)
old_largest_width = [max(c) for c in old_chunks]
new_largest_width = [max(c) for c in new_chunks]
graph_size_effect = {
dim: len(nc) / len(oc)
for dim, (oc, nc) in enumerate(zip(old_chunks, new_chunks))
}
block_size_effect = {
dim: new_largest_width[dim] / old_largest_width[dim]
for dim in range(ndim)
}
# Our goal is to reduce the number of nodes in the rechunk graph
# by merging some adjacent chunks, so consider dimensions where we can
# reduce the # of chunks
merge_candidates = [dim for dim in range(ndim)
if graph_size_effect[dim] <= 1.0]
# Merging along each dimension reduces the graph size by a certain factor
# and increases memory largest block size by a certain factor.
# We want to optimize the graph size while staying below the given
# block_size_limit. This is in effect a knapsack problem, except with
# multiplicative values and weights. Just use a greedy algorithm
# by trying dimensions in decreasing value / weight order.
def key(k):
gse = graph_size_effect[k]
bse = block_size_effect[k]
if bse == 1:
bse = 1 + 1e-9
return np.log(gse) / np.log(bse)
sorted_candidates = sorted(merge_candidates, key=key)
largest_block_size = reduce(mul, old_largest_width)
chunks = list(old_chunks)
memory_limit_hit = False
for dim in sorted_candidates:
# Examine this dimension for possible graph reduction
new_largest_block_size = (
largest_block_size * new_largest_width[dim] // old_largest_width[dim])
if new_largest_block_size <= block_size_limit:
# Full replacement by new chunks is possible
chunks[dim] = new_chunks[dim]
largest_block_size = new_largest_block_size
else:
# Try a partial rechunk, dividing the new chunks into
# smaller pieces
largest_width = old_largest_width[dim]
chunk_limit = int(block_size_limit * largest_width / largest_block_size)
c = divide_to_width(new_chunks[dim], chunk_limit)
if len(c) <= len(old_chunks[dim]):
# We manage to reduce the number of blocks, so do it
chunks[dim] = c
largest_block_size = largest_block_size * max(c) // largest_width
memory_limit_hit = True
assert largest_block_size == _largest_block_size(chunks)
assert largest_block_size <= block_size_limit
return tuple(chunks), memory_limit_hit
def find_split_rechunk(old_chunks, new_chunks, graph_size_limit):
"""
Find an intermediate rechunk that would split some chunks to
get us nearer *new_chunks*, without violating the *graph_size_limit*.
"""
ndim = len(old_chunks)
chunks = list(old_chunks)
for dim in range(ndim):
graph_size = estimate_graph_size(chunks, new_chunks)
if graph_size > graph_size_limit:
break
if len(old_chunks[dim]) > len(new_chunks[dim]):
# It's not interesting to split
continue
# Merge the new chunks so as to stay within the graph size budget
max_number = int(len(old_chunks[dim]) * graph_size_limit / graph_size)
c = merge_to_number(new_chunks[dim], max_number)
assert len(c) <= max_number
# Consider the merge successful if its result has a greater length
# and smaller max width than the old chunks
if len(c) >= len(old_chunks[dim]) and max(c) <= max(old_chunks[dim]):
chunks[dim] = c
return tuple(chunks)
def plan_rechunk(old_chunks, new_chunks, itemsize,
threshold=DEFAULT_THRESHOLD,
block_size_limit=DEFAULT_BLOCK_SIZE_LIMIT):
""" Plan an iterative rechunking from *old_chunks* to *new_chunks*.
The plan aims to minimize the rechunk graph size.
Parameters
----------
itemsize: int
The item size of the array
threshold: int
The graph growth factor under which we don't bother
introducing an intermediate step
block_size_limit: int
The maximum block size (in bytes) we want to produce during an
intermediate step
"""
ndim = len(new_chunks)
steps = []
if ndim <= 1 or not all(new_chunks):
# Trivial array => no need for an intermediate
return steps + [new_chunks]
    # Make it a number of elements
block_size_limit /= itemsize
# Fix block_size_limit if too small for either old_chunks or new_chunks
largest_old_block = _largest_block_size(old_chunks)
largest_new_block = _largest_block_size(new_chunks)
block_size_limit = max([block_size_limit,
largest_old_block,
largest_new_block,
])
# The graph size above which to optimize
graph_size_threshold = threshold * (_number_of_blocks(old_chunks) +
_number_of_blocks(new_chunks))
current_chunks = old_chunks
first_pass = True
while True:
graph_size = estimate_graph_size(current_chunks, new_chunks)
if graph_size < graph_size_threshold:
break
if first_pass:
chunks = current_chunks
else:
# We hit the block_size_limit in a previous merge pass =>
# accept a significant increase in graph size in exchange for
# 1) getting nearer the goal 2) reducing the largest block size
# to make place for the following merge.
# To see this pass in action, make the block_size_limit very small.
chunks = find_split_rechunk(current_chunks, new_chunks,
graph_size * threshold)
chunks, memory_limit_hit = find_merge_rechunk(chunks, new_chunks,
block_size_limit)
if chunks == current_chunks or chunks == new_chunks:
break
steps.append(chunks)
current_chunks = chunks
if not memory_limit_hit:
break
first_pass = False
return steps + [new_chunks]
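# Illustrative example (comment only): 1-d inputs take the ndim <= 1 fast path
# above, so plan_rechunk(((1,) * 100,), ((100,),), itemsize=8) returns
# [((100,),)] with no intermediate steps.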
def _compute_rechunk(x, chunks):
""" Compute the rechunk of *x* to the given *chunks*.
"""
if x.size == 0:
# Special case for empty array, as the algorithm below does not behave correctly
return empty(x.shape, chunks=chunks, dtype=x.dtype)
ndim = x.ndim
crossed = intersect_chunks(x.chunks, chunks)
x2 = dict()
intermediates = dict()
token = tokenize(x, chunks)
merge_temp_name = 'rechunk-merge-' + token
split_temp_name = 'rechunk-split-' + token
split_name_suffixes = count()
# Pre-allocate old block references, to allow re-use and reduce the
# graph's memory footprint a bit.
old_blocks = np.empty([len(c) for c in x.chunks], dtype='O')
for index in np.ndindex(old_blocks.shape):
old_blocks[index] = (x.name,) + index
# Iterate over all new blocks
new_index = product(*(range(len(c)) for c in chunks))
for new_idx, cross1 in zip(new_index, crossed):
key = (merge_temp_name,) + new_idx
old_block_indices = [[cr[i][0] for cr in cross1] for i in range(ndim)]
subdims1 = [len(set(old_block_indices[i]))
for i in range(ndim)]
rec_cat_arg = np.empty(subdims1, dtype='O')
rec_cat_arg_flat = rec_cat_arg.flat
# Iterate over the old blocks required to build the new block
for rec_cat_index, ind_slices in enumerate(cross1):
old_block_index, slices = zip(*ind_slices)
name = (split_temp_name, next(split_name_suffixes))
intermediates[name] = (getitem, old_blocks[old_block_index], slices)
rec_cat_arg_flat[rec_cat_index] = name
assert rec_cat_index == rec_cat_arg.size - 1
# New block is formed by concatenation of sliced old blocks
if all(d == 1 for d in rec_cat_arg.shape):
x2[key] = rec_cat_arg.flat[0]
else:
x2[key] = (concatenate3, rec_cat_arg.tolist())
assert new_idx == tuple(len(c) - 1 for c in chunks)
del old_blocks, new_index
x2 = sharedict.merge(x.dask, (merge_temp_name, toolz.merge(x2, intermediates)))
return Array(x2, merge_temp_name, chunks, dtype=x.dtype)
class _PrettyBlocks(object):
def __init__(self, blocks):
self.blocks = blocks
def __str__(self):
runs = []
run = []
repeats = 0
for c in self.blocks:
if run and run[-1] == c:
if repeats == 0 and len(run) > 1:
runs.append((None, run[:-1]))
run = run[-1:]
repeats += 1
else:
if repeats > 0:
assert len(run) == 1
runs.append((repeats + 1, run[-1]))
run = []
repeats = 0
run.append(c)
if run:
if repeats == 0:
runs.append((None, run))
else:
assert len(run) == 1
runs.append((repeats + 1, run[-1]))
parts = []
for repeats, run in runs:
if repeats is None:
parts.append(str(run))
else:
parts.append("%d*[%s]" % (repeats, run))
return " | ".join(parts)
__repr__ = __str__
def format_blocks(blocks):
"""
Pretty-format *blocks*.
>>> format_blocks((10, 10, 10))
3*[10]
>>> format_blocks((2, 3, 4))
[2, 3, 4]
>>> format_blocks((10, 10, 5, 6, 2, 2, 2, 7))
2*[10] | [5, 6] | 3*[2] | [7]
"""
assert (isinstance(blocks, tuple) and
all(isinstance(x, int) for x in blocks))
return _PrettyBlocks(blocks)
def format_chunks(chunks):
"""
>>> format_chunks((10 * (3,), 3 * (10,)))
(10*[3], 3*[10])
"""
assert isinstance(chunks, tuple)
return tuple(format_blocks(c) for c in chunks)
def format_plan(plan):
"""
>>> format_plan([((10, 10, 10), (15, 15)), ((30,), (10, 10, 10))])
[(3*[10], 2*[15]), ([30], 3*[10])]
"""
return [format_chunks(c) for c in plan]
|
|
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural network model to predict time.
##### Features these models operate on ######
These models take featurized tasks and convert them to some hidden state.
Each task is configured by some number of keys / settings. For example let's
say we have a task with the following config (in pseudo code):
Task.datasets.mnist.batch_size=128,
Task.datasets.mnist.image_size=(14, 14),
Task.activation_idx=2
These cfgs get featurized into 3 values: key_feats, float_feats, and int_feats.
The float values are from values interpreted as floats, the int values are for
integer / categorical features, and the key feats are an integer representation
of the key string.
Each of these has a leading dimension equal to the number of configured
values, N (in this case N=3). The shapes are then:
key_feats: int32[N, P]
float_feats: float32[N, P1]
int_feats: int32[N]
For the key feats, each key in the configuration is run through the hashing
trick to convert it to an integer and then padded. See
https://en.wikipedia.org/wiki/Feature_hashing for more info.
So, the first key in the above example is (assuming P=8):
[H("task"), H("datasets"), H("mnist"), H("batch_size"), 0, 0, 0, 0]
As with the keys, the float features are also padded. In this example, both
batch_size and image_size are interpreted as float features (after taking the
log) by a special case based on the argument name (see
tasks/parametric/cfgobject.py). So here our float features will have values:
[log(128), 0, 0, 0, 0, 0, 0, 0]
[log(14), log(14), 0, 0, 0, 0, 0, 0]
[0, 0, 0, 0, 0, 0, 0, 0]
The int features do not have to be padded as at this point they are only 1D.
So here the int features will be:
[0, 0, 2]
For a full description of the featurization see tasks/parametric/cfgobject.py.
##### Outline of the neural architecture #####
We seek to convert these features into some dense representation. The meaning
of the int_feats and float_feats varies wildly depending on the key. The values
not only mean different things, but they can also take on vastly different
scales. To account for this, we make use of a table of both running normalizers,
and weights which we update depending on the keys passed in.
First, we collapse the keys to have a length of 1 rather than P. This throws
away information about the structure of the key and treats different sequences
as different values.
Next, we build these embedding tables. These tables have a length of E, where
E is the number of unique hash-trick values we support. (Note there could be a
collision here but I expect this to be fine.)
T_norm_min: float32[E, P1]
T_norm_max: float32[E, P1]
float feats are normalized by:
f = (float_feat - T_norm_min[key]) / (T_norm_max[key] - T_norm_min[key])
The min and max values from float_feats are used to update the T_norm_max and
T_norm_min tables.
Next we apply a linear layer. H is the embedding dimension and is set to
say 128.
T_w: float32[E, P1, H]
T_b: float32[E, H]
h_f = f @ T_w[E] + T_b[E]
For the int features we embed them using an embedding table (P2 here being
the number of distinct integer values supported):
T_i: float32[E, P2, H]
h_i = T_i[E, int_feats]
We then add these up.
h = h_i + h_f
At this point h: float32[N, H].
The rest of the network consists of mixtures of linear operations mapping the H
dimension, as well as reductions over the N dimension. This is inspired by
["DeepSets"](https://arxiv.org/abs/1703.06114).
Finally, a reduction is done to remove the leading, variable N dimension.
This is the final featurized value and can be used to make predictions.
"""
import functools
import os
from typing import Callable, Tuple, Optional
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
from learned_optimization import checkpoints
from learned_optimization.tasks.parametric import cfgobject
import numpy as onp
PRNGKey = jnp.ndarray
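# Minimal sketch (comment only; toy values, shapes as in the module docstring
# with N=3, P=8): the features for the example config above would look like
#   key_feats = onp.zeros([3, 8], dtype=onp.int32)  # hashed key parts, padded
#   float_feats = onp.array([[onp.log(128.)] + [0.] * 7,
#                            [onp.log(14.)] * 2 + [0.] * 6,
#                            [0.] * 8], dtype=onp.float32)
#   int_feats = onp.array([0, 0, 2], dtype=onp.int32)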
def features_to_hidden(key_feats: jnp.ndarray,
float_feats: jnp.ndarray,
int_feats: jnp.ndarray,
feat_mask: jnp.ndarray,
n_embed: int = 4096,
n_hidden: int = 128,
n_hidden2: int = 512) -> jnp.ndarray:
"""Map features, to a sense hidden representation.
See module level documentation for more info.
For args we use the following symbols to denote shapes:
BS: batch size of different featurized cfgs.
N: number of tags within a given cfg. This is variable across config but
padded to a fixed size.
P: Padding to some fixed dim.
H: Some hidden representation size
Args:
key_feats: float32[BS, N, P1]
float_feats: float32[BS, N, P2]
int_feats: int32[BS, N]
feat_mask: float32[BS, N] -- a mask when there are less features than N.
n_embed: Number of embedded values for both keys and ints.
n_hidden: hidden size while working with the N keys.
n_hidden2: hidden size after reducing across the N keys.
Returns:
float32[BS, H] the featurized cfgs
"""
  # Keys right now are [bs, num_feats, 8] where 8 holds each subsequent tag --
  # so the key MLP/hidden_size would have 2 nonzero values and the rest 0.
  # For simplicity we simply add each entry up and rely upon the mod below
  # to collapse them down to one value.
flat_keys_feats = jnp.sum(key_feats, axis=2)
flat_keys_feats = flat_keys_feats % n_embed
nan = hk.initializers.Constant(jnp.nan)
min_state = hk.get_state("min", shape=[n_embed, 8], init=nan)
max_state = hk.get_state("max", shape=[n_embed, 8], init=nan)
new_min = jnp.fmin(min_state[flat_keys_feats], jnp.min(float_feats, axis=0))
new_max = jnp.fmax(max_state[flat_keys_feats], jnp.max(float_feats, axis=0))
float_feats = (float_feats - new_min) / (1e-15 + (new_max - new_min))
float_feats = (float_feats - 0.5) * 2
new_min = min_state.at[flat_keys_feats].set(new_min)
new_max = max_state.at[flat_keys_feats].set(new_max)
hk.set_state("min", new_min)
hk.set_state("max", new_max)
id_emb = hk.get_parameter(
"int_feats",
shape=[n_embed, 8, n_hidden],
init=hk.initializers.RandomUniform(-0.1, 0.1))
int_feat_embed = id_emb[flat_keys_feats, int_feats]
float_emb = hk.get_parameter(
"float_feats",
shape=[n_embed, 8, n_hidden],
init=hk.initializers.TruncatedNormal(1.0 / onp.sqrt(8)))
float_emb_b = hk.get_parameter(
"float_feats_b",
shape=[n_embed, n_hidden],
init=hk.initializers.Constant(0))
feat_embed = jax.nn.relu(
jax.vmap(jax.vmap(lambda a, b, c: a @ b + c))(
float_feats, float_emb[flat_keys_feats],
float_emb_b[flat_keys_feats]))
hidden = (int_feat_embed + feat_embed)
assert feat_mask.shape == (hidden.shape[0], hidden.shape[1], 1)
hidden = hk.Linear(n_hidden)(hidden)
hidden = jax.nn.relu(hidden)
hidden = hidden * feat_mask
# hidden is currently [batch, number of attributes, features]
hmean = jnp.sum(
hidden, axis=1, keepdims=True) / jnp.sum(
feat_mask, axis=1, keepdims=True)
hmax = jnp.max(hidden, axis=1, keepdims=True)
h0 = jax.nn.relu(hk.Linear(n_hidden)(hidden))
h1 = jax.nn.relu(hk.Linear(n_hidden)(hmean))
h2 = jax.nn.relu(hk.Linear(n_hidden)(hmax))
hidden = hk.Linear(n_hidden2)((h0 + h1 + h2) / 3.)
hidden = hidden * feat_mask
hidden = jnp.sum(hidden, axis=1) / jnp.sum(feat_mask, axis=1)
# hidden is now [batch, features]
hidden = jax.nn.relu(hk.Linear(n_hidden2)(hidden))
hidden = jax.nn.relu(hk.Linear(n_hidden2)(hidden))
return hidden
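# Minimal usage sketch (assumption: toy shapes BS=2, N=3; features_to_hidden
# creates parameters and state, so it must run under hk.transform_with_state):
#
#   fwd = hk.transform_with_state(features_to_hidden)
#   params, state = fwd.init(jax.random.PRNGKey(0),
#                            jnp.zeros([2, 3, 8], jnp.int32),   # key_feats
#                            jnp.zeros([2, 3, 8], jnp.float32), # float_feats
#                            jnp.zeros([2, 3], jnp.int32),      # int_feats
#                            jnp.ones([2, 3, 1]))               # feat_mask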
def timing_model_forward(feats: Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray],
mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""A haiku function which converts features to a time prediction.
This must be called with hk.transform!
Args:
feats: [int32[B, N, P1], float32[B, N, P2], int32[B, N]]
mask: float32[B, N]
Returns:
The predicted time model. float32[B]
"""
keys, floats, ints = feats
if mask is None:
mask = jnp.ones((keys.shape[0], floats.shape[1], 1))
outs = features_to_hidden(keys, floats, ints, mask)
output = hk.Linear(1)(outs)[:, 0]
# shift arbitrarily to roughly center the outputs.
# TODO(lmetz) ensure this is stable across a variety of models.
return jnp.exp(output - 10.)
@hk.transform_with_state
def loss_fn(feats: Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray],
times: jnp.ndarray) -> jnp.ndarray:
out = timing_model_forward(feats)
assert out.shape == times.shape
return jnp.mean(jnp.square(jnp.log(times) - jnp.log(out)))
def get_timing_model_root_dir() -> str:
root_dir = "~/lopt_model_timings"
root_dir = os.environ.get("LOPT_TIMING_MODEL_DIR", root_dir)
return root_dir
def get_timing_model_dir(sample_fn_name: str, hardware_name: str) -> str:
root_dir = get_timing_model_root_dir()
path = os.path.join(root_dir, sample_fn_name, hardware_name)
return os.path.expanduser(path)
Feats = Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]
PredictionFN = Callable[[Feats], jnp.ndarray]
@functools.lru_cache(None)
def load_model(model_path_suffix: str) -> PredictionFN:
"""Load and construct inference function for a timing model.
Args:
model_path_suffix: Path to load. This suffix is appended to
`get_timing_model_root_dir()` then loaded.
Returns:
A callable which maps from task features to predicted runtime.
"""
path = os.path.join(get_timing_model_root_dir(), model_path_suffix)
logging.info(f"Loading timing model from {path}") # pylint: disable=logging-fstring-interpolation
init, apply = hk.transform_with_state(timing_model_forward)
key = jax.random.PRNGKey(0)
feats = (jnp.zeros([1, 1, 8],
dtype=jnp.int32), jnp.zeros([1, 1, 8], dtype=jnp.float32),
jnp.zeros([1, 1], dtype=jnp.int32))
weights, state = init(key, feats)
weights, state = checkpoints.load_state(path, (weights, state))
apply_jit = jax.jit(apply)
def apply_model(feats):
key = jax.random.PRNGKey(0)
out, unused_next_state = apply_jit(weights, state, key, feats)
return out
return apply_model
def rejection_sample(sampler: Callable[[PRNGKey], cfgobject.CFGObject],
model_path_suffix: str, key: PRNGKey,
max_time: float) -> cfgobject.CFGObject:
"""Perform rejection sampling to sample task cfgs which run in < max_time.
Args:
sampler: Function which returns the configurations to be sampled from.
model_path_suffix: the trailing suffix to the saved model. This should be
something like: "sample_image_mlp/tpu_TPUv4/20220103_133049.weights". This
suffix is appended to `get_timing_model_root_dir()` then loaded.
key: jax random key.
max_time: Max amount of time to allow in sampled tasks.
Returns:
CFGObject that represents a TaskFamily which runs in less than `max_time`.
"""
rng = hk.PRNGSequence(key)
forward_fn = load_model(model_path_suffix)
  # batch size to run through the timing model.
# Most of the time is spent on featurization at the moment, so this number
# is low.
batch_size = 1
for _ in range(512 // batch_size):
keys = jax.random.split(next(rng), batch_size)
cfgs = [sampler(key) for key in keys]
key_feat, int_feat, float_feat, feat_mask = cfgobject.featurize_many(cfgs)
# TODO(lmetz) pass through feat mask to support variable length features
del feat_mask
times = forward_fn((key_feat, int_feat, float_feat))
mask = times < max_time
if onp.all(onp.logical_not(mask)):
continue
else:
return cfgs[onp.argmax(mask)]
raise ValueError(
f"Nothing found for static: {model_path_suffix} and time {max_time}")
|
|
#
# ESPrinkler2 testserver.py
#
# This is a simple server so data-uncompressed can be tested
# and edited using a local browser at http://localhost
# Note that data-uncompressed does not save config.json or sched.json
# This server saves that data as strings
# Similarly current time and zone status are simulated
#
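# Example session (assumed commands; run with Python 2, and note that binding
# port 80 typically requires elevated privileges):
#   $ sudo python testserver.py
#   $ curl 'http://localhost/status'          # simulated time and zone state
#   $ curl 'http://localhost/toggle?zone=3'   # flip zone 3 on or off
#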
import SimpleHTTPServer
from urlparse import urlparse, parse_qs
import SocketServer
import os
import time
import json
import cgi
zones = [
'off', 'off', 'off', 'off',
'off', 'off', 'off', 'off'
]
htime = 0
ltime = time.time()
offsetGMT = 0
config = None
sched = None
buttons = None
host = 'ESPrinkler2'
class ESPrinkler2RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def xsend(self, content, contentType="text/html"):
print "serving custom response: " + content
self.send_response(200)
self.send_header("Content-Type", contentType)
v = len(content)
self.send_header("Content-Tength", v)
self.end_headers()
self.wfile.write(content)
def do_GET(self):
global zones, htime, ltime, config, sched, host, buttons, offsetGMT
print self.path
query_components = parse_qs(urlparse(self.path).query)
self.path = self.path.split('?', 1)[0]
print self.path
if self.path == '/status':
htime += time.time() - ltime
ltime = time.time()
v = {'time': int(htime), 'host': host, 'offsetGMT': offsetGMT}
for i in range(0, 8):
v['zone'+str(i)] = zones[i]
self.xsend(json.dumps(v), "text/json")
elif self.path == '/settime':
htime = 0
try:
htime = int(query_components['time'][0])
except:
pass
ltime = time.time()
try:
o = int(query_components['offset'][0])
offsetGMT = o
except:
pass
self.xsend("ok")
elif self.path == '/clear':
for i in range(0, 8):
zones[i] = 'off'
self.xsend("ok")
elif self.path == '/clean':
config = None
sched = None
buttons = None
self.xsend("Persistant Storage has been cleaned.")
elif self.path == '/toggle':
i = 0
try:
i = int(query_components['zone'][0])
except:
pass
print i
z = zones[i]
if z == 'on':
z = 'off'
else:
z = 'on'
zones[i] = z
self.xsend("ok")
elif self.path == '/restart':
self.xsend(
"Restarting... please wait a minute or two and refresh")
elif self.path == '/list':
v = []
for f in os.listdir('.'):
v.append({'type': 'file', 'name': f})
self.xsend(json.dumps(v), "text/json")
# config.json is not saved, but only temp string storage
elif self.path == '/config.json':
if config is None:
self.send_error(404, "File not found")
return None
else:
print('config sending\n'+config)
self.xsend(config)
# sched.json is not saved, but only temp string storage
elif self.path == '/sched.json':
if sched is None:
self.send_error(404, "File not found")
return None
else:
print('sched sending\n'+sched)
self.xsend(sched)
# buttons.json is not saved, but only temp string storage
elif self.path == '/buttons.json':
if buttons is None:
self.send_error(404, "File not found")
return None
else:
print('buttons sending\n'+buttons)
self.xsend(buttons)
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_DELETE(self):
self.path = self.path.split('?', 1)[0]
print self.path
if self.path == '/edit':
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
for field in form.keys():
field_item = form[field]
if field == 'path':
fn = field_item.value
if fn.startswith('/'):
fn = fn[1:]
if os.path.isfile(fn):
os.remove(fn)
self.xsend("")
return
self.send_response(404)
def do_PUT(self):
self.path = self.path.split('?', 1)[0]
print self.path
if self.path == '/edit':
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
for field in form.keys():
field_item = form[field]
if field == 'path':
fn = field_item.value
if fn.startswith('/'):
fn = fn[1:]
if not os.path.isfile(fn):
f = open(fn, 'w')
f.close()
self.xsend("")
return
else:
self.send_response(404)
self.end_headers()
return
self.send_response(404)
def do_POST(self):
global config, sched, buttons, host, offsetGMT
self.path = self.path.split('?', 1)[0]
print self.path
if self.path == '/edit':
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
for field in form.keys():
field_item = form[field]
if field_item.filename:
# The field contains an uploaded file
file_data = field_item.file.read()
file_len = len(file_data)
print('Uploaded %s as "%s" (%d bytes)' %
(field, field_item.filename, file_len))
if field == 'data':
fn = field_item.filename
if fn.startswith('/'):
fn = fn[1:]
f = open(fn, 'w')
f.write(file_data)
f.close()
print 'wrote:'+fn
# config and sched are never saved for testing
if (fn == 'config.json' or fn == 'sched.json'
or fn == 'buttons.json'):
os.remove(fn)
if fn == 'config.json':
config = file_data
try:
tt = json.loads(file_data)
host = tt['host']
                                x = int(tt['offsetGMT'])
                                if x != -1:
                                    offsetGMT = x
except:
pass
print('config\n'+file_data)
if fn == 'sched.json':
sched = file_data
print('sched\n'+file_data)
if fn == 'buttons.json':
buttons = file_data
print('buttons\n'+file_data)
del file_data
else:
# Regular form value
print('%s=%s' % (field, form[field].value))
self.xsend("<html><body>ok</body></html>")
if self.path == '/update':
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
for field in form.keys():
fields = form[field]
print(fields)
for field_item in fields:
if field_item.filename:
# The field contains an uploaded file
file_data = field_item.file.read()
file_len = len(file_data)
print('Uploaded update %s as "%s" (%d bytes)' %
(field, field_item.filename, file_len))
                        # not saving the update file
else:
# Regular form value
print('%s=%s' % (field_item.name,
field_item.value))
self.send_response(301)
self.send_header('Location', '/updatesuccessful.html')
self.end_headers()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
os.chdir("data-uncompressed")
PORT = 80
Handler = ESPrinkler2RequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
print "serving folder", os.getcwd()
httpd.serve_forever()
|
|
# -*- coding: utf-8 -*-
"""SQLite parser plugin for Android call history database files."""
from dfdatetime import java_time as dfdatetime_java_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class AndroidCallEventData(events.EventData):
"""Android Call event data.
Attributes:
call_type (str): type of call, such as: Incoming, Outgoing, or Missed.
duration (int): number of seconds the call lasted.
    name (str): name associated with the remote party.
    number (str): phone number associated with the remote party.
offset (str): identifier of the row, from which the event data was
extracted.
query (str): SQL query that was used to obtain the event data.
"""
DATA_TYPE = 'android:event:call'
def __init__(self):
"""Initializes event data."""
super(AndroidCallEventData, self).__init__(data_type=self.DATA_TYPE)
self.call_type = None
self.duration = None
self.name = None
self.number = None
self.offset = None
self.query = None
class AndroidCallPlugin(interface.SQLitePlugin):
"""SQLite parser plugin for Android call history database files.
The Android call history database file is typically stored in:
contacts2.db
"""
NAME = 'android_calls'
DATA_FORMAT = 'Android call history SQLite database (contacts2.db) file'
REQUIRED_STRUCTURE = {
'calls': frozenset(['_id', 'date', 'number', 'name', 'duration', 'type'])}
QUERIES = [
('SELECT _id AS id, date, number, name, duration, type FROM calls',
'ParseCallsRow')]
SCHEMAS = [{
'_sync_state': (
'CREATE TABLE _sync_state (_id INTEGER PRIMARY KEY, account_name '
'TEXT NOT NULL, account_type TEXT NOT NULL, data TEXT, '
'UNIQUE(account_name, account_type))'),
'_sync_state_metadata': (
'CREATE TABLE _sync_state_metadata (version INTEGER)'),
'accounts': (
'CREATE TABLE accounts (_id INTEGER PRIMARY KEY AUTOINCREMENT, '
'account_name TEXT, account_type TEXT, data_set TEXT)'),
'agg_exceptions': (
'CREATE TABLE agg_exceptions (_id INTEGER PRIMARY KEY '
'AUTOINCREMENT, type INTEGER NOT NULL, raw_contact_id1 INTEGER '
'REFERENCES raw_contacts(_id), raw_contact_id2 INTEGER REFERENCES '
'raw_contacts(_id))'),
'android_metadata': (
'CREATE TABLE android_metadata (locale TEXT)'),
'calls': (
'CREATE TABLE calls (_id INTEGER PRIMARY KEY AUTOINCREMENT, number '
'TEXT, date INTEGER, duration INTEGER, type INTEGER, new INTEGER, '
'name TEXT, numbertype INTEGER, numberlabel TEXT, countryiso TEXT, '
'voicemail_uri TEXT, is_read INTEGER, geocoded_location TEXT, '
'lookup_uri TEXT, matched_number TEXT, normalized_number TEXT, '
'photo_id INTEGER NOT NULL DEFAULT 0, formatted_number TEXT, _data '
'TEXT, has_content INTEGER, mime_type TEXT, source_data TEXT, '
'source_package TEXT, state INTEGER)'),
'contacts': (
'CREATE TABLE contacts (_id INTEGER PRIMARY KEY AUTOINCREMENT, '
'name_raw_contact_id INTEGER REFERENCES raw_contacts(_id), photo_id '
'INTEGER REFERENCES data(_id), photo_file_id INTEGER REFERENCES '
'photo_files(_id), custom_ringtone TEXT, send_to_voicemail INTEGER '
'NOT NULL DEFAULT 0, times_contacted INTEGER NOT NULL DEFAULT 0, '
'last_time_contacted INTEGER, starred INTEGER NOT NULL DEFAULT 0, '
'has_phone_number INTEGER NOT NULL DEFAULT 0, lookup TEXT, '
'status_update_id INTEGER REFERENCES data(_id), '
'contact_last_updated_timestamp INTEGER)'),
'data': (
'CREATE TABLE data (_id INTEGER PRIMARY KEY AUTOINCREMENT, '
'package_id INTEGER REFERENCES package(_id), mimetype_id INTEGER '
'REFERENCES mimetype(_id) NOT NULL, raw_contact_id INTEGER '
'REFERENCES raw_contacts(_id) NOT NULL, is_read_only INTEGER NOT '
'NULL DEFAULT 0, is_primary INTEGER NOT NULL DEFAULT 0, '
'is_super_primary INTEGER NOT NULL DEFAULT 0, data_version INTEGER '
'NOT NULL DEFAULT 0, data1 TEXT, data2 TEXT, data3 TEXT, data4 '
'TEXT, data5 TEXT, data6 TEXT, data7 TEXT, data8 TEXT, data9 TEXT, '
'data10 TEXT, data11 TEXT, data12 TEXT, data13 TEXT, data14 TEXT, '
'data15 TEXT, data_sync1 TEXT, data_sync2 TEXT, data_sync3 TEXT, '
'data_sync4 TEXT )'),
'data_usage_stat': (
'CREATE TABLE data_usage_stat(stat_id INTEGER PRIMARY KEY '
'AUTOINCREMENT, data_id INTEGER NOT NULL, usage_type INTEGER NOT '
'NULL DEFAULT 0, times_used INTEGER NOT NULL DEFAULT 0, '
'last_time_used INTEGER NOT NULL DEFAULT 0, FOREIGN KEY(data_id) '
'REFERENCES data(_id))'),
'default_directory': (
'CREATE TABLE default_directory (_id INTEGER PRIMARY KEY)'),
'deleted_contacts': (
'CREATE TABLE deleted_contacts (contact_id INTEGER PRIMARY KEY, '
'contact_deleted_timestamp INTEGER NOT NULL default 0)'),
'directories': (
'CREATE TABLE directories(_id INTEGER PRIMARY KEY AUTOINCREMENT, '
'packageName TEXT NOT NULL, authority TEXT NOT NULL, typeResourceId '
'INTEGER, typeResourceName TEXT, accountType TEXT, accountName '
'TEXT, displayName TEXT, exportSupport INTEGER NOT NULL DEFAULT 0, '
'shortcutSupport INTEGER NOT NULL DEFAULT 0, photoSupport INTEGER '
'NOT NULL DEFAULT 0)'),
'groups': (
'CREATE TABLE groups (_id INTEGER PRIMARY KEY AUTOINCREMENT, '
'package_id INTEGER REFERENCES package(_id), account_name STRING '
'DEFAULT NULL, account_type STRING DEFAULT NULL, data_set STRING '
'DEFAULT NULL, sourceid TEXT, version INTEGER NOT NULL DEFAULT 1, '
'dirty INTEGER NOT NULL DEFAULT 0, title TEXT, title_res INTEGER, '
'notes TEXT, system_id TEXT, deleted INTEGER NOT NULL DEFAULT 0, '
'group_visible INTEGER NOT NULL DEFAULT 0, should_sync INTEGER NOT '
'NULL DEFAULT 1, auto_add INTEGER NOT NULL DEFAULT 0, favorites '
'INTEGER NOT NULL DEFAULT 0, group_is_read_only INTEGER NOT NULL '
'DEFAULT 0, sync1 TEXT, sync2 TEXT, sync3 TEXT, sync4 TEXT , '
'account_id INTEGER REFERENCES accounts(_id))'),
'mimetypes': (
'CREATE TABLE mimetypes (_id INTEGER PRIMARY KEY AUTOINCREMENT, '
'mimetype TEXT NOT NULL)'),
'name_lookup': (
'CREATE TABLE name_lookup (data_id INTEGER REFERENCES data(_id) NOT '
'NULL, raw_contact_id INTEGER REFERENCES raw_contacts(_id) NOT '
'NULL, normalized_name TEXT NOT NULL, name_type INTEGER NOT NULL, '
'PRIMARY KEY (data_id, normalized_name, name_type))'),
'nickname_lookup': (
'CREATE TABLE nickname_lookup (name TEXT, cluster TEXT)'),
'packages': (
'CREATE TABLE packages (_id INTEGER PRIMARY KEY AUTOINCREMENT, '
'package TEXT NOT NULL)'),
'phone_lookup': (
'CREATE TABLE phone_lookup (data_id INTEGER REFERENCES data(_id) '
'NOT NULL, raw_contact_id INTEGER REFERENCES raw_contacts(_id) NOT '
'NULL, normalized_number TEXT NOT NULL, min_match TEXT NOT NULL)'),
'photo_files': (
'CREATE TABLE photo_files (_id INTEGER PRIMARY KEY AUTOINCREMENT, '
'height INTEGER NOT NULL, width INTEGER NOT NULL, filesize INTEGER '
'NOT NULL)'),
'properties': (
'CREATE TABLE properties (property_key TEXT PRIMARY KEY, '
'property_value TEXT )')}]
CALL_TYPE = {
1: 'INCOMING',
2: 'OUTGOING',
3: 'MISSED'}
def ParseCallsRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a Call record row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
call_type = self._GetRowValue(query_hash, row, 'type')
call_type = self.CALL_TYPE.get(call_type, 'UNKNOWN')
duration = self._GetRowValue(query_hash, row, 'duration')
timestamp = self._GetRowValue(query_hash, row, 'date')
event_data = AndroidCallEventData()
event_data.call_type = call_type
event_data.duration = self._GetRowValue(query_hash, row, 'duration')
event_data.name = self._GetRowValue(query_hash, row, 'name')
event_data.number = self._GetRowValue(query_hash, row, 'number')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call Started')
parser_mediator.ProduceEventWithEventData(event, event_data)
if duration:
if isinstance(duration, str):
try:
duration = int(duration, 10)
except ValueError:
duration = 0
# The duration is in seconds and the date value in milliseconds.
timestamp += duration * 1000
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call Ended')
parser_mediator.ProduceEventWithEventData(event, event_data)
sqlite.SQLiteParser.RegisterPlugin(AndroidCallPlugin)
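# Illustrative note (comment only): a calls row with date=1338910800000
# (milliseconds) and duration=60 (seconds) produces a 'Call Started' event at
# that Java time and a 'Call Ended' event 60 * 1000 milliseconds later.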
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate template values for attributes.
Extends IdlType with property |constructor_type_name|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import idl_types
from idl_types import inherits_interface
from v8_globals import includes, interfaces
import v8_types
import v8_utilities
from v8_utilities import (cpp_name_or_partial, capitalize, cpp_name, has_extended_attribute,
has_extended_attribute_value, scoped_name, strip_suffix,
uncapitalize, extended_attribute_value_as_list, is_unforgeable,
is_legacy_interface_type_checking)
def attribute_context(interface, attribute):
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
idl_type.add_includes_for_type(extended_attributes)
if idl_type.enum_values:
includes.add('core/inspector/ConsoleMessage.h')
# [CheckSecurity]
is_do_not_check_security = 'DoNotCheckSecurity' in extended_attributes
is_check_security_for_frame = (
has_extended_attribute_value(interface, 'CheckSecurity', 'Frame') and
not is_do_not_check_security)
is_check_security_for_node = (
has_extended_attribute_value(attribute, 'CheckSecurity', 'Node'))
is_check_security_for_window = (
has_extended_attribute_value(interface, 'CheckSecurity', 'Window') and
not is_do_not_check_security)
if is_check_security_for_frame or is_check_security_for_node or is_check_security_for_window:
includes.add('bindings/core/v8/BindingSecurity.h')
# [CustomElementCallbacks], [Reflect]
is_custom_element_callbacks = 'CustomElementCallbacks' in extended_attributes
is_reflect = 'Reflect' in extended_attributes
if is_custom_element_callbacks or is_reflect:
includes.add('core/dom/custom/CustomElementProcessingStack.h')
# [ImplementedInPrivateScript]
is_implemented_in_private_script = 'ImplementedInPrivateScript' in extended_attributes
if is_implemented_in_private_script:
includes.add('bindings/core/v8/PrivateScriptRunner.h')
includes.add('core/frame/LocalFrame.h')
includes.add('platform/ScriptForbiddenScope.h')
# [OnlyExposedToPrivateScript]
is_only_exposed_to_private_script = 'OnlyExposedToPrivateScript' in extended_attributes
# [PerWorldBindings]
if 'PerWorldBindings' in extended_attributes:
assert idl_type.is_wrapper_type or 'LogActivity' in extended_attributes, '[PerWorldBindings] should only be used with wrapper types: %s.%s' % (interface.name, attribute.name)
if (base_idl_type == 'EventHandler' and
interface.name in ['Window', 'WorkerGlobalScope'] and
attribute.name == 'onerror'):
includes.add('bindings/core/v8/V8ErrorHandler.h')
cached_attribute_validation_method = extended_attributes.get('CachedAttribute')
keep_alive_for_gc = is_keep_alive_for_gc(interface, attribute)
if cached_attribute_validation_method or keep_alive_for_gc:
includes.add('bindings/core/v8/V8HiddenValue.h')
context = {
'access_control_list': access_control_list(interface, attribute),
'activity_logging_world_list_for_getter': v8_utilities.activity_logging_world_list(attribute, 'Getter'), # [ActivityLogging]
'activity_logging_world_list_for_setter': v8_utilities.activity_logging_world_list(attribute, 'Setter'), # [ActivityLogging]
'activity_logging_world_check': v8_utilities.activity_logging_world_check(attribute), # [ActivityLogging]
'argument_cpp_type': idl_type.cpp_type_args(used_as_rvalue_type=True),
'cached_attribute_validation_method': cached_attribute_validation_method,
'conditional_string': v8_utilities.conditional_string(attribute),
'constructor_type': idl_type.constructor_type_name
if is_constructor_attribute(attribute) else None,
'cpp_name': cpp_name(attribute),
'cpp_type': idl_type.cpp_type,
'cpp_type_initializer': idl_type.cpp_type_initializer,
'deprecate_as': v8_utilities.deprecate_as(attribute), # [DeprecateAs]
'enum_type': idl_type.enum_type,
'enum_values': idl_type.enum_values,
'exposed_test': v8_utilities.exposed(attribute, interface), # [Exposed]
'has_custom_getter': has_custom_getter(attribute),
'has_custom_setter': has_custom_setter(attribute),
'has_setter': has_setter(attribute),
'idl_type': str(idl_type), # need trailing [] on array for Dictionary::ConversionContext::setConversionType
'is_call_with_execution_context': has_extended_attribute_value(attribute, 'CallWith', 'ExecutionContext'),
'is_call_with_script_state': has_extended_attribute_value(attribute, 'CallWith', 'ScriptState'),
'is_check_security_for_frame': is_check_security_for_frame,
'is_check_security_for_node': is_check_security_for_node,
'is_check_security_for_window': is_check_security_for_window,
'is_custom_element_callbacks': is_custom_element_callbacks,
'is_expose_js_accessors': is_expose_js_accessors(interface, attribute),
'is_getter_raises_exception': # [RaisesException]
'RaisesException' in extended_attributes and
extended_attributes['RaisesException'] in (None, 'Getter'),
'is_implemented_in_private_script': is_implemented_in_private_script,
'is_keep_alive_for_gc': keep_alive_for_gc,
'is_lenient_this': 'LenientThis' in extended_attributes,
'is_nullable': idl_type.is_nullable,
'is_explicit_nullable': idl_type.is_explicit_nullable,
'is_partial_interface_member':
'PartialInterfaceImplementedAs' in extended_attributes,
'is_per_world_bindings': 'PerWorldBindings' in extended_attributes,
'is_put_forwards': 'PutForwards' in extended_attributes,
'is_read_only': attribute.is_read_only,
'is_reflect': is_reflect,
'is_replaceable': 'Replaceable' in attribute.extended_attributes,
'is_static': attribute.is_static,
'is_url': 'URL' in extended_attributes,
'is_unforgeable': is_unforgeable(interface, attribute),
'on_instance': v8_utilities.on_instance(interface, attribute),
'on_interface': v8_utilities.on_interface(interface, attribute),
'on_prototype': v8_utilities.on_prototype(interface, attribute),
'use_output_parameter_for_result': idl_type.use_output_parameter_for_result,
'measure_as': v8_utilities.measure_as(attribute, interface), # [MeasureAs]
'name': attribute.name,
'only_exposed_to_private_script': is_only_exposed_to_private_script,
'private_script_v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
extended_attributes, 'v8Value', 'cppValue', bailout_return_value='false', isolate='scriptState->isolate()'),
'property_attributes': property_attributes(interface, attribute),
'reflect_empty': extended_attributes.get('ReflectEmpty'),
'reflect_invalid': extended_attributes.get('ReflectInvalid', ''),
'reflect_missing': extended_attributes.get('ReflectMissing'),
'reflect_only': extended_attribute_value_as_list(attribute, 'ReflectOnly'),
'runtime_enabled_function': v8_utilities.runtime_enabled_function_name(attribute), # [RuntimeEnabled]
'should_be_exposed_to_script': not (is_implemented_in_private_script and is_only_exposed_to_private_script),
'world_suffixes': ['', 'ForMainWorld']
if 'PerWorldBindings' in extended_attributes
else [''], # [PerWorldBindings]
}
if is_constructor_attribute(attribute):
update_constructor_attribute_context(interface, attribute, context)
if not has_custom_getter(attribute):
getter_context(interface, attribute, context)
if not has_custom_setter(attribute) and has_setter(attribute):
setter_context(interface, attribute, context)
return context
################################################################################
# Getter
################################################################################
def getter_context(interface, attribute, context):
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
cpp_value = getter_expression(interface, attribute, context)
# Normally we can inline the function call into the return statement to
# avoid the overhead of using a Ref<> temporary, but for some cases
# (nullable types, EventHandler, [CachedAttribute], or if there are
# exceptions), we need to use a local variable.
# FIXME: check if compilers are smart enough to inline this, and if so,
# always use a local variable (for readability and CG simplicity).
release = False
if 'ImplementedInPrivateScript' in extended_attributes:
if (not idl_type.is_wrapper_type and
not idl_type.is_basic_type and
not idl_type.is_enum):
            raise Exception('Private script supports only primitive types and DOM wrappers.')
context['cpp_value_original'] = cpp_value
cpp_value = 'result'
# EventHandler has special handling
if base_idl_type != 'EventHandler':
release = idl_type.release
elif (idl_type.is_explicit_nullable or
base_idl_type == 'EventHandler' or
'CachedAttribute' in extended_attributes or
'ReflectOnly' in extended_attributes or
context['is_keep_alive_for_gc'] or
context['is_getter_raises_exception']):
context['cpp_value_original'] = cpp_value
cpp_value = 'cppValue'
# EventHandler has special handling
if base_idl_type != 'EventHandler':
release = idl_type.release
def v8_set_return_value_statement(for_main_world=False):
if context['is_keep_alive_for_gc'] or 'CachedAttribute' in extended_attributes:
return 'v8SetReturnValue(info, v8Value)'
return idl_type.v8_set_return_value(
cpp_value, extended_attributes=extended_attributes, script_wrappable='impl',
release=release, for_main_world=for_main_world, is_static=attribute.is_static)
context.update({
'cpp_value': cpp_value,
'cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
cpp_value=cpp_value, creation_context='holder',
extended_attributes=extended_attributes),
'v8_set_return_value_for_main_world': v8_set_return_value_statement(for_main_world=True),
'v8_set_return_value': v8_set_return_value_statement(),
})
def getter_expression(interface, attribute, context):
arguments = []
this_getter_base_name = getter_base_name(interface, attribute, arguments)
getter_name = scoped_name(interface, attribute, this_getter_base_name)
if 'ImplementedInPrivateScript' in attribute.extended_attributes:
arguments.append('toLocalFrame(toFrameIfNotDetached(info.GetIsolate()->GetCurrentContext()))')
arguments.append('impl')
arguments.append('&result')
arguments.extend(v8_utilities.call_with_arguments(
attribute.extended_attributes.get('CallWith')))
# Members of IDL partial interface definitions are implemented in C++ as
# static member functions, which for instance members (non-static members)
# take *impl as their first argument
    if ('PartialInterfaceImplementedAs' in attribute.extended_attributes and
            'ImplementedInPrivateScript' not in attribute.extended_attributes and
            not attribute.is_static):
arguments.append('*impl')
if attribute.idl_type.is_explicit_nullable:
arguments.append('isNull')
if context['is_getter_raises_exception']:
arguments.append('exceptionState')
if attribute.idl_type.use_output_parameter_for_result:
arguments.append('result')
return '%s(%s)' % (getter_name, ', '.join(arguments))
CONTENT_ATTRIBUTE_GETTER_NAMES = {
'boolean': 'fastHasAttribute',
'long': 'getIntegralAttribute',
'unsigned long': 'getUnsignedIntegralAttribute',
}
def getter_base_name(interface, attribute, arguments):
extended_attributes = attribute.extended_attributes
if 'ImplementedInPrivateScript' in extended_attributes:
return '%sAttributeGetter' % uncapitalize(cpp_name(attribute))
if 'Reflect' not in extended_attributes:
return uncapitalize(cpp_name(attribute))
content_attribute_name = extended_attributes['Reflect'] or attribute.name.lower()
if content_attribute_name in ['class', 'id', 'name']:
# Special-case for performance optimization.
return 'get%sAttribute' % content_attribute_name.capitalize()
arguments.append(scoped_content_attribute_name(interface, attribute))
base_idl_type = attribute.idl_type.base_type
if base_idl_type in CONTENT_ATTRIBUTE_GETTER_NAMES:
return CONTENT_ATTRIBUTE_GETTER_NAMES[base_idl_type]
if 'URL' in attribute.extended_attributes:
return 'getURLAttribute'
return 'fastGetAttribute'
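# Illustrative mapping (comment only): a [Reflect] attribute declared as
# "attribute boolean hidden" resolves to fastHasAttribute via the table above,
# while a non-reflected "attribute DOMString foo" resolves to the
# uncapitalized C++ getter foo().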
def is_keep_alive_for_gc(interface, attribute):
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
return (
# For readonly attributes, for performance reasons we keep the attribute
# wrapper alive while the owner wrapper is alive, because the attribute
# never changes.
(attribute.is_read_only and
idl_type.is_wrapper_type and
# There are some exceptions, however:
not(
# Node lifetime is managed by object grouping.
inherits_interface(interface.name, 'Node') or
inherits_interface(base_idl_type, 'Node') or
# A self-reference is unnecessary.
attribute.name == 'self' or
# FIXME: Remove these hard-coded hacks.
base_idl_type in ['EventTarget', 'Window'] or
base_idl_type.startswith(('HTML', 'SVG')))))
################################################################################
# Setter
################################################################################
def setter_context(interface, attribute, context):
if 'PutForwards' in attribute.extended_attributes:
# Use target interface and attribute in place of original interface and
# attribute from this point onwards.
target_interface_name = attribute.idl_type.base_type
target_attribute_name = attribute.extended_attributes['PutForwards']
interface = interfaces[target_interface_name]
try:
attribute = next(candidate
for candidate in interface.attributes
if candidate.name == target_attribute_name)
except StopIteration:
            raise Exception('[PutForwards] target not found:\n'
                            'Attribute "%s" is not present in interface "%s"' %
                            (target_attribute_name, target_interface_name))
if ('Replaceable' in attribute.extended_attributes or
is_constructor_attribute(attribute)):
context['cpp_setter'] = '%sCreateDataProperty(propertyName, v8Value, info)' % cpp_name(interface)
return
extended_attributes = attribute.extended_attributes
idl_type = attribute.idl_type
# [RaisesException], [RaisesException=Setter]
is_setter_raises_exception = (
'RaisesException' in extended_attributes and
extended_attributes['RaisesException'] in [None, 'Setter'])
# [TypeChecking=Interface] / [LegacyInterfaceTypeChecking]
has_type_checking_interface = (
not is_legacy_interface_type_checking(interface, attribute) and
idl_type.is_wrapper_type)
context.update({
'has_setter_exception_state':
is_setter_raises_exception or has_type_checking_interface or
idl_type.v8_conversion_needs_exception_state,
'has_type_checking_interface': has_type_checking_interface,
'is_setter_call_with_execution_context': has_extended_attribute_value(
attribute, 'SetterCallWith', 'ExecutionContext'),
'is_setter_raises_exception': is_setter_raises_exception,
'private_script_cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
'cppValue', isolate='scriptState->isolate()',
creation_context='scriptState->context()->Global()'),
'v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
extended_attributes, 'v8Value', 'cppValue'),
})
# setter_expression() depends on context values we set above.
context['cpp_setter'] = setter_expression(interface, attribute, context)
def setter_expression(interface, attribute, context):
extended_attributes = attribute.extended_attributes
arguments = v8_utilities.call_with_arguments(
extended_attributes.get('SetterCallWith') or
extended_attributes.get('CallWith'))
this_setter_base_name = setter_base_name(interface, attribute, arguments)
setter_name = scoped_name(interface, attribute, this_setter_base_name)
# Members of IDL partial interface definitions are implemented in C++ as
# static member functions, which for instance members (non-static members)
# take *impl as their first argument
if ('PartialInterfaceImplementedAs' in extended_attributes and
not 'ImplementedInPrivateScript' in extended_attributes and
not attribute.is_static):
arguments.append('*impl')
idl_type = attribute.idl_type
if 'ImplementedInPrivateScript' in extended_attributes:
arguments.append('toLocalFrame(toFrameIfNotDetached(info.GetIsolate()->GetCurrentContext()))')
arguments.append('impl')
arguments.append('cppValue')
elif idl_type.base_type == 'EventHandler':
getter_name = scoped_name(interface, attribute, cpp_name(attribute))
context['event_handler_getter_expression'] = '%s(%s)' % (
getter_name, ', '.join(arguments))
if (interface.name in ['Window', 'WorkerGlobalScope'] and
attribute.name == 'onerror'):
includes.add('bindings/core/v8/V8ErrorHandler.h')
arguments.append('V8EventListenerList::findOrCreateWrapper<V8ErrorHandler>(v8Value, true, ScriptState::current(info.GetIsolate()))')
else:
arguments.append('V8EventListenerList::getEventListener(ScriptState::current(info.GetIsolate()), v8Value, true, ListenerFindOrCreate)')
elif idl_type.is_interface_type:
# FIXME: should be able to eliminate WTF::getPtr in most or all cases
arguments.append('WTF::getPtr(cppValue)')
else:
arguments.append('cppValue')
if context['is_setter_raises_exception']:
arguments.append('exceptionState')
return '%s(%s)' % (setter_name, ', '.join(arguments))
CONTENT_ATTRIBUTE_SETTER_NAMES = {
'boolean': 'setBooleanAttribute',
'long': 'setIntegralAttribute',
'unsigned long': 'setUnsignedIntegralAttribute',
}
def setter_base_name(interface, attribute, arguments):
if 'ImplementedInPrivateScript' in attribute.extended_attributes:
return '%sAttributeSetter' % uncapitalize(cpp_name(attribute))
if 'Reflect' not in attribute.extended_attributes:
return 'set%s' % capitalize(cpp_name(attribute))
arguments.append(scoped_content_attribute_name(interface, attribute))
base_idl_type = attribute.idl_type.base_type
if base_idl_type in CONTENT_ATTRIBUTE_SETTER_NAMES:
return CONTENT_ATTRIBUTE_SETTER_NAMES[base_idl_type]
return 'setAttribute'
def scoped_content_attribute_name(interface, attribute):
content_attribute_name = attribute.extended_attributes['Reflect'] or attribute.name.lower()
if interface.name.startswith('SVG'):
namespace = 'SVGNames'
else:
namespace = 'HTMLNames'
includes.add('core/%s.h' % namespace)
return '%s::%sAttr' % (namespace, content_attribute_name)
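# For example, a (hypothetical) attribute declared as
#   [Reflect=for] attribute DOMString htmlFor;
# on an HTML interface resolves to 'HTMLNames::forAttr' and pulls in
# 'core/HTMLNames.h'.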
################################################################################
# Attribute configuration
################################################################################
# [PutForwards], [Replaceable]
def has_setter(attribute):
return (not attribute.is_read_only or
'PutForwards' in attribute.extended_attributes or
'Replaceable' in attribute.extended_attributes)
# [DoNotCheckSecurity], [Unforgeable]
def access_control_list(interface, attribute):
extended_attributes = attribute.extended_attributes
access_control = []
if 'DoNotCheckSecurity' in extended_attributes:
do_not_check_security = extended_attributes['DoNotCheckSecurity']
if do_not_check_security == 'Setter':
access_control.append('v8::ALL_CAN_WRITE')
else:
access_control.append('v8::ALL_CAN_READ')
if has_setter(attribute):
access_control.append('v8::ALL_CAN_WRITE')
if is_unforgeable(interface, attribute):
access_control.append('v8::PROHIBITS_OVERWRITING')
return access_control or ['v8::DEFAULT']
# [NotEnumerable], [Unforgeable]
def property_attributes(interface, attribute):
extended_attributes = attribute.extended_attributes
property_attributes_list = []
if ('NotEnumerable' in extended_attributes or
is_constructor_attribute(attribute)):
property_attributes_list.append('v8::DontEnum')
if is_unforgeable(interface, attribute):
property_attributes_list.append('v8::DontDelete')
return property_attributes_list or ['v8::None']
# [Custom], [Custom=Getter]
def has_custom_getter(attribute):
extended_attributes = attribute.extended_attributes
return ('Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Getter'])
# [Custom], [Custom=Setter]
def has_custom_setter(attribute):
extended_attributes = attribute.extended_attributes
return (not attribute.is_read_only and
'Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Setter'])
# [ExposeJSAccessors]
def is_expose_js_accessors(interface, attribute):
# Default behavior
is_accessor = True
if ('ExposeJSAccessors' in interface.extended_attributes and
'DoNotExposeJSAccessors' in interface.extended_attributes):
        raise Exception('Both ExposeJSAccessors and DoNotExposeJSAccessors are specified on interface: ' + interface.name)
if 'ExposeJSAccessors' in interface.extended_attributes:
is_accessor = True
if 'DoNotExposeJSAccessors' in interface.extended_attributes:
is_accessor = False
# Note that ExposeJSAccessors and DoNotExposeJSAccessors are more powerful
# than 'static', [Unforgeable] and [OverrideBuiltins].
if ('ExposeJSAccessors' in attribute.extended_attributes and
'DoNotExposeJSAccessors' in attribute.extended_attributes):
        raise Exception('Both ExposeJSAccessors and DoNotExposeJSAccessors are specified on attribute: ' + attribute.name + ' of interface: ' + interface.name)
if 'ExposeJSAccessors' in attribute.extended_attributes:
return True
if 'DoNotExposeJSAccessors' in attribute.extended_attributes:
return False
# These attributes must not be accessors on prototype chains.
if (is_constructor_attribute(attribute) or
attribute.is_static or
is_unforgeable(interface, attribute) or
'OverrideBuiltins' in interface.extended_attributes):
return False
# The members of Window interface must be placed on the instance object.
if interface.name == 'Window':
return False
return is_accessor
################################################################################
# Constructors
################################################################################
idl_types.IdlType.constructor_type_name = property(
# FIXME: replace this with a [ConstructorAttribute] extended attribute
lambda self: strip_suffix(self.base_type, 'Constructor'))
def is_constructor_attribute(attribute):
# FIXME: replace this with [ConstructorAttribute] extended attribute
return attribute.idl_type.name.endswith('Constructor')
def update_constructor_attribute_context(interface, attribute, context):
context['needs_constructor_getter_callback'] = context['measure_as'] or context['deprecate_as']
# When the attribute name is the same as the interface name, do not generate
# callback functions for each attribute and use
# {{cpp_class}}ConstructorAttributeSetterCallback. Otherwise, generate
# a callback function in order to hard-code the attribute name.
context['needs_constructor_setter_callback'] = context['name'] != context['constructor_type']
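# For example (hypothetical): on interface Foo, an attribute named "Foo" of
# type FooConstructor can share the generated
# {{cpp_class}}ConstructorAttributeSetterCallback, whereas an attribute named
# "webkitFoo" needs its own callback so that the name can be hard-coded.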
|
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
## Copyright (C) 2005 Guillaume Valadon <guedou@hongo.wide.ad.jp>
## Arnaud Ebalard <arnaud.ebalard@eads.net>
"""
Routing and network interface handling for IPv6.
"""
#############################################################################
#############################################################################
### Routing/Interfaces stuff ###
#############################################################################
#############################################################################
import socket
from config import conf
from utils6 import *
from arch import *
class Route6:
def __init__(self):
self.invalidate_cache()
self.resync()
def invalidate_cache(self):
self.cache = {}
def flush(self):
self.invalidate_cache()
self.routes = []
def resync(self):
# TODO : At the moment, resync will drop existing Teredo routes
# if any. Change that ...
self.invalidate_cache()
self.routes = read_routes6()
if self.routes == []:
log_loading.info("No IPv6 support in kernel")
def __repr__(self):
rtlst = [('Destination', 'Next Hop', "iface", "src candidates")]
for net,msk,gw,iface,cset in self.routes:
rtlst.append(('%s/%i'% (net,msk), gw, iface, ", ".join(cset)))
colwidth = map(lambda x: max(map(lambda y: len(y), x)), apply(zip, rtlst))
fmt = " ".join(map(lambda x: "%%-%ds"%x, colwidth))
rt = "\n".join(map(lambda x: fmt % x, rtlst))
return rt
# Unlike Scapy's Route.make_route() function, we do not have 'host' and 'net'
# parameters. We only have a 'dst' parameter that accepts 'prefix' and
# 'prefix/prefixlen' values.
    # WARNING: Providing a specific device does not currently work correctly.
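    # Ex (sketch): make_route("2001:db8:cafe::/48", gw="2001:db8:cafe::1")
    # returns a ("2001:db8:cafe::", 48, "2001:db8:cafe::1", iface, candidates)
    # tuple, ready to be appended to self.routes.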
def make_route(self, dst, gw=None, dev=None):
"""Internal function : create a route for 'dst' via 'gw'.
"""
prefix, plen = (dst.split("/")+["128"])[:2]
plen = int(plen)
if gw is None:
gw = "::"
if dev is None:
dev, ifaddr, x = self.route(gw)
else:
# TODO: do better than that
# replace that unique address by the list of all addresses
lifaddr = in6_getifaddr()
devaddrs = filter(lambda x: x[2] == dev, lifaddr)
ifaddr = construct_source_candidate_set(prefix, plen, devaddrs, LOOPBACK_NAME)
return (prefix, plen, gw, dev, ifaddr)
def add(self, *args, **kargs):
"""Ex:
add(dst="2001:db8:cafe:f000::/56")
add(dst="2001:db8:cafe:f000::/56", gw="2001:db8:cafe::1")
add(dst="2001:db8:cafe:f000::/64", gw="2001:db8:cafe::1", dev="eth0")
"""
self.invalidate_cache()
self.routes.append(self.make_route(*args, **kargs))
def delt(self, dst, gw=None):
""" Ex:
delt(dst="::/0")
delt(dst="2001:db8:cafe:f000::/56")
delt(dst="2001:db8:cafe:f000::/56", gw="2001:db8:deca::1")
"""
tmp = dst+"/128"
dst, plen = tmp.split('/')[:2]
dst = in6_ptop(dst)
plen = int(plen)
l = filter(lambda x: in6_ptop(x[0]) == dst and x[1] == plen, self.routes)
if gw:
gw = in6_ptop(gw)
            l = filter(lambda x: in6_ptop(x[2]) == gw, l)
if len(l) == 0:
warning("No matching route found")
elif len(l) > 1:
warning("Found more than one match. Aborting.")
else:
i=self.routes.index(l[0])
self.invalidate_cache()
del(self.routes[i])
def ifchange(self, iff, addr):
the_addr, the_plen = (addr.split("/")+["128"])[:2]
the_plen = int(the_plen)
naddr = inet_pton(socket.AF_INET6, the_addr)
nmask = in6_cidr2mask(the_plen)
the_net = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr))
for i in range(len(self.routes)):
net,plen,gw,iface,addr = self.routes[i]
if iface != iff:
continue
if gw == '::':
self.routes[i] = (the_net,the_plen,gw,iface,the_addr)
else:
self.routes[i] = (net,the_plen,gw,iface,the_addr)
self.invalidate_cache()
ip6_neigh_cache.flush()
def ifdel(self, iff):
""" removes all route entries that uses 'iff' interface. """
new_routes=[]
for rt in self.routes:
if rt[3] != iff:
new_routes.append(rt)
self.invalidate_cache()
self.routes = new_routes
def ifadd(self, iff, addr):
"""
        Add an interface 'iff' with the provided address to the routing table.
        Ex: ifadd('eth0', '2001:bd8:cafe:1::1/64') will add the following
        entry to the Scapy6 internal routing table:
        Destination Next Hop iface Def src @
        2001:bd8:cafe:1::/64 :: eth0 2001:bd8:cafe:1::1
        The prefix length can be omitted; in that case, a value of 128
        is used.
"""
addr, plen = (addr.split("/")+["128"])[:2]
addr = in6_ptop(addr)
plen = int(plen)
naddr = inet_pton(socket.AF_INET6, addr)
nmask = in6_cidr2mask(plen)
prefix = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr))
self.invalidate_cache()
self.routes.append((prefix,plen,'::',iff,[addr]))
def route(self, dst, dev=None):
"""
Provide best route to IPv6 destination address, based on Scapy6
internal routing table content.
        When a set of addresses is passed (e.g. 2001:db8:cafe:*::1-5), one
        address of the set is used. Be aware of this behavior when using
        wildcards in the upper parts of addresses!
        If the 'dst' parameter is an FQDN, name resolution is performed and
        the result is used.
        If the optional 'dev' parameter specifies an interface, filtering is
        performed to limit the search to routes associated with that
        interface.
"""
# Transform "2001:db8:cafe:*::1-5:0/120" to one IPv6 address of the set
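        # e.g. (worked example) "2001:db8:cafe:*::1-5:0" first becomes
        # "2001:db8:cafe:0::1-5:0" ('*' -> '0'), then the loop below trims
        # each 'x-y' range down to its lower bound, giving "2001:db8:cafe:0::1:0".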
dst = dst.split("/")[0]
savedst = dst # In case following inet_pton() fails
dst = dst.replace("*","0")
l = dst.find("-")
while l >= 0:
m = (dst[l:]+":").find(":")
dst = dst[:l]+dst[l+m:]
l = dst.find("-")
try:
inet_pton(socket.AF_INET6, dst)
except socket.error:
dst = socket.getaddrinfo(savedst, None, socket.AF_INET6)[0][-1][0]
# TODO : Check if name resolution went well
# Deal with dev-specific request for cache search
k = dst
if dev is not None:
k = dst + "%%" + dev
if k in self.cache:
return self.cache[k]
pathes = []
# TODO : review all kinds of addresses (scope and *cast) to see
# if we are able to cope with everything possible. I'm convinced
# it's not the case.
# -- arnaud
for p, plen, gw, iface, cset in self.routes:
if dev is not None and iface != dev:
continue
if in6_isincluded(dst, p, plen):
pathes.append((plen, (iface, cset, gw)))
elif (in6_ismlladdr(dst) and in6_islladdr(p) and in6_islladdr(cset[0])):
pathes.append((plen, (iface, cset, gw)))
if not pathes:
warning("No route found for IPv6 destination %s (no default route?)" % dst)
return (LOOPBACK_NAME, "::", "::") # XXX Linux specific
# Sort with longest prefix first
pathes.sort(reverse=True)
best_plen = pathes[0][0]
pathes = filter(lambda x: x[0] == best_plen, pathes)
res = []
for p in pathes: # Here we select best source address for every route
tmp = p[1]
srcaddr = get_source_addr_from_candidate_set(dst, p[1][1])
if srcaddr is not None:
res.append((p[0], (tmp[0], srcaddr, tmp[2])))
# Symptom : 2 routes with same weight (our weight is plen)
# Solution :
# - dst is unicast global. Check if it is 6to4 and we have a source
# 6to4 address in those available
# - dst is link local (unicast or multicast) and multiple output
# interfaces are available. Take main one (conf.iface6)
# - if none of the previous or ambiguity persists, be lazy and keep
# first one
# XXX TODO : in a _near_ future, include metric in the game
if len(res) > 1:
tmp = []
if in6_isgladdr(dst) and in6_isaddr6to4(dst):
# TODO : see if taking the longest match between dst and
# every source addresses would provide better results
tmp = filter(lambda x: in6_isaddr6to4(x[1][1]), res)
elif in6_ismaddr(dst) or in6_islladdr(dst):
# TODO : I'm sure we are not covering all addresses. Check that
tmp = filter(lambda x: x[1][0] == conf.iface6, res)
if tmp:
res = tmp
# Fill the cache (including dev-specific request)
k = dst
if dev is not None:
k = dst + "%%" + dev
self.cache[k] = res[0][1]
return res[0][1]
conf.route6 = Route6()
_res = conf.route6.route("::/0")
if _res:
iff, gw, addr = _res
conf.iface6 = iff
del(_res)
|
|
# Copyright (C) 2016, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_log import log as logging
from tempest.api.compute import base
from tempest.common import utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
class DeviceTaggingTest(base.BaseV2ComputeTest):
min_microversion = '2.32'
    # NOTE(mriedem): max_microversion looks odd but it's actually correct. Due to a
# bug in the 2.32 microversion, tags on block devices only worked with the
# 2.32 microversion specifically. And tags on networks only worked between
# 2.32 and 2.36 inclusive; the 2.37 microversion broke tags for networks.
max_microversion = '2.32'
@classmethod
def skip_checks(cls):
super(DeviceTaggingTest, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException('Neutron is required')
if not CONF.validation.run_validation:
raise cls.skipException('Validation must be enabled')
if (not CONF.compute_feature_enabled.config_drive
and not CONF.compute_feature_enabled.metadata_service):
raise cls.skipException('One of metadata or config drive must be '
'enabled')
@classmethod
def setup_clients(cls):
super(DeviceTaggingTest, cls).setup_clients()
cls.networks_client = cls.os_primary.networks_client
cls.ports_client = cls.os_primary.ports_client
cls.subnets_client = cls.os_primary.subnets_client
cls.interfaces_client = cls.os_primary.interfaces_client
@classmethod
def setup_credentials(cls):
cls.set_network_resources(network=True, subnet=True, router=True,
dhcp=True)
super(DeviceTaggingTest, cls).setup_credentials()
def verify_device_metadata(self, md_json):
md_dict = json.loads(md_json)
for d in md_dict['devices']:
if d['type'] == 'nic':
if d['mac'] == self.port1['mac_address']:
self.assertEqual(d['tags'], ['port-1'])
if d['mac'] == self.port2['mac_address']:
self.assertEqual(d['tags'], ['port-2'])
if d['mac'] == self.net_2_100_mac:
self.assertEqual(d['tags'], ['net-2-100'])
if d['mac'] == self.net_2_200_mac:
self.assertEqual(d['tags'], ['net-2-200'])
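        # For reference, a (hypothetical) tagged device entry in
        # meta_data.json looks like:
        #   {"type": "nic", "mac": "fa:16:3e:...", "tags": ["port-1"]}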
# A hypervisor may present multiple paths to a tagged disk, so
# there may be duplicated tags in the metadata, use set() to
# remove duplicated tags.
found_devices = [d['tags'][0] for d in md_dict['devices']]
self.assertEqual(set(found_devices), set(['port-1', 'port-2',
'net-1', 'net-2-100',
'net-2-200', 'boot',
'other']))
@decorators.idempotent_id('a2e65a6c-66f1-4442-aaa8-498c31778d96')
@utils.services('network', 'volume', 'image')
def test_device_tagging(self):
# Create volumes
# The create_volume methods waits for the volumes to be available and
# the base class will clean them up on tearDown.
boot_volume = self.create_volume(CONF.compute.image_ref)
other_volume = self.create_volume()
untagged_volume = self.create_volume()
# Create networks
net1 = self.networks_client.create_network(
name=data_utils.rand_name('device-tagging-net1'))['network']
self.addCleanup(self.networks_client.delete_network, net1['id'])
net2 = self.networks_client.create_network(
name=data_utils.rand_name('device-tagging-net2'))['network']
self.addCleanup(self.networks_client.delete_network, net2['id'])
# Create subnets
subnet1 = self.subnets_client.create_subnet(
network_id=net1['id'],
cidr='10.1.1.0/24',
ip_version=4)['subnet']
self.addCleanup(self.subnets_client.delete_subnet, subnet1['id'])
subnet2 = self.subnets_client.create_subnet(
network_id=net2['id'],
cidr='10.2.2.0/24',
ip_version=4)['subnet']
self.addCleanup(self.subnets_client.delete_subnet, subnet2['id'])
# Create ports
self.port1 = self.ports_client.create_port(
network_id=net1['id'],
fixed_ips=[{'subnet_id': subnet1['id']}])['port']
self.addCleanup(self.ports_client.delete_port, self.port1['id'])
self.port2 = self.ports_client.create_port(
network_id=net1['id'],
fixed_ips=[{'subnet_id': subnet1['id']}])['port']
self.addCleanup(self.ports_client.delete_port, self.port2['id'])
# Create server
admin_pass = data_utils.rand_password()
config_drive_enabled = CONF.compute_feature_enabled.config_drive
validation_resources = self.get_test_validation_resources(
self.os_primary)
server = self.create_test_server(
validatable=True,
validation_resources=validation_resources,
config_drive=config_drive_enabled,
adminPass=admin_pass,
name=data_utils.rand_name('device-tagging-server'),
networks=[
# Validation network for ssh
{
'uuid': self.get_tenant_network()['id']
},
# Different tags for different ports
{
'port': self.port1['id'],
'tag': 'port-1'
},
{
'port': self.port2['id'],
'tag': 'port-2'
},
# Two nics on same net, one tagged one not
{
'uuid': net1['id'],
'tag': 'net-1'
},
{
'uuid': net1['id']
},
# Two nics on same net, different IP
{
'uuid': net2['id'],
'fixed_ip': '10.2.2.100',
'tag': 'net-2-100'
},
{
'uuid': net2['id'],
'fixed_ip': '10.2.2.200',
'tag': 'net-2-200'
}
],
block_device_mapping_v2=[
# Boot volume
{
'uuid': boot_volume['id'],
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'tag': 'boot'
},
# Other volume
{
'uuid': other_volume['id'],
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 1,
'tag': 'other'
},
# Untagged volume
{
'uuid': untagged_volume['id'],
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 2
}
])
self.addCleanup(self.delete_server, server['id'])
self.ssh_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
CONF.validation.image_ssh_user,
admin_pass,
validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
# Find the MAC addresses of our fixed IPs
self.net_2_100_mac = None
self.net_2_200_mac = None
ifaces = self.interfaces_client.list_interfaces(server['id'])
for iface in ifaces['interfaceAttachments']:
if 'fixed_ips' in iface:
for ip in iface['fixed_ips']:
if ip['ip_address'] == '10.2.2.100':
self.net_2_100_mac = iface['mac_addr']
if ip['ip_address'] == '10.2.2.200':
self.net_2_200_mac = iface['mac_addr']
        # Make sure we have the MACs we need; there's no reason for any to be
        # missing
self.assertTrue(self.net_2_100_mac)
self.assertTrue(self.net_2_200_mac)
# Verify metadata from metadata service
if CONF.compute_feature_enabled.metadata_service:
md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
LOG.info('Attempting to verify tagged devices in server %s via '
'the metadata service: %s', server['id'], md_url)
def get_and_verify_metadata():
try:
self.ssh_client.exec_command('curl -V')
except exceptions.SSHExecCommandFailed:
if not CONF.compute_feature_enabled.config_drive:
raise self.skipException('curl not found in guest '
'and config drive is '
'disabled')
LOG.warning('curl was not found in the guest, device '
'tagging metadata was not checked in the '
'metadata API')
return True
cmd = 'curl %s' % md_url
md_json = self.ssh_client.exec_command(cmd)
self.verify_device_metadata(md_json)
return True
if not test_utils.call_until_true(get_and_verify_metadata,
CONF.compute.build_timeout,
CONF.compute.build_interval):
raise exceptions.TimeoutException('Timeout while verifying '
'metadata on server.')
# Verify metadata on config drive
if CONF.compute_feature_enabled.config_drive:
cmd_blkid = 'blkid -t LABEL=config-2 -o device'
LOG.info('Attempting to verify tagged devices in server %s via '
'the config drive.', server['id'])
dev_name = self.ssh_client.exec_command(cmd_blkid)
dev_name = dev_name.rstrip()
try:
self.ssh_client.exec_command('sudo mount %s /mnt' % dev_name)
except exceptions.SSHExecCommandFailed:
# So the command failed, let's try to know why and print some
# useful information.
lsblk = self.ssh_client.exec_command('sudo lsblk --fs --ascii')
LOG.error("Mounting %s on /mnt failed. Right after the "
"failure 'lsblk' in the guest reported:\n%s",
dev_name, lsblk)
raise
cmd_md = 'sudo cat /mnt/openstack/latest/meta_data.json'
md_json = self.ssh_client.exec_command(cmd_md)
self.verify_device_metadata(md_json)
class DeviceTaggingTestV2_42(DeviceTaggingTest):
min_microversion = '2.42'
max_microversion = 'latest'
|
|
# -*- coding: utf-8 -*-
"""This file contains a MRUList Registry plugin.
Also see:
https://winreg-kb.readthedocs.io/en/latest/sources/explorer-keys/Most-recently-used.html
"""
import abc
import os
from dtfabric.runtime import data_maps as dtfabric_data_maps
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import dtfabric_helper
from plaso.lib import errors
from plaso.parsers import logger
from plaso.parsers import winreg_parser
from plaso.parsers.shared import shell_items
from plaso.parsers.winreg_plugins import interface
class MRUListEventData(events.EventData):
"""MRUList event data attribute container.
Attributes:
entries (str): most recently used (MRU) entries.
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'windows:registry:mrulist'
def __init__(self):
"""Initializes event data."""
super(MRUListEventData, self).__init__(data_type=self.DATA_TYPE)
self.entries = None
self.key_path = None
class MRUListStringRegistryKeyFilter(
interface.WindowsRegistryKeyWithValuesFilter):
"""Windows Registry key with values filter."""
_IGNORE_KEY_PATH_SUFFIXES = frozenset([
'\\Explorer\\DesktopStreamMRU'.upper()])
_VALUE_NAMES = ('a', 'MRUList')
def __init__(self):
"""Initializes a Windows Registry key filter object."""
super(MRUListStringRegistryKeyFilter, self).__init__(self._VALUE_NAMES)
def Match(self, registry_key):
"""Determines if a Windows Registry key matches the filter.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
bool: True if the Windows Registry key matches the filter.
"""
key_path = registry_key.path.upper()
    # Prevent this filter from matching non-string MRUList values.
for ignore_key_path_suffix in self._IGNORE_KEY_PATH_SUFFIXES:
if key_path.endswith(ignore_key_path_suffix):
return False
return super(MRUListStringRegistryKeyFilter, self).Match(registry_key)
class BaseMRUListWindowsRegistryPlugin(
interface.WindowsRegistryPlugin, dtfabric_helper.DtFabricHelper):
"""Class for common MRUList Windows Registry plugin functionality."""
_DEFINITION_FILE = os.path.join(
os.path.dirname(__file__), 'mru.yaml')
@abc.abstractmethod
def _ParseMRUListEntryValue(
self, parser_mediator, registry_key, entry_index, entry_letter, **kwargs):
"""Parses the MRUList entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUList value.
entry_index (int): MRUList entry index.
entry_letter (str): character value representing the entry.
Returns:
str: MRUList entry value.
"""
def _ParseMRUListValue(self, registry_key):
"""Parses the MRUList value in a given Registry key.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUList value.
Returns:
mrulist_entries: MRUList entries or None if not available.
"""
mrulist_value = registry_key.GetValueByName('MRUList')
# The key exists but does not contain a value named "MRUList".
if not mrulist_value:
return None
mrulist_entries_map = self._GetDataTypeMap('mrulist_entries')
context = dtfabric_data_maps.DataTypeMapContext(values={
'data_size': len(mrulist_value.data)})
return self._ReadStructureFromByteStream(
mrulist_value.data, 0, mrulist_entries_map, context=context)
def _ParseMRUListKey(self, parser_mediator, registry_key, codepage='cp1252'):
"""Extract event objects from a MRUList Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
try:
mrulist = self._ParseMRUListValue(registry_key)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse MRUList value with error: {0!s}'.format(exception))
return
if not mrulist:
return
entries = []
found_terminator = False
for entry_index, entry_letter in enumerate(mrulist):
# The MRU list is terminated with '\0' (0x0000).
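      # A (hypothetical) MRUList value of b'b\x00a\x00\x00\x00' thus yields
      # the letters 'b' and 'a' before the terminator, meaning the value
      # named "b" is the most recently used entry.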
if entry_letter == 0:
break
if found_terminator:
parser_mediator.ProduceExtractionWarning((
'found additional MRUList entries after terminator in key: '
'{0:s}.').format(registry_key.path))
# Only create one parser error per terminator.
found_terminator = False
entry_letter = chr(entry_letter)
value_string = self._ParseMRUListEntryValue(
parser_mediator, registry_key, entry_index, entry_letter,
codepage=codepage)
value_text = 'Index: {0:d} [MRU Value {1:s}]: {2:s}'.format(
entry_index + 1, entry_letter, value_string)
entries.append(value_text)
event_data = MRUListEventData()
event_data.entries = ' '.join(entries)
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
class MRUListStringWindowsRegistryPlugin(BaseMRUListWindowsRegistryPlugin):
"""Windows Registry plugin to parse a string MRUList."""
NAME = 'mrulist_string'
DATA_FORMAT = 'Most Recently Used (MRU) Registry data'
FILTERS = frozenset([MRUListStringRegistryKeyFilter()])
def _ParseMRUListEntryValue(
self, parser_mediator, registry_key, entry_index, entry_letter, **kwargs):
"""Parses the MRUList entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUList value.
entry_index (int): MRUList entry index.
entry_letter (str): character value representing the entry.
Returns:
str: MRUList entry value.
"""
value_string = ''
value = registry_key.GetValueByName('{0:s}'.format(entry_letter))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUList value: {0:s} in key: {1:s}.'.format(
entry_letter, registry_key.path))
elif value.DataIsString():
value_string = value.GetDataAsObject()
elif value.DataIsBinaryData():
logger.debug((
'[{0:s}] Non-string MRUList entry value: {1:s} parsed as string '
'in key: {2:s}.').format(self.NAME, entry_letter, registry_key.path))
utf16le_string_map = self._GetDataTypeMap('utf16le_string')
try:
value_string = self._ReadStructureFromByteStream(
value.data, 0, utf16le_string_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse MRUList entry value: {0:s} with error: '
'{1!s}').format(entry_letter, exception))
value_string = value_string.rstrip('\x00')
return value_string
# pylint: disable=arguments-differ
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
self._ParseMRUListKey(parser_mediator, registry_key, codepage=codepage)
class MRUListShellItemListWindowsRegistryPlugin(
BaseMRUListWindowsRegistryPlugin):
"""Windows Registry plugin to parse a shell item list MRUList."""
NAME = 'mrulist_shell_item_list'
DATA_FORMAT = 'Most Recently Used (MRU) Registry data'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\DesktopStreamMRU')])
# pylint: disable=arguments-differ
def _ParseMRUListEntryValue(
self, parser_mediator, registry_key, entry_index, entry_letter,
codepage='cp1252', **kwargs):
"""Parses the MRUList entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUList value.
entry_index (int): MRUList entry index.
entry_letter (str): character value representing the entry.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: MRUList entry value.
"""
value_string = ''
value = registry_key.GetValueByName('{0:s}'.format(entry_letter))
if value is None:
parser_mediator.ProduceExtractionWarning(
'missing MRUList value: {0:s} in key: {1:s}.'.format(
entry_letter, registry_key.path))
elif not value.DataIsBinaryData():
parser_mediator.ProduceExtractionWarning(
          'Non-binary MRUList entry value: {0:s} in key: {1:s}.'.format(
entry_letter, registry_key.path))
elif value.data:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, value.data, codepage=codepage)
shell_item_path = shell_items_parser.CopyToPath() or 'N/A'
value_string = 'Shell item path: {0:s}'.format(shell_item_path)
return value_string
# pylint: disable=arguments-differ
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
self._ParseMRUListKey(parser_mediator, registry_key, codepage=codepage)
winreg_parser.WinRegistryParser.RegisterPlugins([
MRUListStringWindowsRegistryPlugin,
MRUListShellItemListWindowsRegistryPlugin])
|
|
{% if cookiecutter.use_sentry == 'y' -%}
import logging
{% endif -%}
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['{{ cookiecutter.domain_name }}'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            # Mimicking memcached behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
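# _AWS_EXPIRY is 604800 seconds (60 * 60 * 24 * 7, i.e. one week).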
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------------------------------------------------------------
{% if cookiecutter.use_whitenoise == 'y' -%}
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
{%- else %}
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3Boto3Storage'
STATIC_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/'
{%- endif %}
# MEDIA
# ------------------------------------------------------------------------------
{% if cookiecutter.use_whitenoise == 'y' -%}
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
MEDIA_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/'
{%- else %}
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = 'static'
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = 'media'
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3Boto3Storage'
MEDIA_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/'
{%- endif %}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='{{cookiecutter.project_name}} <noreply@{{cookiecutter.domain_name}}>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[{{cookiecutter.project_name}}]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
{% if cookiecutter.use_whitenoise == 'y' -%}
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware') # noqa F405
{% endif %}
{%- if cookiecutter.use_compressor == 'y' -%}
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED
COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_URL
COMPRESS_URL = STATIC_URL
{% endif %}
{%- if cookiecutter.use_whitenoise == 'n' -%}
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
{% endif %}
{%- if cookiecutter.use_sentry == 'y' -%}
# raven
# ------------------------------------------------------------------------------
# https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat'] # noqa F405
MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware'] + MIDDLEWARE
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env('SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'dsn': SENTRY_DSN
}
{%- else %}
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
{% endif %}
# Your stuff...
# ------------------------------------------------------------------------------
|
|
#
# Training a Network w/Tensorflow
#
# PART 3 - Multiple images
#
# Reference timings:
# 4 imgs, 128x128, 10 iter: biswal home 38.9s; cyclop (vm): 7.7s
# 16 imgs, 128x128, 3 iter: biswal home 46.8s; cyclop (vm): 8.3s
# 16 imgs, 128x128, 10 iter: biswal home: 151.8s; cyclop (vm): 30.2s
# 16 imgs, 128x128, 1k iter: cyclop (centos vm): 3058s (~51m)
# 2098s (bugfix gif too large)
#
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
import tensorflow as tf
from libs import gif, utils
import IPython.display as ipyd
from datetime import datetime
#np.set_printoptions(threshold=np.inf) # display FULL array (infinite)
plt.ion()
def split_image(img):
# We'll first collect all the positions in the image in our list, xs
xs = []
# And the corresponding colors for each of these positions
ys = []
# Now loop over the image
for row_i in range(img.shape[0]):
for col_i in range(img.shape[1]):
# And store the inputs
xs.append([row_i, col_i])
# And outputs that the network needs to learn to predict
ys.append(img[row_i, col_i])
# we'll convert our lists to arrays
xs = np.array(xs)
ys = np.array(ys)
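    # For an H x W x C image: xs has shape (H*W, 2) (row, col pairs) and
    # ys has shape (H*W, C) (the pixel value at each position).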
return xs, ys
def build_model(xs, ys, n_neurons, n_layers, activation_fn,
final_activation_fn, cost_type):
xs = np.asarray(xs)
ys = np.asarray(ys)
    if xs.ndim != 2:
        raise ValueError(
            'xs should be n_observations x n_features, '
            'i.e. a 2-dimensional array.')
    if ys.ndim != 2:
        raise ValueError(
            'ys should be n_observations x n_features, '
            'i.e. a 2-dimensional array.')
n_xs = xs.shape[1]
n_ys = ys.shape[1]
X = tf.placeholder(name='X', shape=[None, n_xs],
dtype=tf.float32)
Y = tf.placeholder(name='Y', shape=[None, n_ys],
dtype=tf.float32)
current_input = X
for layer_i in range(n_layers):
current_input = utils.linear(
current_input, n_neurons,
activation=activation_fn,
name='layer{}'.format(layer_i))[0]
Y_pred = utils.linear(
current_input, n_ys,
activation=final_activation_fn,
name='pred')[0]
if cost_type == 'l1_norm':
cost = tf.reduce_mean(tf.reduce_sum(
tf.abs(Y - Y_pred), 1))
elif cost_type == 'l2_norm':
cost = tf.reduce_mean(tf.reduce_sum(
tf.squared_difference(Y, Y_pred), 1))
else:
raise ValueError(
'Unknown cost_type: {}. '.format(
cost_type) + 'Use only "l1_norm" or "l2_norm"')
return {'X': X, 'Y': Y, 'Y_pred': Y_pred, 'cost': cost}
def train(imgs,
learning_rate=0.0001,
batch_size=200,
n_iterations=10,
gif_step=2,
n_neurons=30,
n_layers=10,
activation_fn=tf.nn.relu,
final_activation_fn=tf.nn.tanh,
cost_type='l2_norm'):
N, H, W, C = imgs.shape
all_xs, all_ys = [], []
for img_i, img in enumerate(imgs):
xs, ys = split_image(img)
all_xs.append(np.c_[xs, np.repeat(img_i, [xs.shape[0]])])
all_ys.append(ys)
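    # Each row of xs is [row, col, img_i]; standardizing it and mapping ys
    # from [0, 255] to [-1, 1] matches the tanh final activation's range.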
xs = np.array(all_xs).reshape(-1, 3)
xs = (xs - np.mean(xs, 0)) / np.std(xs, 0)
ys = np.array(all_ys).reshape(-1, 3)
ys = ys / 127.5 - 1
g = tf.Graph()
with tf.Session(graph=g) as sess:
model = build_model(xs, ys, n_neurons, n_layers,
activation_fn, final_activation_fn,
cost_type)
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(model['cost'])
sess.run(tf.initialize_all_variables())
gifs = []
costs = []
step_i = 0
for it_i in range(n_iterations):
# Get a random sampling of the dataset
idxs = np.random.permutation(range(len(xs)))
# The number of batches we have to iterate over
n_batches = len(idxs) // batch_size
training_cost = 0
# Now iterate over our stochastic minibatches:
for batch_i in range(n_batches):
# Get just minibatch amount of data
idxs_i = idxs[batch_i * batch_size:
(batch_i + 1) * batch_size]
# And optimize, also returning the cost so we can monitor
# how our optimization is doing.
cost = sess.run(
[model['cost'], optimizer],
feed_dict={model['X']: xs[idxs_i],
model['Y']: ys[idxs_i]})[0]
training_cost += cost
print('iteration {}/{}: cost {}'.format(
it_i + 1, n_iterations, training_cost / n_batches))
            # Also, every gif_step iterations, we'll record the prediction of
            # our input xs, which should try to recreate our image!
if (it_i + 1) % gif_step == 0:
costs.append(training_cost / n_batches)
ys_pred = model['Y_pred'].eval(
feed_dict={model['X']: xs}, session=sess)
img = ys_pred.reshape(imgs.shape)
gifs.append(img)
return gifs
import urllib.request
def get_celeb_files(qfil):
if not os.path.exists('img_align_celeba'):
os.mkdir('img_align_celeba')
for img_i in range(1, qfil+1):
f = '000%03d.jpg' % img_i
url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f
print(url, end='\r')
urllib.request.urlretrieve(url, os.path.join('img_align_celeba', f))
files = [os.path.join('img_align_celeba', file_i)
for file_i in os.listdir('img_align_celeba')
if '.jpg' in file_i]
return files[0:qfil]
def get_celeb_imgs(qpic):
"""
Returns
-------
imgs : list of np.ndarray
List of the first <qpic> images from the celeb dataset
"""
return [plt.imread(f_i) for f_i in get_celeb_files(qpic)]
#############################################################
#
# MAIN
#
print("Reading images...")
QNT=16
switchcelebs=False
if switchcelebs:
celeb_imgs = np.array(get_celeb_imgs(QNT))
plt.figure(figsize=(6, 6))
print(celeb_imgs)
    print(np.array(celeb_imgs).shape)
pltdataset=utils.montage(celeb_imgs, saveto="batch2_3_temp_dataset.png").astype(np.uint8)
plt.imshow(pltdataset)
plt.imsave(fname='batch2_3_dataset.png', arr=pltdataset)
trainimgs = np.array(celeb_imgs).copy()
else:
dirname = "labdogs"
filenames = [os.path.join(dirname, fname)
for fname in os.listdir(dirname)]
filenames = filenames[:QNT]
assert(len(filenames) == QNT)
#myimgs = [plt.imread(fname)[..., :3] for fname in filenames]
myimgs=np.array([plt.imread(fname) for fname in filenames])
myimgs = [utils.imcrop_tosquare(img_i) for img_i in myimgs]
myimgs = [resize(img_i, (128,128)) for img_i in myimgs]
myimgs=np.clip(np.array(myimgs)*255, 0, 255).astype(np.uint8) # fix resize() conversion to 0..1
pltdataset=utils.montage(myimgs, saveto="batch2_3_temp_dataset.png").astype(np.uint8)
plt.imshow(pltdataset)
plt.imsave(fname='batch2_3_dataset.png', arr=pltdataset)
trainimgs = np.array(myimgs).copy()
plt.show()
plt.pause(1)
print("Training...")
t1 = datetime.now()
trainedgifs = train(imgs=trainimgs, n_iterations=1000, gif_step=50)
t2 = datetime.now()
delta = t2 - t1
print(" Total training time: ", delta.total_seconds())
plt.close()
print("Saving results...")
montage_gifs = [np.clip(utils.montage(
(m * 127.5) + 127.5, saveto='batch2_3_montage_temp.png'), 0, 255).astype(np.uint8)
for m in trainedgifs]
_ = gif.build_gif(montage_gifs, saveto='batch2_3_multiple.gif')
plt.show()
plt.pause(5)
plt.close()
final = trainedgifs[-1]
final_gif = [np.clip(((m * 127.5) + 127.5), 0, 255).astype(np.uint8) for m in final]
gif.build_gif(final_gif, saveto='batch2_3_final.gif')
#plt.imshow(_)
plt.show()
plt.pause(5)
plt.close()
# eop
|
|
#!/usr/bin/env python
"""
The Computer Language Benchmarks Game
http://benchmarksgame.alioth.debian.org/
regex-dna Python 3 #5 program:
contributed by Dominique Wahli
2to3
modified by Justin Peel
fasta Python 3 #3 program:
modified by Ian Osgood
modified again by Heinrich Acker
modified by Justin Peel
Modified by Christopher Sean Forgeron
"""
import bisect
import re
import pyperf
DEFAULT_INIT_LEN = 100000
DEFAULT_RNG_SEED = 42
ALU = ('GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG'
'GAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA'
'CCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAAT'
'ACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCA'
'GCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG'
'AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC'
'AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA')
IUB = list(zip('acgtBDHKMNRSVWY', [0.27, 0.12, 0.12, 0.27] + [0.02] * 11))
HOMOSAPIENS = [
('a', 0.3029549426680),
('c', 0.1979883004921),
('g', 0.1975473066391),
('t', 0.3015094502008),
]
def make_cumulative(table):
P = []
C = []
prob = 0.
for char, p in table:
prob += p
P += [prob]
C += [ord(char)]
return (P, C)
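# e.g. (sketch) make_cumulative(HOMOSAPIENS) gives
#   P = [0.3030, 0.5009, 0.6985, 1.0] (approximately), C = [97, 99, 103, 116]
# so bisect.bisect(P, u) for a uniform u in [0, 1) picks each codepoint with
# its table probability.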
def repeat_fasta(src, n, nprint):
width = 60
is_trailing_line = False
count_modifier = 0.0
len_of_src = len(src)
ss = src + src + src[:n % len_of_src]
# CSF - It's faster to work with a bytearray than a string
s = bytearray(ss, encoding='utf8')
if n % width:
# We don't end on a 60 char wide line
is_trailing_line = True
count_modifier = 1.0
    # CSF - Here we are stuck using an int instead of a float for the loop,
    # but testing showed it to still be faster than a for loop
count = 0
end = (n / float(width)) - count_modifier
while count < end:
i = count * 60 % len_of_src
nprint(s[i:i + 60] + b'\n')
count += 1
if is_trailing_line:
nprint(s[-(n % width):] + b'\n')
def random_fasta(table, n, seed, nprint):
width = 60
r = range(width)
bb = bisect.bisect
# If we don't have a multiple of the width, then we will have a trailing
# line, which needs a slightly different approach
is_trailing_line = False
count_modifier = 0.0
line = bytearray(width + 1) # Width of 60 + 1 for the \n char
probs, chars = make_cumulative(table)
# pRNG Vars
im = 139968.0
seed = float(seed)
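    # This is the benchmarks-game linear congruential generator:
    #   seed = (seed * 3877 + 29573) mod 139968,  u = seed / 139968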
if n % width:
# We don't end on a 60 char wide line
is_trailing_line = True
count_modifier = 1.0
# CSF - Loops with a high iteration count run faster as a while/float loop.
count = 0.0
end = (n / float(width)) - count_modifier
while count < end:
# CSF - Low iteration count loops may run faster as a for loop.
for i in r:
# CSF - Python is faster for all float math than it is for int, on my
# machine at least.
seed = (seed * 3877.0 + 29573.0) % 139968.0
                # CSF - While literal values, not variables, are faster for
                # most things, on my machine it's faster to have 'im' in a var
line[i] = chars[bb(probs, seed / im)]
line[60] = 10 # End of Line
nprint(line)
count += 1.0
if is_trailing_line:
for i in range(n % width):
seed = (seed * 3877.0 + 29573.0) % 139968.0
line[i] = chars[bb(probs, seed / im)]
nprint(line[:i + 1] + b"\n")
return seed
def init_benchmarks(n, rng_seed):
result = bytearray()
nprint = result.extend
nprint(b'>ONE Homo sapiens alu\n')
repeat_fasta(ALU, n * 2, nprint=nprint)
# We need to keep track of the state of 'seed' so we pass it in, and return
# it back so our output can pass the diff test
nprint(b'>TWO IUB ambiguity codes\n')
seed = random_fasta(IUB, n * 3, seed=rng_seed, nprint=nprint)
nprint(b'>THREE Homo sapiens frequency\n')
random_fasta(HOMOSAPIENS, n * 5, seed, nprint=nprint)
return bytes(result)
VARIANTS = (
b'agggtaaa|tttaccct',
b'[cgt]gggtaaa|tttaccc[acg]',
b'a[act]ggtaaa|tttacc[agt]t',
b'ag[act]gtaaa|tttac[agt]ct',
b'agg[act]taaa|ttta[agt]cct',
b'aggg[acg]aaa|ttt[cgt]ccct',
b'agggt[cgt]aa|tt[acg]accct',
b'agggta[cgt]a|t[acg]taccct',
b'agggtaa[cgt]|[acg]ttaccct',
)
SUBST = (
(b'B', b'(c|g|t)'), (b'D', b'(a|g|t)'), (b'H', b'(a|c|t)'),
(b'K', b'(g|t)'), (b'M', b'(a|c)'), (b'N', b'(a|c|g|t)'),
(b'R', b'(a|g)'), (b'S', b'(c|g)'), (b'V', b'(a|c|g)'),
(b'W', b'(a|t)'), (b'Y', b'(c|t)'),
)
def run_benchmarks(seq):
ilen = len(seq)
seq = re.sub(b'>.*\n|\n', b'', seq)
clen = len(seq)
results = []
for f in VARIANTS:
results.append(len(re.findall(f, seq)))
for f, r in SUBST:
seq = re.sub(f, r, seq)
return results, ilen, clen, len(seq)
def bench_regex_dna(loops, seq, expected_res):
range_it = range(loops)
t0 = pyperf.perf_counter()
for i in range_it:
res = run_benchmarks(seq)
dt = pyperf.perf_counter() - t0
if (expected_res is not None) and (res != expected_res):
raise Exception("run_benchmarks() error")
return dt
def add_cmdline_args(cmd, args):
cmd.extend(("--fasta-length", str(args.fasta_length),
"--rng-seed", str(args.rng_seed)))
if __name__ == '__main__':
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
runner.metadata['description'] = ("Test the performance of regexps "
"using benchmarks from "
"The Computer Language Benchmarks Game.")
cmd = runner.argparser
cmd.add_argument("--fasta-length", type=int, default=DEFAULT_INIT_LEN,
help="Length of the fasta sequence "
"(default: %s)" % DEFAULT_INIT_LEN)
cmd.add_argument("--rng-seed", type=int, default=DEFAULT_RNG_SEED,
help="Seed of the random number generator "
"(default: %s)" % DEFAULT_RNG_SEED)
args = runner.parse_args()
if args.fasta_length == 100000:
expected_len = 1016745
expected_res = ([6, 26, 86, 58, 113, 31, 31, 32, 43],
1016745, 1000000, 1336326)
else:
expected_len = None
expected_res = None
runner.metadata['regex_dna_fasta_len'] = args.fasta_length
runner.metadata['regex_dna_rng_seed'] = args.rng_seed
seq = init_benchmarks(args.fasta_length, args.rng_seed)
if (expected_len is not None) and (len(seq) != expected_len):
raise Exception("init_benchmarks() error")
runner.bench_time_func('regex_dna', bench_regex_dna, seq, expected_res)
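# Example invocation (assumed typical pyperf usage; script name illustrative):
#     python bm_regex_dna.py --fasta-length 100000 --rng-seed 42
# pyperf re-runs the script in worker processes, and add_cmdline_args()
# forwards the two custom options so each worker rebuilds identical input.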
|
|
# -*- coding: utf-8 -*-
"""
Django settings for icecreamratings project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('icecreamratings_project')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'icecreamratings_project.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
    # If django-secure is in use, its SecurityMiddleware must be listed first.
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'icecreamratings_project.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Pavel Karateev""", 'karateev.pavel@ya.ru'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///icecreamratings_project"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
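# Hedged illustration of how django-environ resolves the settings above
# (not part of this settings file): values come from os.environ, falling
# back to the supplied default. For example, with
#     DJANGO_DEBUG=yes
#     DATABASE_URL=postgres://user:pass@localhost:5432/icecreamratings
# env.bool("DJANGO_DEBUG", False) returns True, and env.db("DATABASE_URL")
# parses the URL into a Django DATABASES dict (ENGINE, NAME, USER, ...).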
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Moscow'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import httplib as http
import logging
import math
import os
import requests
import urllib
import waffle
from django.apps import apps
from django.db.models import Count
from flask import request, send_from_directory, Response, stream_with_context
from framework import sentry
from framework.auth import Auth
from framework.auth.decorators import must_be_logged_in
from framework.auth.forms import SignInForm, ForgotPasswordForm
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.forms import utils as form_utils
from framework.routing import proxy_url
from framework.auth.core import get_current_user_id, _get_current_user
from website import settings
from website.institutions.views import serialize_institution
from osf.models import BaseFileNode, Guid, Institution, PreprintService, AbstractNode, Node
from website.settings import EXTERNAL_EMBER_APPS, PROXY_EMBER_APPS, EXTERNAL_EMBER_SERVER_TIMEOUT, INSTITUTION_DISPLAY_NODE_THRESHOLD, DOMAIN
from website.ember_osf_web.decorators import ember_flag_is_active, MockUser
from website.ember_osf_web.views import use_ember_app
from website.project.model import has_anonymous_link
from osf.utils import permissions
from api.providers.permissions import GroupHelper
logger = logging.getLogger(__name__)
preprints_dir = os.path.abspath(os.path.join(os.getcwd(), EXTERNAL_EMBER_APPS['preprints']['path']))
ember_osf_web_dir = os.path.abspath(os.path.join(os.getcwd(), EXTERNAL_EMBER_APPS['ember_osf_web']['path']))
def serialize_contributors_for_summary(node, max_count=3):
    # TODO: Use .filter(visible=True) when chaining is fixed in django-include
users = [contrib.user for contrib in node.contributor_set.all() if contrib.visible]
contributors = []
n_contributors = len(users)
others_count = ''
for index, user in enumerate(users[:max_count]):
if index == max_count - 1 and len(users) > max_count:
separator = ' &'
            others_count = str(n_contributors - max_count)
elif index == len(users) - 1:
separator = ''
elif index == len(users) - 2:
separator = ' &'
else:
separator = ','
contributor = user.get_summary(formatter='surname')
contributor['user_id'] = user._primary_key
contributor['separator'] = separator
contributors.append(contributor)
return {
'contributors': contributors,
'others_count': others_count,
}
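# Hedged sketch of the separator rule above, using plain strings in place of
# contributor objects: names are comma-separated, the final visible pair is
# joined with ' &', and a truncated list ends with ' &' before "N others".
def _demo_separators(names, max_count=3):
    shown = []
    for index, name in enumerate(names[:max_count]):
        if index == max_count - 1 and len(names) > max_count:
            separator = ' &'
        elif index == len(names) - 1:
            separator = ''
        elif index == len(names) - 2:
            separator = ' &'
        else:
            separator = ','
        shown.append(name + separator)
    return shown  # ['A,', 'B,', 'C &'] for five names A..E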
def serialize_node_summary(node, auth, primary=True, show_path=False):
is_registration = node.is_registration
summary = {
'id': node._id,
'primary': primary,
'is_registration': node.is_registration,
'is_fork': node.is_fork,
'is_pending_registration': node.is_pending_registration if is_registration else False,
'is_retracted': node.is_retracted if is_registration else False,
'is_pending_retraction': node.is_pending_retraction if is_registration else False,
'embargo_end_date': node.embargo_end_date.strftime('%A, %b. %d, %Y') if is_registration and node.embargo_end_date else False,
'is_pending_embargo': node.is_pending_embargo if is_registration else False,
'is_embargoed': node.is_embargoed if is_registration else False,
'archiving': node.archiving if is_registration else False,
}
parent_node = node.parent_node
user = auth.user
if node.can_view(auth):
# Re-query node with contributor guids included to prevent N contributor queries
node = AbstractNode.objects.filter(pk=node.pk).include('contributor__user__guids').get()
contributor_data = serialize_contributors_for_summary(node)
summary.update({
'can_view': True,
'can_edit': node.can_edit(auth),
'primary_id': node._id,
'url': node.url,
'primary': primary,
'api_url': node.api_url,
'title': node.title,
'category': node.category,
'isPreprint': bool(node.preprint_file_id),
'childExists': Node.objects.get_children(node, active=True).exists(),
'is_admin': node.has_permission(user, permissions.ADMIN),
'is_contributor': node.is_contributor(user),
'logged_in': auth.logged_in,
'node_type': node.project_or_component,
'is_fork': node.is_fork,
'is_registration': is_registration,
'anonymous': has_anonymous_link(node, auth),
'registered_date': node.registered_date.strftime('%Y-%m-%d %H:%M UTC')
if node.is_registration
else None,
'forked_date': node.forked_date.strftime('%Y-%m-%d %H:%M UTC')
if node.is_fork
else None,
'ua_count': None,
'ua': None,
'non_ua': None,
'is_public': node.is_public,
'parent_title': parent_node.title if parent_node else None,
'parent_is_public': parent_node.is_public if parent_node else False,
'show_path': show_path,
'contributors': contributor_data['contributors'],
'others_count': contributor_data['others_count'],
'description': node.description if len(node.description) <= 150 else node.description[0:150] + '...',
})
else:
summary['can_view'] = False
return summary
def index():
try: # Check if we're on an institution landing page
        # TODO: make this way more robust
institution = Institution.objects.get(domains__contains=[request.host.lower()], is_deleted=False)
inst_dict = serialize_institution(institution)
inst_dict.update({
'home': False,
'institution': True,
'redirect_url': '{}institutions/{}/'.format(DOMAIN, institution._id),
})
return inst_dict
except Institution.DoesNotExist:
pass
return home()
@ember_flag_is_active('ember_home_page')
def home():
user_id = get_current_user_id()
if user_id: # Logged in: return either landing page or user home page
all_institutions = (
Institution.objects.filter(
is_deleted=False,
nodes__is_public=True,
nodes__is_deleted=False,
nodes__type='osf.node'
)
.annotate(Count('nodes'))
.filter(nodes__count__gte=INSTITUTION_DISPLAY_NODE_THRESHOLD)
.order_by('name').only('_id', 'name', 'logo_name')
)
dashboard_institutions = [
{'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path_rounded_corners}
for inst in all_institutions
]
return {
'home': True,
'dashboard_institutions': dashboard_institutions,
}
else: # Logged out: return landing page
return {
'home': True,
}
def find_bookmark_collection(user):
Collection = apps.get_model('osf.Collection')
return Collection.objects.get(creator=user, deleted__isnull=True, is_bookmark_collection=True)
@must_be_logged_in
@ember_flag_is_active('ember_dashboard_page')
def dashboard(auth):
return redirect('/')
@ember_flag_is_active('ember_support_page')
def support():
return {}
@must_be_logged_in
@ember_flag_is_active('ember_my_projects_page')
def my_projects(auth):
user = auth.user
bookmark_collection = find_bookmark_collection(user)
my_projects_id = bookmark_collection._id
return {'addons_enabled': user.get_addon_names(),
'dashboard_id': my_projects_id,
}
def validate_page_num(page, pages):
if page < 0 or (pages and page >= pages):
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "page".'
))
def paginate(items, total, page, size):
pages = math.ceil(total / float(size))
validate_page_num(page, pages)
start = page * size
paginated_items = itertools.islice(items, start, start + size)
return paginated_items, pages
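# Hedged usage sketch: paginate() derives the page count from `total`,
# validates the requested page, and slices lazily with itertools.islice,
# so `items` can be any iterable without being fully materialized.
def _demo_paginate():
    items, pages = paginate(range(100), total=100, page=2, size=10)
    return list(items), pages  # ([20, 21, ..., 29], 10.0)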
def reproducibility():
return redirect('/ezcuj/wiki')
def signin_form():
return form_utils.jsonify(SignInForm())
def forgot_password_form():
return form_utils.jsonify(ForgotPasswordForm(prefix='forgot_password'))
### GUID ###
def _build_guid_url(base, suffix=None):
url = '/'.join([
each.strip('/') for each in [base, suffix]
if each
])
if not isinstance(url, unicode):
url = url.decode('utf-8')
return u'/{0}/'.format(url)
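# Hedged examples of the builder above (illustrative):
#     _build_guid_url('abc12')              -> u'/abc12/'
#     _build_guid_url('abc12', 'download')  -> u'/abc12/download/'
#     _build_guid_url('/abc12/', '/files/') -> u'/abc12/files/'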
def resolve_guid_download(guid, suffix=None, provider=None):
return resolve_guid(guid, suffix='download')
def resolve_guid(guid, suffix=None):
"""Load GUID by primary key, look up the corresponding view function in the
routing table, and return the return value of the view function without
changing the URL.
:param str guid: GUID primary key
:param str suffix: Remainder of URL after the GUID
:return: Return value of proxied view function
"""
try:
# Look up
guid_object = Guid.load(guid)
except KeyError as e:
if e.message == 'osfstorageguidfile': # Used when an old detached OsfStorageGuidFile object is accessed
raise HTTPError(http.NOT_FOUND)
else:
raise e
if guid_object:
# verify that the object implements a GuidStoredObject-like interface. If a model
# was once GuidStoredObject-like but that relationship has changed, it's
# possible to have referents that are instances of classes that don't
# have a deep_url attribute or otherwise don't behave as
# expected.
if not hasattr(guid_object.referent, 'deep_url'):
sentry.log_message(
'Guid resolved to an object with no deep_url', dict(guid=guid)
)
raise HTTPError(http.NOT_FOUND)
referent = guid_object.referent
if referent is None:
logger.error('Referent of GUID {0} not found'.format(guid))
raise HTTPError(http.NOT_FOUND)
if not referent.deep_url:
raise HTTPError(http.NOT_FOUND)
# Handle file `/download` shortcut with supported types.
if suffix and suffix.rstrip('/').lower() == 'download':
file_referent = None
if isinstance(referent, PreprintService) and referent.primary_file:
if not referent.is_published:
# TODO: Ideally, permissions wouldn't be checked here.
# This is necessary to prevent a logical inconsistency with
# the routing scheme - if a preprint is not published, only
# admins and moderators should be able to know it exists.
auth = Auth.from_kwargs(request.args.to_dict(), {})
group_helper = GroupHelper(referent.provider)
admin_group = group_helper.get_group('admin')
mod_group = group_helper.get_group('moderator')
# Check if user isn't a nonetype or that the user has admin/moderator permissions
if auth.user is None or not (referent.node.has_permission(auth.user, permissions.ADMIN) or (mod_group.user_set.all() | admin_group.user_set.all()).filter(id=auth.user.id).exists()):
raise HTTPError(http.NOT_FOUND)
file_referent = referent.primary_file
elif isinstance(referent, BaseFileNode) and referent.is_file:
file_referent = referent
if file_referent:
# Extend `request.args` adding `action=download`.
request.args = request.args.copy()
request.args.update({'action': 'download'})
# Do not include the `download` suffix in the url rebuild.
url = _build_guid_url(urllib.unquote(file_referent.deep_url))
return proxy_url(url)
# Handle Ember Applications
if isinstance(referent, PreprintService):
if referent.provider.domain_redirect_enabled:
# This route should always be intercepted by nginx for the branded domain,
# w/ the exception of `<guid>/download` handled above.
return redirect(referent.absolute_url, http.MOVED_PERMANENTLY)
if PROXY_EMBER_APPS:
resp = requests.get(EXTERNAL_EMBER_APPS['preprints']['server'], stream=True, timeout=EXTERNAL_EMBER_SERVER_TIMEOUT)
return Response(stream_with_context(resp.iter_content()), resp.status_code)
return send_from_directory(preprints_dir, 'index.html')
if isinstance(referent, BaseFileNode) and referent.is_file and referent.node.is_quickfiles:
if referent.is_deleted:
raise HTTPError(http.GONE)
if PROXY_EMBER_APPS:
resp = requests.get(EXTERNAL_EMBER_APPS['ember_osf_web']['server'], stream=True, timeout=EXTERNAL_EMBER_SERVER_TIMEOUT)
return Response(stream_with_context(resp.iter_content()), resp.status_code)
return send_from_directory(ember_osf_web_dir, 'index.html')
if isinstance(referent, Node) and not referent.is_registration and suffix:
page = suffix.strip('/').split('/')[0]
flag_name = 'ember_project_{}_page'.format(page)
request.user = _get_current_user() or MockUser()
if waffle.flag_is_active(request, flag_name):
use_ember_app()
url = _build_guid_url(urllib.unquote(referent.deep_url), suffix)
return proxy_url(url)
# GUID not found; try lower-cased and redirect if exists
guid_object_lower = Guid.load(guid.lower())
if guid_object_lower:
return redirect(
_build_guid_url(guid.lower(), suffix)
)
# GUID not found
raise HTTPError(http.NOT_FOUND)
# Redirects #
# redirect osf.io/about/ to OSF wiki page osf.io/4znzp/wiki/home/
def redirect_about(**kwargs):
return redirect('https://osf.io/4znzp/wiki/home/')
def redirect_help(**kwargs):
return redirect('/faq/')
def redirect_faq(**kwargs):
return redirect('http://help.osf.io/m/faqs/')
# redirect osf.io/howosfworks to osf.io/getting-started/
def redirect_howosfworks(**kwargs):
return redirect('/getting-started/')
# redirect osf.io/getting-started to help.osf.io/
def redirect_getting_started(**kwargs):
return redirect('http://help.osf.io/')
# Redirect to home page
def redirect_to_home():
return redirect('/')
def redirect_to_cos_news(**kwargs):
# Redirect to COS News page
return redirect('https://cos.io/news/')
# Return error for legacy SHARE v1 search route
def legacy_share_v1_search(**kwargs):
return HTTPError(
http.BAD_REQUEST,
data=dict(
message_long='Please use v2 of the SHARE search API available at {}api/v2/share/search/creativeworks/_search.'.format(settings.SHARE_URL)
)
)
|
|
import sys
import json
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, LoggingHandler, util, models, evaluation, losses, InputExample
import logging
from datetime import datetime
import gzip
import os
import tarfile
import tqdm
from torch.utils.data import Dataset
import random
from shutil import copyfile
import pickle
import argparse
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
parser = argparse.ArgumentParser()
parser.add_argument("--train_batch_size", default=64, type=int)
parser.add_argument("--max_seq_length", default=300, type=int)
parser.add_argument("--model_name", required=True)
parser.add_argument("--max_passages", default=0, type=int)
parser.add_argument("--epochs", default=30, type=int)
parser.add_argument("--pooling", default="mean")
parser.add_argument("--negs_to_use", default=None, help="From which systems should negatives be used? Multiple systems seperated by comma. None = all")
parser.add_argument("--warmup_steps", default=1000, type=int)
parser.add_argument("--lr", default=2e-5, type=float)
parser.add_argument("--num_negs_per_system", default=5, type=int)
parser.add_argument("--use_pre_trained_model", default=False, action="store_true")
parser.add_argument("--use_all_queries", default=False, action="store_true")
args = parser.parse_args()
logging.info(str(args))
# The model we want to fine-tune
train_batch_size = args.train_batch_size #Increasing the train batch size improves the model performance, but requires more GPU memory
model_name = args.model_name
max_passages = args.max_passages
max_seq_length = args.max_seq_length #Max length for passages. Increasing it, requires more GPU memory
num_negs_per_system = args.num_negs_per_system # We used different systems to mine hard negatives. Number of hard negatives to add from each system
num_epochs = args.epochs # Number of epochs we want to train
# Load our embedding model
if args.use_pre_trained_model:
logging.info("use pretrained SBERT model")
model = SentenceTransformer(model_name)
model.max_seq_length = max_seq_length
else:
logging.info("Create new SBERT model")
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), args.pooling)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
model_save_path = f'output/train_bi-encoder-margin_mse-{model_name.replace("/", "-")}-batch_size_{train_batch_size}-{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}'
# Save a copy of this training script next to the model for reproducibility
os.makedirs(model_save_path, exist_ok=True)
train_script_path = os.path.join(model_save_path, 'train_script.py')
copyfile(__file__, train_script_path)
with open(train_script_path, 'a') as fOut:
fOut.write("\n\n# Script was called via:\n#python " + " ".join(sys.argv))
### Now we read the MS MARCO dataset
data_folder = 'msmarco-data'
#### Read the corpus files, that contain all the passages. Store them in the corpus dict
corpus = {} #dict in the format: passage_id -> passage. Stores all existent passages
collection_filepath = os.path.join(data_folder, 'collection.tsv')
if not os.path.exists(collection_filepath):
tar_filepath = os.path.join(data_folder, 'collection.tar.gz')
if not os.path.exists(tar_filepath):
logging.info("Download collection.tar.gz")
util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz', tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
logging.info("Read corpus: collection.tsv")
with open(collection_filepath, 'r', encoding='utf8') as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
pid = int(pid)
corpus[pid] = passage
### Read the train queries, store in queries dict
queries = {} #dict in the format: query_id -> query. Stores all training queries
queries_filepath = os.path.join(data_folder, 'queries.train.tsv')
if not os.path.exists(queries_filepath):
tar_filepath = os.path.join(data_folder, 'queries.tar.gz')
if not os.path.exists(tar_filepath):
logging.info("Download queries.tar.gz")
util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/queries.tar.gz', tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
with open(queries_filepath, 'r', encoding='utf8') as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
qid = int(qid)
queries[qid] = query
# Load a dict (qid, pid) -> ce_score that maps query-ids (qid) and paragraph-ids (pid)
# to the CrossEncoder score computed by the cross-encoder/ms-marco-MiniLM-L-6-v2 model
ce_scores_file = os.path.join(data_folder, 'cross-encoder-ms-marco-MiniLM-L-6-v2-scores.pkl.gz')
if not os.path.exists(ce_scores_file):
logging.info("Download cross-encoder scores file")
util.http_get('https://huggingface.co/datasets/sentence-transformers/msmarco-hard-negatives/resolve/main/cross-encoder-ms-marco-MiniLM-L-6-v2-scores.pkl.gz', ce_scores_file)
logging.info("Load CrossEncoder scores dict")
with gzip.open(ce_scores_file, 'rb') as fIn:
ce_scores = pickle.load(fIn)
# As training data we use hard-negatives that have been mined using various systems
hard_negatives_filepath = os.path.join(data_folder, 'msmarco-hard-negatives.jsonl.gz')
if not os.path.exists(hard_negatives_filepath):
logging.info("Download cross-encoder scores file")
util.http_get('https://huggingface.co/datasets/sentence-transformers/msmarco-hard-negatives/resolve/main/msmarco-hard-negatives.jsonl.gz', hard_negatives_filepath)
logging.info("Read hard negatives train file")
train_queries = {}
negs_to_use = None
with gzip.open(hard_negatives_filepath, 'rt') as fIn:
for line in tqdm.tqdm(fIn):
if max_passages > 0 and len(train_queries) >= max_passages:
break
data = json.loads(line)
#Get the positive passage ids
pos_pids = data['pos']
#Get the hard negatives
neg_pids = set()
if negs_to_use is None:
if args.negs_to_use is not None: #Use specific system for negatives
negs_to_use = args.negs_to_use.split(",")
else: #Use all systems
negs_to_use = list(data['neg'].keys())
logging.info("Using negatives from the following systems:", negs_to_use)
for system_name in negs_to_use:
if system_name not in data['neg']:
continue
system_negs = data['neg'][system_name]
negs_added = 0
for pid in system_negs:
if pid not in neg_pids:
neg_pids.add(pid)
negs_added += 1
if negs_added >= num_negs_per_system:
break
if args.use_all_queries or (len(pos_pids) > 0 and len(neg_pids) > 0):
train_queries[data['qid']] = {'qid': data['qid'], 'query': queries[data['qid']], 'pos': pos_pids, 'neg': neg_pids}
logging.info("Train queries: {}".format(len(train_queries)))
# We create a custom MSMARCO dataset that returns triplets (query, positive, negative)
# on-the-fly based on the information from the mined-hard-negatives jsonl file.
class MSMARCODataset(Dataset):
def __init__(self, queries, corpus, ce_scores):
self.queries = queries
self.queries_ids = list(queries.keys())
self.corpus = corpus
self.ce_scores = ce_scores
for qid in self.queries:
self.queries[qid]['pos'] = list(self.queries[qid]['pos'])
self.queries[qid]['neg'] = list(self.queries[qid]['neg'])
random.shuffle(self.queries[qid]['neg'])
def __getitem__(self, item):
query = self.queries[self.queries_ids[item]]
query_text = query['query']
qid = query['qid']
if len(query['pos']) > 0:
pos_id = query['pos'].pop(0) #Pop positive and add at end
pos_text = self.corpus[pos_id]
query['pos'].append(pos_id)
else: #We only have negatives, use two negs
pos_id = query['neg'].pop(0) #Pop negative and add at end
pos_text = self.corpus[pos_id]
query['neg'].append(pos_id)
#Get a negative passage
neg_id = query['neg'].pop(0) #Pop negative and add at end
neg_text = self.corpus[neg_id]
query['neg'].append(neg_id)
pos_score = self.ce_scores[qid][pos_id]
neg_score = self.ce_scores[qid][neg_id]
return InputExample(texts=[query_text, pos_text, neg_text], label=pos_score-neg_score)
def __len__(self):
return len(self.queries)
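# Hedged sketch of the pop/append rotation used in __getitem__ above:
# ids are consumed round-robin, so successive epochs cycle through all
# mined negatives instead of re-pairing the same (positive, negative).
def _demo_rotation(draws=4):
    neg = [7, 8, 9]
    seen = []
    for _ in range(draws):
        pid = neg.pop(0)  # take the head...
        neg.append(pid)   # ...and rotate it to the tail
        seen.append(pid)
    return seen  # [7, 8, 9, 7]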
# For training the SentenceTransformer model, we need a dataset, a dataloader, and a loss used for training.
train_dataset = MSMARCODataset(queries=train_queries, corpus=corpus, ce_scores=ce_scores)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MarginMSELoss(model=model)
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=args.warmup_steps,
use_amp=True,
checkpoint_path=model_save_path,
checkpoint_save_steps=10000,
optimizer_params = {'lr': args.lr},
)
# Save the latest model
model.save(model_save_path)
|
|
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2007 Simon Willison
# Copyright (C) 2008-2010 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import urllib
from urlparse import urlsplit
from django.conf import settings
from django.contrib.auth import (
REDIRECT_FIELD_NAME, authenticate, login as auth_login)
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
from django.contrib.csrf.middleware import csrf_exempt
from openid.consumer.consumer import (
Consumer, SUCCESS, CANCEL, FAILURE)
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import sreg, ax, pape
from django_openid_auth import teams
from django_openid_auth.forms import OpenIDLoginForm
from django_openid_auth.models import UserOpenID
from django_openid_auth.signals import openid_login_complete
from django_openid_auth.store import DjangoOpenIDStore
from django_openid_auth.exceptions import (
RequiredAttributeNotReturned,
DjangoOpenIDException,
)
next_url_re = re.compile(r'^/[-\w/]+$')
def is_valid_next_url(next):
# When we allow this:
# /openid/?next=/welcome/
# For security reasons we want to restrict the next= bit to being a local
# path, not a complete URL.
return bool(next_url_re.match(next))
def sanitise_redirect_url(redirect_to):
"""Sanitise the redirection URL."""
# Light security check -- make sure redirect_to isn't garbage.
is_valid = True
if not redirect_to or ' ' in redirect_to:
is_valid = False
elif '//' in redirect_to:
# Allow the redirect URL to be external if it's a permitted domain
allowed_domains = getattr(settings,
"ALLOWED_EXTERNAL_OPENID_REDIRECT_DOMAINS", [])
s, netloc, p, q, f = urlsplit(redirect_to)
# allow it if netloc is blank or if the domain is allowed
if netloc:
# a domain was specified. Is it an allowed domain?
if netloc.find(":") != -1:
netloc, _ = netloc.split(":", 1)
if netloc not in allowed_domains:
is_valid = False
# If the return_to URL is not valid, use the default.
if not is_valid:
redirect_to = settings.LOGIN_REDIRECT_URL
return redirect_to
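# Hedged examples (assuming ALLOWED_EXTERNAL_OPENID_REDIRECT_DOMAINS is
# ['example.com'] and LOGIN_REDIRECT_URL is '/accounts/profile/'):
#     sanitise_redirect_url('/next/')                -> '/next/'
#     sanitise_redirect_url('http://example.com/x')  -> 'http://example.com/x'
#     sanitise_redirect_url('http://evil.com/x')     -> '/accounts/profile/'
#     sanitise_redirect_url('bad url')               -> '/accounts/profile/'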
def make_consumer(request):
"""Create an OpenID Consumer object for the given Django request."""
# Give the OpenID library its own space in the session object.
session = request.session.setdefault('OPENID', {})
store = DjangoOpenIDStore()
return Consumer(session, store)
def render_openid_request(request, openid_request, return_to, trust_root=None):
"""Render an OpenID authentication request."""
if trust_root is None:
trust_root = getattr(settings, 'OPENID_TRUST_ROOT',
request.build_absolute_uri('/'))
if openid_request.shouldSendRedirect():
redirect_url = openid_request.redirectURL(
trust_root, return_to)
return HttpResponseRedirect(redirect_url)
else:
form_html = openid_request.htmlMarkup(
trust_root, return_to, form_tag_attrs={'id': 'openid_message'})
return HttpResponse(form_html, content_type='text/html;charset=UTF-8')
def default_render_failure(request, message, status=403,
template_name='openid/failure.html',
exception=None):
"""Render an error page to the user."""
data = render_to_string(
template_name, dict(message=message, exception=exception),
context_instance=RequestContext(request))
return HttpResponse(data, status=status)
def parse_openid_response(request):
"""Parse an OpenID response from a Django request."""
    # Shortcut if there are no request parameters.
#if len(request.REQUEST) == 0:
# return None
current_url = request.build_absolute_uri()
consumer = make_consumer(request)
return consumer.complete(dict(request.REQUEST.items()), current_url)
def login_begin(request, template_name='openid/login.html',
login_complete_view='openid-complete',
form_class=OpenIDLoginForm,
render_failure=default_render_failure,
redirect_field_name=REDIRECT_FIELD_NAME):
"""Begin an OpenID login request, possibly asking for an identity URL."""
redirect_to = request.REQUEST.get(redirect_field_name, '')
# Get the OpenID URL to try. First see if we've been configured
# to use a fixed server URL.
openid_url = getattr(settings, 'OPENID_SSO_SERVER_URL', None)
# We may have included a configurable GET parameter to inject into the
# OPENID_SSO_SERVER_URL. Inject it if it exists. If the configuration is
# there, but the actual parameter is not, proceed as if no
# OPENID_SSO_SERVER_URL was specified.
if (openid_url and hasattr(settings, 'OPENID_INJECT_GET_PARAM')):
param_value = request.GET.get(
getattr(settings, 'OPENID_INJECT_GET_PARAM'))
if param_value:
openid_url = openid_url % param_value
else:
openid_url = None
if openid_url is None:
if request.POST:
login_form = form_class(data=request.POST)
if login_form.is_valid():
openid_url = login_form.cleaned_data['openid_identifier']
else:
login_form = form_class()
# Invalid or no form data:
if openid_url is None:
return render_to_response(template_name, {
'form': login_form,
redirect_field_name: redirect_to
}, context_instance=RequestContext(request))
error = None
consumer = make_consumer(request)
try:
openid_request = consumer.begin(openid_url)
except DiscoveryFailure, exc:
return render_failure(
request, "OpenID discovery error: %s" % (str(exc),), status=500,
exception=exc)
# Request some user details. If the provider advertises support
# for attribute exchange, use that.
if openid_request.endpoint.supportsType(ax.AXMessage.ns_uri):
fetch_request = ax.FetchRequest()
# We mark all the attributes as required, since Google ignores
# optional attributes. We request both the full name and
# first/last components since some providers offer one but not
# the other.
for (attr, alias) in [
('http://axschema.org/contact/email', 'email'),
('http://axschema.org/namePerson', 'fullname'),
('http://axschema.org/namePerson/first', 'firstname'),
('http://axschema.org/namePerson/last', 'lastname'),
('http://axschema.org/namePerson/friendly', 'nickname'),
# The myOpenID provider advertises AX support, but uses
# attribute names from an obsolete draft of the
# specification. We request them for compatibility.
('http://schema.openid.net/contact/email', 'old_email'),
('http://schema.openid.net/namePerson', 'old_fullname'),
('http://schema.openid.net/namePerson/friendly', 'old_nickname')]:
fetch_request.add(ax.AttrInfo(attr, alias=alias, required=True))
openid_request.addExtension(fetch_request)
else:
sreg_required_fields = []
sreg_required_fields.extend(
getattr(settings, 'OPENID_SREG_REQUIRED_FIELDS', []))
sreg_optional_fields = ['email', 'fullname', 'nickname']
sreg_optional_fields.extend(
getattr(settings, 'OPENID_SREG_EXTRA_FIELDS', []))
sreg_optional_fields = [
field for field in sreg_optional_fields if (
not field in sreg_required_fields)]
openid_request.addExtension(
sreg.SRegRequest(optional=sreg_optional_fields,
required=sreg_required_fields))
if getattr(settings, 'OPENID_PHYSICAL_MULTIFACTOR_REQUIRED', False):
preferred_auth = [
pape.AUTH_MULTI_FACTOR_PHYSICAL,
]
pape_request = pape.Request(preferred_auth_policies=preferred_auth)
openid_request.addExtension(pape_request)
# Request team info
teams_mapping_auto = getattr(settings, 'OPENID_LAUNCHPAD_TEAMS_MAPPING_AUTO', False)
teams_mapping_auto_blacklist = getattr(settings, 'OPENID_LAUNCHPAD_TEAMS_MAPPING_AUTO_BLACKLIST', [])
launchpad_teams = getattr(settings, 'OPENID_LAUNCHPAD_TEAMS_MAPPING', {})
if teams_mapping_auto:
#ignore launchpad teams. use all django-groups
launchpad_teams = dict()
all_groups = Group.objects.exclude(name__in=teams_mapping_auto_blacklist)
for group in all_groups:
launchpad_teams[group.name] = group.name
if launchpad_teams:
openid_request.addExtension(teams.TeamsRequest(launchpad_teams.keys()))
# Construct the request completion URL, including the page we
# should redirect to.
return_to = request.build_absolute_uri(reverse(login_complete_view))
if redirect_to:
if '?' in return_to:
return_to += '&'
else:
return_to += '?'
return_to += urllib.urlencode({redirect_field_name: redirect_to})
return render_openid_request(request, openid_request, return_to)
@csrf_exempt
def login_complete(request, redirect_field_name=REDIRECT_FIELD_NAME,
render_failure=None):
redirect_to = request.REQUEST.get(redirect_field_name, '')
render_failure = render_failure or \
getattr(settings, 'OPENID_RENDER_FAILURE', None) or \
default_render_failure
openid_response = parse_openid_response(request)
if not openid_response:
return render_failure(
request, 'This is an OpenID relying party endpoint.')
if openid_response.status == SUCCESS:
try:
user = authenticate(openid_response=openid_response)
except DjangoOpenIDException, e:
return render_failure(request, e.message, exception=e)
if user is not None:
if user.is_active:
auth_login(request, user)
response = HttpResponseRedirect(sanitise_redirect_url(redirect_to))
# Notify any listeners that we successfully logged in.
openid_login_complete.send(sender=UserOpenID, request=request,
openid_response=openid_response)
return response
else:
return render_failure(request, 'Disabled account')
else:
return render_failure(request, 'Unknown user')
elif openid_response.status == FAILURE:
return render_failure(
request, 'OpenID authentication failed: %s' %
openid_response.message)
elif openid_response.status == CANCEL:
return render_failure(request, 'Authentication cancelled')
else:
assert False, (
"Unknown OpenID response type: %r" % openid_response.status)
def logo(request):
return HttpResponse(
OPENID_LOGO_BASE_64.decode('base64'), mimetype='image/gif'
)
# Logo from http://openid.net/login-bg.gif
# Embedded here for convenience; you should serve this as a static file
OPENID_LOGO_BASE_64 = """
R0lGODlhEAAQAMQAAO3t7eHh4srKyvz8/P5pDP9rENLS0v/28P/17tXV1dHEvPDw8M3Nzfn5+d3d
3f5jA97Syvnv6MfLzcfHx/1mCPx4Kc/S1Pf189C+tP+xgv/k1N3OxfHy9NLV1/39/f///yH5BAAA
AAAALAAAAAAQABAAAAVq4CeOZGme6KhlSDoexdO6H0IUR+otwUYRkMDCUwIYJhLFTyGZJACAwQcg
EAQ4kVuEE2AIGAOPQQAQwXCfS8KQGAwMjIYIUSi03B7iJ+AcnmclHg4TAh0QDzIpCw4WGBUZeikD
Fzk0lpcjIQA7
"""
|
|
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
try:
from collections import deque
except ImportError:
from utils import deque
from constants import contentModelFlags, spaceCharacters
from constants import entitiesWindows1252, entities
from constants import asciiLowercase, asciiLetters, asciiUpper2Lower
from constants import digits, hexDigits, EOF
from constants import tokenTypes, tagTokenTypes
from constants import replacementCharacters
from inputstream import HTMLInputStream
# Group entities by their first character, for faster lookups
entitiesByFirstChar = {}
for e in entities:
entitiesByFirstChar.setdefault(e[0], []).append(e)
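# Hedged illustration: bucketing by first character means a lookup such as
# entitiesByFirstChar.get(u"n", []) only scans entity names starting with
# "n" (e.g. u"not;", u"notin;") instead of the whole `entities` table.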
class HTMLTokenizer:
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
# XXX need to fix documentation
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True):
self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
#Perform case conversions?
self.lowercaseElementName = lowercaseElementName
self.lowercaseAttrName = lowercaseAttrName
# Setup the initial tokenizer state
self.contentModelFlag = contentModelFlags["PCDATA"]
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
def __iter__(self):
""" This is where the magic happens.
We do our usually processing through the states and when we have a token
to return we yield the token which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = u"\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
#Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# XXX We should have a separate function that does "int" to
# "unicodestring" conversion since this doesn't always work
# according to hsivonen. Also, unichr has a limitation of 65535
char = unichr(charAsInt)
except:
try:
char = eval("u'\\U%08x'" % charAsInt)
                except:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                        "cant-convert-numeric-entity",
                        "datavars": {"charAsInt": charAsInt}})
                    # Fall back to U+FFFD so `char` is always bound before
                    # the return below.
                    char = u"\uFFFD"
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != u";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
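    # Hedged examples of the rules implemented above:
    #   "&#65;"  -> consumes "65" (radix 10), returns u"A"; ";" is discarded.
    #   "&#x41;" -> with isHex=True, parses "41" base 16, also u"A".
    #   A codepoint listed in replacementCharacters comes back remapped and
    #   queues an illegal-codepoint-for-numeric-entity ParseError.
    #   A missing ";" queues numeric-entity-without-semicolon and ungets the
    #   terminating character so it is re-tokenized.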
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = u"&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, u"<", u"&")
or (allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == u"#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in (u"x", u"X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = u"&" + u"".join(charStack)
else:
            # At this point in the process we might have a named entity.
            # Entities are stored in the global variable "entities".
            #
            # Consume characters and compare them to a substring of the
            # entity names in the list until the substring no longer matches.
filteredEntityList = entitiesByFirstChar.get(charStack[0], [])
def entitiesStartingWith(name):
return [e for e in filteredEntityList if e.startswith(name)]
while charStack[-1] is not EOF and\
entitiesStartingWith("".join(charStack)):
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
entityName = None
# Try to find the longest entity the string will match to take care
# of ¬i for instance.
for entityLength in xrange(len(charStack)-1, 1, -1):
possibleEntityName = "".join(charStack[:entityLength])
if possibleEntityName in entities:
entityName = possibleEntityName
break
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if entityName[-1] != ";" and fromAttribute and \
(charStack[entityLength] in asciiLetters
or charStack[entityLength] in digits):
self.stream.unget(charStack.pop())
output = u"&" + u"".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += u"".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = u"&" + u"".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type":tokenTypes["ParseError"],
"data":"attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type":tokenTypes["ParseError"],
"data":"self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
#XXX - consider splitting this state based on the content model flag
data = self.stream.char()
# Keep a charbuffer to handle the escapeFlag
if (self.contentModelFlag in
(contentModelFlags["CDATA"], contentModelFlags["RCDATA"])):
if len(self.lastFourChars) == 4:
self.lastFourChars.pop(0)
self.lastFourChars.append(data)
# The rest of the logic
if (data == "&" and self.contentModelFlag in
(contentModelFlags["PCDATA"], contentModelFlags["RCDATA"]) and
not self.escapeFlag):
self.state = self.entityDataState
elif (data == "-" and self.contentModelFlag in
(contentModelFlags["CDATA"], contentModelFlags["RCDATA"]) and
not self.escapeFlag and "".join(self.lastFourChars) == "<!--"):
self.escapeFlag = True
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data":data})
elif (data == "<" and (self.contentModelFlag ==
contentModelFlags["PCDATA"]
or (self.contentModelFlag in
(contentModelFlags["CDATA"],
contentModelFlags["RCDATA"]) and
self.escapeFlag == False))):
self.state = self.tagOpenState
elif (data == ">" and self.contentModelFlag in
(contentModelFlags["CDATA"], contentModelFlags["RCDATA"]) and
self.escapeFlag and "".join(self.lastFourChars)[1:] == "-->"):
self.escapeFlag = False
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":data})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
if (self.contentModelFlag in
(contentModelFlags["CDATA"], contentModelFlags["RCDATA"])):
chars = self.stream.charsUntil((u"&", u"<", u">", u"-"))
self.lastFourChars += chars[-4:]
self.lastFourChars = self.lastFourChars[-4:]
else:
chars = self.stream.charsUntil((u"&", u"<"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def tagOpenState(self):
data = self.stream.char()
if self.contentModelFlag == contentModelFlags["PCDATA"]:
if data == u"!":
self.state = self.markupDeclarationOpenState
elif data == u"/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == u">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<>"})
self.state = self.dataState
elif data == u"?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.stream.unget(data)
self.state = self.dataState
else:
# We know the content model flag is set to either RCDATA or CDATA
# now because this state can never be entered with the PLAINTEXT
# flag.
if data == u"/":
self.state = self.closeTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
if (self.contentModelFlag in (contentModelFlags["RCDATA"],
contentModelFlags["CDATA"])):
charStack = []
if self.currentToken:
# So far we know that "</" has been consumed. We now need to know
# whether the next few characters match the name of last emitted
# start tag which also happens to be the currentToken.
matched = True
for expected in self.currentToken["name"].lower():
charStack.append(self.stream.char())
if charStack[-1] not in (expected, expected.upper()):
matched = False
break
# If the tag name prefix matched, we also need to check the
# subsequent character
if matched:
charStack.append(self.stream.char())
if charStack[-1] in (spaceCharacters | frozenset((u">", u"/", EOF))):
self.contentModelFlag = contentModelFlags["PCDATA"]
# Unget the last character, so it can be re-processed
# in the next state
self.stream.unget(charStack.pop())
# The remaining characters in charStack are the tag name
self.currentToken = {"type": tokenTypes["EndTag"],
"name": u"".join(charStack),
"data": [],
"selfClosing":False}
self.state = self.tagNameState
return True
# Didn't find the end tag. The last character in charStack could be
# anything, so it has to be re-processed in the data state
self.stream.unget(charStack.pop())
# The remaining characters are a prefix of the tag name, so they're
# just letters and digits, so they can be output as character
# tokens immediately
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</" + u"".join(charStack)})
self.state = self.dataState
return True
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing":False}
self.state = self.tagNameState
elif data == u">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == u">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == u"/":
self.state = self.selfClosingStartTagState
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == u">":
self.emitCurrentToken()
elif data == u"/":
self.state = self.selfClosingStartTagState
elif data in (u"'", u'"', u"=", u"<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
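    # Attributes accumulate on currentToken["data"] as [name, value] list
    # pairs; they are only converted to a dict, with duplicates dropped,
    # when the start tag token is finally emitted.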
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == u"=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == u">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == u"/":
self.state = self.selfClosingStartTagState
elif data in (u"'", u'"', u"<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-name"})
self.state = self.dataState
emitToken = True
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
if self.lowercaseAttrName:
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, value in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
            # See the XXX above: the token is emitted only after the
            # duplicate check, while currentToken["data"] is still a list.
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == u"=":
self.state = self.beforeAttributeValueState
elif data == u">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == u"/":
self.state = self.selfClosingStartTagState
elif data in (u"'", u'"', u"<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.emitCurrentToken()
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == u"\"":
self.state = self.attributeValueDoubleQuotedState
elif data == u"&":
self.state = self.attributeValueUnQuotedState
            self.stream.unget(data)
elif data == u"'":
self.state = self.attributeValueSingleQuotedState
elif data == u">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data in (u"=", u"<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.emitCurrentToken()
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
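    # The three attribute-value states below are near-identical: they differ
    # only in the character that ends the value and in the allowed character
    # passed to processEntityInAttribute().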
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == u"&":
self.processEntityInAttribute(u'"')
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.emitCurrentToken()
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", u"&"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == u"&":
self.processEntityInAttribute(u"'")
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.emitCurrentToken()
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", u"&"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == u"&":
self.processEntityInAttribute(">")
elif data == u">":
self.emitCurrentToken()
elif data in (u'"', u"'", u"=", u"<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.emitCurrentToken()
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil( \
frozenset(("&", ">", "<", "=", "'", '"')) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == u">":
self.emitCurrentToken()
elif data == u"/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.emitCurrentToken()
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-soldius-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": self.stream.charsUntil(u">")})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def bogusCommentContinuationState(self):
# Like bogusCommentState, but the caller must create the comment token
# and this state just adds more characters to it
self.currentToken["data"] += self.stream.charsUntil(u">")
self.tokenQueue.append(self.currentToken)
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == u"-":
charStack.append(self.stream.char())
if charStack[-1] == u"-":
self.currentToken = {"type": tokenTypes["Comment"], "data": u""}
self.state = self.commentStartState
return True
elif charStack[-1] in (u'd', u'D'):
matched = True
for expected in ((u'o', u'O'), (u'c', u'C'), (u't', u'T'),
(u'y', u'Y'), (u'p', u'P'), (u'e', u'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": u"",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
# charStack[:-2] consists of 'safe' characters ('-', 'd', 'o', etc)
# so they can be copied directly into the bogus comment data, and only
# the last character might be '>' or EOF and needs to be ungetted
self.stream.unget(charStack.pop())
self.currentToken = {"type": tokenTypes["Comment"],
"data": u"".join(charStack)}
self.state = self.bogusCommentContinuationState
return True
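    # "<!" has three outcomes: "--" opens a comment, a case-insensitive
    # "doctype" opens a DOCTYPE, and anything else becomes a bogus comment
    # seeded with the characters consumed above.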
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + self.stream.charsUntil(u"-")
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data + self.stream.charsUntil(u"-")
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == u"-":
self.state = self.commentEndDashState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + self.stream.charsUntil(u"-")
return True
def commentEndDashState(self):
data = self.stream.char()
if data == u"-":
self.state = self.commentEndState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += u"-" + data +\
self.stream.charsUntil(u"-")
# Consume the next character which is either a "-" or an EOF as
# well so if there's a "-" directly after the "-" we go nicely to
# the "comment end state" without emitting a ParseError() there.
self.stream.char()
return True
def commentEndState(self):
data = self.stream.char()
if data == u">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == u"-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data in spaceCharacters:
self.currentToken["data"] += "--" + data
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-space-after-double-dash-in-comment"})
self.state = self.commentEndSpaceState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
            # XXX Any other character after "--" is reported and the "--" is
            # folded back into the comment data.
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += u"--" + data
self.state = self.commentState
return True
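    # "--!" and "-- " do not abort a comment; the two states below keep
    # accumulating data and still close the comment on a later ">".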
def commentEndBangState(self):
data = self.stream.char()
if data == u">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == u"-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += u"--!" + data
self.state = self.commentState
return True
def commentEndSpaceState(self):
data = self.stream.char()
if data == u">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == u"-":
self.state = self.commentEndDashState
elif data in spaceCharacters:
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-space-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == u">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == u">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == u">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in (u"p", u"P"):
matched = True
for expected in ((u"u", u"U"), (u"b", u"B"), (u"l", u"L"),
(u"i", u"I"), (u"c", u"C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.beforeDoctypePublicIdentifierState
return True
elif data in (u"s", u"S"):
matched = True
for expected in ((u"y", u"Y"), (u"s", u"S"), (u"t", u"T"),
(u"e", u"E"), (u"m", u"M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.beforeDoctypeSystemIdentifierState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = u""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = u""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = u""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == u">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
            # XXX EMIT: emit whatever doctype we have and let the data state
            # handle the ungot EOF.
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
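
# A minimal sketch of how these state methods are usually driven (an
# assumption modelled on html5lib's tokenizer loop, not code from this
# file): self.state holds the current state method; each call consumes
# input, may append tokens to self.tokenQueue, and returns True while
# there is input left.
#
#     def __iter__(self):
#         self.tokenQueue = []
#         self.state = self.dataState
#         while self.state():
#             while self.tokenQueue:
#                 yield self.tokenQueue.pop(0)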
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'PendingProjectMember', fields ['project', 'email']
db.delete_unique('sentry_pendingprojectmember', ['project_id', 'email'])
# Deleting model 'PendingProjectMember'
db.delete_table('sentry_pendingprojectmember')
# Adding model 'PendingTeamMember'
db.create_table('sentry_pendingteammember', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(related_name='pending_member_set', to=orm['sentry.Team'])),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('type', self.gf('django.db.models.fields.IntegerField')(default=0)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['PendingTeamMember'])
# Adding unique constraint on 'PendingTeamMember', fields ['team', 'email']
db.create_unique('sentry_pendingteammember', ['team_id', 'email'])
def backwards(self, orm):
# Removing unique constraint on 'PendingTeamMember', fields ['team', 'email']
db.delete_unique('sentry_pendingteammember', ['team_id', 'email'])
# Adding model 'PendingProjectMember'
db.create_table('sentry_pendingprojectmember', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='pending_member_set', to=orm['sentry.Project'])),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('type', self.gf('django.db.models.fields.IntegerField')(default=0)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
))
db.send_create_signal('sentry', ['PendingProjectMember'])
# Adding unique constraint on 'PendingProjectMember', fields ['project', 'email']
db.create_unique('sentry_pendingprojectmember', ['project_id', 'email'])
# Deleting model 'PendingTeamMember'
db.delete_table('sentry_pendingteammember')
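    # South's frozen snapshot of every referenced model. It is used to build
    # the `orm` object passed to forwards()/backwards(), so lookups such as
    # orm['sentry.Team'] resolve against the schema as of this migration.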
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 3, 29, 45, 137609)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 3, 29, 45, 137481)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_project_set'", 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
|
|
"""
Tests for youtube api
"""
import random
import string
import pytest
from googleapiclient.errors import HttpError
from cloudsync.conftest import MockHttpErrorResponse
from cloudsync.youtube import YouTubeApi, YouTubeUploadException, strip_bad_chars
from ui.constants import VideoStatus
from ui.factories import VideoFileFactory, VideoSubtitleFactory, VideoFactory
pytestmark = pytest.mark.django_db
# pylint: disable=redefined-outer-name,unused-argument,no-value-for-parameter,unused-variable
def test_youtube_settings(mocker, settings):
"""
Test that Youtube object creation uses YT_* settings for credentials
"""
settings.YT_ACCESS_TOKEN = "yt_access_token"
settings.YT_CLIENT_ID = "yt_client_id"
settings.YT_CLIENT_SECRET = "yt_secret"
settings.YT_REFRESH_TOKEN = "yt_refresh"
mock_oauth = mocker.patch("cloudsync.youtube.oauth2client.client.GoogleCredentials")
YouTubeApi()
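    # GoogleCredentials is expected to be called positionally; the two Nones
    # correspond to token_expiry and user_agent in oauth2client's signature.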
mock_oauth.assert_called_with(
settings.YT_ACCESS_TOKEN,
settings.YT_CLIENT_ID,
settings.YT_CLIENT_SECRET,
settings.YT_REFRESH_TOKEN,
None,
"https://accounts.google.com/o/oauth2/token",
None,
)
def test_upload_video(mocker):
"""
Test that the upload_video task calls the YouTube API execute method
"""
videofile = VideoFileFactory()
youtube_id = "M6LymW_8qVk"
video_upload_response = {
"id": youtube_id,
"kind": "youtube#video",
"snippet": {"description": "Testing description", "title": "Testing123"},
"status": {"uploadStatus": "uploaded"},
}
youtube_mocker = mocker.patch("cloudsync.youtube.build")
youtube_mocker().videos.return_value.insert.return_value.next_chunk.side_effect = [
(None, None),
(None, video_upload_response),
]
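    # next_chunk() yields (status, response) tuples and response stays None
    # until the resumable upload finishes, so upload_video must poll; the
    # first tuple simulates an in-progress chunk.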
response = YouTubeApi().upload_video(videofile.video)
assert response == video_upload_response
def test_upload_video_no_id(mocker):
"""
Test that the upload_video task fails if the response contains no id
"""
videofile = VideoFileFactory()
youtube_mocker = mocker.patch("cloudsync.youtube.build")
youtube_mocker().videos.return_value.insert.return_value.next_chunk.return_value = (
None,
{},
)
with pytest.raises(YouTubeUploadException):
YouTubeApi().upload_video(videofile.video)
@pytest.mark.parametrize(
["error", "retryable"],
[
[HttpError(MockHttpErrorResponse(500), b""), True],
[HttpError(MockHttpErrorResponse(403), b""), False],
[OSError, True],
[IndexError, False],
],
)
def test_upload_errors_retryable(mocker, error, retryable):
"""
Test that uploads are retried 10x for retryable exceptions
"""
youtube_mocker = mocker.patch("cloudsync.youtube.build")
mocker.patch("cloudsync.youtube.time")
videofile = VideoFileFactory()
youtube_mocker().videos.return_value.insert.return_value.next_chunk.side_effect = (
error
)
with pytest.raises(Exception) as exc:
YouTubeApi().upload_video(videofile.video)
assert str(exc.value).startswith("Retried YouTube upload 10x") == retryable
def test_upload_video_long_fields(mocker):
"""
Test that the upload_youtube_video task truncates title and description if too long
"""
title = "".join(random.choice(string.ascii_lowercase) for c in range(105))
desc = "".join(random.choice(string.ascii_lowercase) for c in range(5005))
video = VideoFactory.create(
title=title, description=desc, is_public=True, status=VideoStatus.COMPLETE
)
VideoFileFactory(video=video)
mocker.patch("cloudsync.youtube.resumable_upload")
youtube_mocker = mocker.patch("cloudsync.youtube.build")
mock_upload = youtube_mocker().videos.return_value.insert
YouTubeApi().upload_video(video)
called_args, called_kwargs = mock_upload.call_args
assert called_kwargs["body"]["snippet"]["title"] == title[:100]
assert called_kwargs["body"]["snippet"]["description"] == desc[:5000]
def test_upload_caption_calls_insert(mocker):
"""
Test that the upload_caption task calls insert_caption for a YouTube video if no caption for that language exists
"""
subtitle = VideoSubtitleFactory()
caption_id = "foo"
caption_response = {"id": caption_id}
mocker.patch("cloudsync.youtube.YouTubeApi.list_captions", return_value={})
youtube_mocker = mocker.patch("cloudsync.youtube.build")
youtube_mocker().captions.return_value.insert.return_value.next_chunk.return_value = (
None,
caption_response,
)
response = YouTubeApi().upload_caption(subtitle, caption_id)
assert response == caption_response
def test_upload_caption_calls_update(mocker):
"""
Test that the upload_caption task calls update_caption for a YouTube video if a caption for that language exists
"""
subtitle = VideoSubtitleFactory()
caption_id = "bar"
caption_response = {"id": caption_id}
mocker.patch(
"cloudsync.youtube.YouTubeApi.list_captions", return_value={"en": caption_id}
)
youtube_mocker = mocker.patch("cloudsync.youtube.build")
youtube_mocker().captions.return_value.update.return_value.next_chunk.return_value = (
None,
caption_response,
)
response = YouTubeApi().upload_caption(subtitle, caption_id)
assert response == caption_response
def test_delete_video(mocker):
"""
Test that the 'delete_video' method executes a YouTube API deletion request and returns the status code
"""
youtube_mocker = mocker.patch("cloudsync.youtube.build")
youtube_mocker().videos.return_value.delete.return_value.execute.return_value = 204
assert YouTubeApi().delete_video("foo") == 204
youtube_mocker().videos.return_value.delete.assert_called_with(id="foo")
def test_list_captions(mocker):
"""
Test that the 'list_captions' method executes a YouTube API request and returns a dict with correct values
"""
key = "en"
value = "Srnr982VEC79QzEBGcBOL_UFmu9U2e-JgOw-EWIxJXEB5Bjltl3Yvg="
youtube_mocker = mocker.patch("cloudsync.youtube.build")
youtube_mocker().captions.return_value.list.return_value.execute.return_value = {
"etag": "foo",
"items": [
{
"id": value,
"kind": "youtube#caption",
"snippet": {
"audioTrackType": "unknown",
"language": key,
"lastUpdated": "2017-11-15T14:53:21.839Z",
"name": "English",
"videoId": "3h-0mkTVbRg",
},
}
],
"kind": "youtube#captionListResponse",
}
assert YouTubeApi().list_captions("foo") == {key: value}
youtube_mocker().captions.return_value.list.assert_called_once_with(
videoId="foo", part="snippet"
)
def test_delete_caption(mocker):
"""
Test that the 'delete_caption' method executes a YouTube API deletion request and returns the status code
"""
youtube_mocker = mocker.patch("cloudsync.youtube.build")
youtube_mocker().captions.return_value.delete.return_value.execute.return_value = (
204
)
assert YouTubeApi().delete_caption("foo") == 204
youtube_mocker().captions.return_value.delete.assert_called_once_with(id="foo")
def test_video_status(mocker):
"""
Test that the 'video_status' method returns the correct value from the API response
"""
expected_status = "processed"
youtube_mocker = mocker.patch("cloudsync.youtube.build")
youtube_mocker().videos.return_value.list.return_value.execute.return_value = {
"etag": '"ld9biNPKjAjgjV7EZ4EKeEGrhao/Lf7oS5V-Gjw0XHBBKFJRpn60z3w"',
"items": [
{
"etag": '"ld9biNPKjAjgjV7EZ4EKeEGrhao/-UL82wRXbq3YJiMZuZpqCWKoq6Q"',
"id": "wAjoqsZng_M",
"kind": "youtube#video",
"status": {
"embeddable": True,
"license": "youtube",
"privacyStatus": "unlisted",
"publicStatsViewable": True,
"uploadStatus": expected_status,
},
}
],
"kind": "youtube#videoListResponse",
"pageInfo": {"resultsPerPage": 1, "totalResults": 1},
}
assert YouTubeApi().video_status("foo") == expected_status
youtube_mocker().videos.return_value.list.assert_called_once_with(
id="foo", part="status"
)
def test_strip_bad_chars():
"""
Test that `<`,`>` characters are removed from text
"""
assert strip_bad_chars("<OV>S>") == "OVS"
|
|
from direct.actor import Actor
from direct.directnotify.DirectNotifyGlobal import *
from direct.distributed import ClockDelta
from direct.interval.IntervalGlobal import *
from direct.showbase import PythonUtil
from direct.showutil import Rope
from direct.task import Task
from pandac.PandaModules import *
import DistributedFurnitureItem
import PhoneGlobals
from toontown.catalog import CatalogItem
from toontown.catalog.CatalogGUI import CatalogGUI
from toontown.catalog.CatalogItemListGUI import CatalogItemListGUI
from toontown.catalog.CatalogItemSorter import CatalogItemSorter
from toontown.quest import Quests
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.toontowngui import TTDialog
class DistributedPhone(DistributedFurnitureItem.DistributedFurnitureItem):
notify = directNotify.newCategory('DistributedPhone')
movieDelay = 0.5
def __init__(self, cr):
DistributedFurnitureItem.DistributedFurnitureItem.__init__(self, cr)
self.lastAvId = 0
self.hasLocalAvatar = 0
self.lastTime = 0
self.initialScale = None
self.usedInitialScale = 0
self.toonScale = None
self.phoneGui = None
self.phoneDialog = None
self.model = None
self.cord = None
self.receiverGeom = None
self.receiverJoint = None
self.phoneSphereEvent = 'phoneSphere'
self.phoneSphereEnterEvent = 'enter' + self.phoneSphereEvent
self.phoneGuiDoneEvent = 'phoneGuiDone'
self.pickupMovieDoneEvent = 'phonePickupDone'
self.numHouseItems = None
self.interval = None
self.intervalAvatar = None
self.phoneInUse = 0
self.origToonHpr = None
def announceGenerate(self):
self.notify.debug('announceGenerate')
DistributedFurnitureItem.DistributedFurnitureItem.announceGenerate(self)
self.accept(self.phoneSphereEnterEvent, self.__handleEnterSphere)
self.load()
taskMgr.doMethodLater(6, self.ringIfHasPhoneQuest, self.uniqueName('ringDoLater'))
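    # The Actor below loads every animation in all of its body-size variants;
    # the two-letter prefixes (SS, SM, ..., LL) appear to be the short/
    # medium/long torso-and-leg combinations of the toon models.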
def loadModel(self):
self.model = Actor.Actor('phase_5.5/models/estate/prop_phone-mod', {'SS_phoneOut': 'phase_5.5/models/estate/prop_phone-SS_phoneOut',
'SS_takePhone': 'phase_5.5/models/estate/prop_phone-SS_takePhone',
'SS_phoneNeutral': 'phase_5.5/models/estate/prop_phone-SS_phoneNeutral',
'SS_phoneBack': 'phase_5.5/models/estate/prop_phone-SS_phoneBack',
'SM_phoneOut': 'phase_5.5/models/estate/prop_phone-SM_phoneOut',
'SM_takePhone': 'phase_5.5/models/estate/prop_phone-SM_takePhone',
'SM_phoneNeutral': 'phase_5.5/models/estate/prop_phone-SM_phoneNeutral',
'SM_phoneBack': 'phase_5.5/models/estate/prop_phone-SM_phoneBack',
'SL_phoneOut': 'phase_5.5/models/estate/prop_phone-SL_phoneOut',
'SL_takePhone': 'phase_5.5/models/estate/prop_phone-SL_takePhone',
'SL_phoneNeutral': 'phase_5.5/models/estate/prop_phone-SL_phoneNeutral',
'SL_phoneBack': 'phase_5.5/models/estate/prop_phone-SL_phoneBack',
'MS_phoneOut': 'phase_5.5/models/estate/prop_phone-MS_phoneOut',
'MS_takePhone': 'phase_5.5/models/estate/prop_phone-MS_takePhone',
'MS_phoneNeutral': 'phase_5.5/models/estate/prop_phone-MS_phoneNeutral',
'MS_phoneBack': 'phase_5.5/models/estate/prop_phone-MS_phoneBack',
'MM_phoneOut': 'phase_5.5/models/estate/prop_phone-MM_phoneOut',
'MM_takePhone': 'phase_5.5/models/estate/prop_phone-MM_takePhone',
'MM_phoneNeutral': 'phase_5.5/models/estate/prop_phone-MM_phoneNeutral',
'MM_phoneBack': 'phase_5.5/models/estate/prop_phone-MM_phoneBack',
'ML_phoneOut': 'phase_5.5/models/estate/prop_phone-ML_phoneOut',
'ML_takePhone': 'phase_5.5/models/estate/prop_phone-ML_takePhone',
'ML_phoneNeutral': 'phase_5.5/models/estate/prop_phone-ML_phoneNeutral',
'ML_phoneBack': 'phase_5.5/models/estate/prop_phone-ML_phoneBack',
'LS_phoneOut': 'phase_5.5/models/estate/prop_phone-LS_phoneOut',
'LS_takePhone': 'phase_5.5/models/estate/prop_phone-LS_takePhone',
'LS_phoneNeutral': 'phase_5.5/models/estate/prop_phone-LS_phoneNeutral',
'LS_phoneBack': 'phase_5.5/models/estate/prop_phone-LS_phoneBack',
'LM_phoneOut': 'phase_5.5/models/estate/prop_phone-LM_phoneOut',
'LM_takePhone': 'phase_5.5/models/estate/prop_phone-LM_takePhone',
'LM_phoneNeutral': 'phase_5.5/models/estate/prop_phone-LM_phoneNeutral',
'LM_phoneBack': 'phase_5.5/models/estate/prop_phone-LM_phoneBack',
'LL_phoneOut': 'phase_5.5/models/estate/prop_phone-LL_phoneOut',
'LL_takePhone': 'phase_5.5/models/estate/prop_phone-LL_takePhone',
'LL_phoneNeutral': 'phase_5.5/models/estate/prop_phone-LL_phoneNeutral',
'LL_phoneBack': 'phase_5.5/models/estate/prop_phone-LL_phoneBack'})
self.model.pose('SS_phoneOut', 0)
self.receiverJoint = self.model.find('**/joint_receiver')
self.receiverGeom = self.receiverJoint.getChild(0)
mount = loader.loadModel('phase_5.5/models/estate/phoneMount-mod')
mount.setTransparency(0, 1)
self.model.reparentTo(mount)
self.ringSfx = loader.loadSfx('phase_3.5/audio/sfx/telephone_ring.ogg')
self.handleSfx = loader.loadSfx('phase_5.5/audio/sfx/telephone_handle2.ogg')
self.hangUpSfx = loader.loadSfx('phase_5.5/audio/sfx/telephone_hang_up.ogg')
self.pickUpSfx = loader.loadSfx('phase_5.5/audio/sfx/telephone_pickup1.ogg')
if self.initialScale:
mount.setScale(*self.initialScale)
self.usedInitialScale = 1
phoneSphere = CollisionSphere(0, -0.66, 0, 0.2)
phoneSphere.setTangible(0)
phoneSphereNode = CollisionNode(self.phoneSphereEvent)
phoneSphereNode.setIntoCollideMask(ToontownGlobals.WallBitmask)
phoneSphereNode.addSolid(phoneSphere)
mount.attachNewNode(phoneSphereNode)
if not self.model.find('**/CurveNode7').isEmpty():
self.setupCord()
return mount
def setupCamera(self, mode):
base.camera.wrtReparentTo(render)
if mode == PhoneGlobals.PHONE_MOVIE_PICKUP:
            base.camera.posQuatInterval(1, (4, -4, base.localAvatar.getHeight() - 0.5), (35, -8, 0), other=base.localAvatar, blendType='easeOut').start()
def setupCord(self):
if self.cord:
self.cord.detachNode()
self.cord = None
self.cord = Rope.Rope(self.uniqueName('phoneCord'))
self.cord.setColor(0, 0, 0, 1)
self.cord.setup(4, ((self.receiverGeom, (0, 0, 0)),
(self.model.find('**/joint_curveNode1'), (0, 0, 0)),
(self.model.find('**/joint_curveNode2'), (0, 0, 0)),
(self.model.find('**/joint_curveNode3'), (0, 0, 0)),
(self.model.find('**/joint_curveNode4'), (0, 0, 0)),
(self.model.find('**/joint_curveNode5'), (0, 0, 0)),
(self.model.find('**/joint_curveNode6'), (0, 0, 0)),
(self.model.find('**/CurveNode7'), (0, 0, 0))))
self.cord.reparentTo(self.model)
self.cord.node().setBounds(BoundingSphere(Point3(-1.0, -3.2, 2.6), 2.0))
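    # The cord is an order-4 NURBS rope threaded from the receiver geometry
    # through the joint_curveNode* joints, so it follows the receiver as the
    # phone animates; the explicit bounding sphere presumably keeps it from
    # being culled while the control joints move.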
def disable(self):
self.notify.debug('disable')
taskMgr.remove(self.uniqueName('ringDoLater'))
self.clearInterval()
if self.phoneGui:
self.phoneGui.hide()
self.phoneGui.unload()
self.phoneGui = None
if self.phoneDialog:
self.phoneDialog.cleanup()
self.phoneDialog = None
self.__receiverToPhone()
if self.hasLocalAvatar:
self.freeAvatar()
self.ignoreAll()
DistributedFurnitureItem.DistributedFurnitureItem.disable(self)
def delete(self):
self.notify.debug('delete')
self.model.cleanup()
DistributedFurnitureItem.DistributedFurnitureItem.delete(self)
def setInitialScale(self, sx, sy, sz):
self.initialScale = (sx, sy, sz)
if not self.usedInitialScale and self.model:
self.setScale(*self.initialScale)
self.usedInitialScale = 1
def __handleEnterSphere(self, collEntry):
if self.smoothStarted:
return
if base.localAvatar.doId == self.lastAvId and globalClock.getFrameTime() <= self.lastTime + 0.5:
self.notify.debug('Ignoring duplicate entry for avatar.')
return
if self.hasLocalAvatar:
self.freeAvatar()
if hasattr(base, 'wantPets') and base.wantPets:
base.localAvatar.lookupPetDNA()
self.notify.debug('Entering Phone Sphere....')
taskMgr.remove(self.uniqueName('ringDoLater'))
self.ignore(self.phoneSphereEnterEvent)
self.cr.playGame.getPlace().detectedPhoneCollision()
self.hasLocalAvatar = 1
self.sendUpdate('avatarEnter')
def __handlePhoneDone(self):
self.sendUpdate('avatarExit')
self.ignore(self.phoneGuiDoneEvent)
self.phoneGui = None
def freeAvatar(self):
if self.hasLocalAvatar:
base.localAvatar.speed = 0
taskMgr.remove(self.uniqueName('lerpCamera'))
base.localAvatar.posCamera(0, 0)
if base.cr.playGame.place != None:
base.cr.playGame.getPlace().setState('walk')
self.hasLocalAvatar = 0
self.ignore(self.pickupMovieDoneEvent)
self.accept(self.phoneSphereEnterEvent, self.__handleEnterSphere)
self.stopSmooth()
self.lastTime = globalClock.getFrameTime()
def setLimits(self, numHouseItems):
self.numHouseItems = numHouseItems
def setMovie(self, mode, avId, timestamp):
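        # Convert the server timestamp to locally elapsed time so clients that
        # receive the movie late can start the interval partway through.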
elapsed = ClockDelta.globalClockDelta.localElapsedTime(timestamp, bits=32)
elapsed = max(elapsed - self.movieDelay, 0)
self.ignore(self.pickupMovieDoneEvent)
if avId != 0:
self.lastAvId = avId
self.lastTime = globalClock.getFrameTime()
isLocalToon = avId == base.localAvatar.doId
avatar = self.cr.doId2do.get(avId)
if mode == PhoneGlobals.PHONE_MOVIE_CLEAR:
if self.phoneInUse:
self.clearInterval()
self.numHouseItems = None
self.phoneInUse = 0
elif mode == PhoneGlobals.PHONE_MOVIE_EMPTY:
if isLocalToon:
self.phoneDialog = TTDialog.TTDialog(dialogName='PhoneEmpty', style=TTDialog.Acknowledge, text=TTLocalizer.DistributedPhoneEmpty, text_wordwrap=15, fadeScreen=1, command=self.__clearDialog)
self.numHouseItems = None
self.phoneInUse = 0
elif mode == PhoneGlobals.PHONE_MOVIE_NO_HOUSE:
if isLocalToon:
self.phoneDialog = TTDialog.TTDialog(dialogName='PhoneNoHouse', style=TTDialog.Acknowledge, text=TTLocalizer.DistributedPhoneNoHouse, text_wordwrap=15, fadeScreen=1, command=self.__clearDialog)
self.numHouseItems = None
self.phoneInUse = 0
elif mode == PhoneGlobals.PHONE_MOVIE_PICKUP:
if avatar:
interval = self.takePhoneInterval(avatar)
if isLocalToon:
self.setupCamera(mode)
interval.setDoneEvent(self.pickupMovieDoneEvent)
self.acceptOnce(self.pickupMovieDoneEvent, self.__showPhoneGui)
self.playInterval(interval, elapsed, avatar)
self.phoneInUse = 1
elif mode == PhoneGlobals.PHONE_MOVIE_HANGUP:
if avatar:
interval = self.replacePhoneInterval(avatar)
self.playInterval(interval, elapsed, avatar)
self.numHouseItems = None
self.phoneInUse = 0
else:
self.notify.warning('unknown mode in setMovie: %s' % mode)
def __showPhoneGui(self):
if self.toonScale:
self.sendUpdate('setNewScale', [self.toonScale[0], self.toonScale[1], self.toonScale[2]])
self.phoneGui = CatalogGUI(self, doneEvent=self.phoneGuiDoneEvent)
# Hide the phone until we get our popular items set.
self.phoneGui.hide()
self.__generateCatalogPages()
self.acceptOnce('PopularItemsSet', self.__setPopularItems)
self.cr.catalogManager.fetchPopularItems()
self.accept(self.phoneGuiDoneEvent, self.__handlePhoneDone)
self.accept('phoneAsleep', self.__handlePhoneAsleep)
    def __generateCatalogPages(self):
        itemList = base.localAvatar.monthlyCatalog.generateList()
        itemList += base.localAvatar.weeklyCatalog.generateList()
        itemList += base.localAvatar.backCatalog.generateList()
        sortedItems = CatalogItemSorter(itemList).sortItems()
        # One page list per tab; the furniture tab also picks up any items
        # the sorter could not classify.
        tabContents = (
            ('FURNITURE_TAB', (('FURNITURE', 'Furniture'),
                               ('UNSORTED', 'Unsorted Items'))),
            ('EMOTE_TAB', (('EMOTIONS', 'Emotions'),)),
            ('SPECIAL_TAB', (('SPECIAL', 'Special'),)),
            ('CLOTHING_TAB', (('CLOTHING', 'Clothing'),)),
            ('PHRASES_TAB', (('PHRASES', 'Phrases'),)),
            ('NAMETAG_TAB', (('NAMETAG', 'Nametag'),)))
        for tabName, categories in tabContents:
            catalogItemList = CatalogItemListGUI(self.phoneGui)
            for sortKey, label in categories:
                for item in sortedItems[sortKey]:
                    catalogItemList.addItem(item, label)
            tab = self.phoneGui.tabButtons[tabName]
            tab.setCatalogItemPages(catalogItemList.generatePages())
            tab.tabClicked()
def __setPopularItems(self):
# Generate a list of popular items.
itemList = self.cr.catalogManager.popularItems.generateList()
catalogItemList = CatalogItemListGUI(self.phoneGui)
for item in itemList:
catalogItemList.addItem(item, 'Popular')
self.phoneGui.tabButtons['POPULAR_TAB'].setCatalogItemPages(catalogItemList.generatePages())
        # Now that the popular items are set, we can show the CatalogGUI.
self.phoneGui.show()
# We want our default tab to be the popular tab. We need to click it twice to prevent a glitch.
self.phoneGui.tabButtons['POPULAR_TAB'].tabClicked()
def __handlePhoneAsleep(self):
self.ignore('phoneAsleep')
if self.phoneGui:
self.phoneGui.unload()
self.__handlePhoneDone()
def requestPurchase(self, item, callback, optional = -1):
blob = item.getBlob(store=CatalogItem.Customization)
context = self.getCallbackContext(callback, [item])
self.sendUpdate('requestPurchaseMessage', [context, blob, optional])
def requestGiftPurchase(self, item, targetDoID, callback, optional = -1):
blob = item.getBlob(store=CatalogItem.Customization)
context = self.getCallbackContext(callback, [item])
self.sendUpdate('requestGiftPurchaseMessage', [context, targetDoID,
blob, optional])
def requestPurchaseResponse(self, context, retcode):
self.doCallbackContext(context, [retcode])
def requestGiftPurchaseResponse(self, context, retcode):
self.doCallbackContext(context, [retcode])
def __clearDialog(self, event):
self.phoneDialog.cleanup()
self.phoneDialog = None
self.freeAvatar()
def takePhoneInterval(self, toon):
torso = TextEncoder.upper(toon.style.torso[0])
legs = TextEncoder.upper(toon.style.legs[0])
phoneOutAnim = '%s%s_phoneOut' % (torso, legs)
takePhoneAnim = '%s%s_takePhone' % (torso, legs)
phoneNeutralAnim = '%s%s_phoneNeutral' % (torso, legs)
self.toonScale = toon.getGeomNode().getChild(0).getScale(self.getParent())
walkTime = 1.0
scaleTime = 1.0
origScale = self.getScale()
origToonPos = toon.getPos()
origToonHpr = toon.getHpr()
self.origToonHpr = origToonHpr
self.setScale(self.toonScale)
toon.setPosHpr(self, 0, -4.5, 0, 0, 0, 0)
destToonPos = toon.getPos()
destToonHpr = toon.getHpr()
destToonHpr = VBase3(PythonUtil.fitSrcAngle2Dest(destToonHpr[0], origToonHpr[0]), destToonHpr[1], destToonHpr[2])
self.setScale(origScale)
toon.setPos(origToonPos)
toon.setHpr(origToonHpr)
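        # Walk the toon over to the phone while the phone scales to match,
        # then play the pickup animation on both actors, timing the receiver
        # hand-off to the pickup and handle sound effects.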
        walkToPhone = Sequence(
            Func(toon.stopSmooth),
            Func(toon.loop, 'walk'),
            Func(base.playSfx, base.localAvatar.soundWalk),
            toon.posHprInterval(walkTime, destToonPos, destToonHpr,
                                blendType='easeInOut'),
            Func(toon.loop, 'neutral'),
            Func(toon.startSmooth))
        interval = Sequence(
            Parallel(
                walkToPhone,
                ActorInterval(self.model, phoneOutAnim),
                self.scaleInterval(scaleTime, self.toonScale,
                                   blendType='easeInOut')),
            Parallel(
                ActorInterval(self.model, takePhoneAnim),
                ActorInterval(toon, 'takePhone'),
                Sequence(
                    Wait(0.625),
                    Func(base.playSfx, self.pickUpSfx),
                    Func(self.__receiverToHand, toon),
                    Wait(1),
                    Func(base.playSfx, self.handleSfx))),
            Func(self.model.loop, phoneNeutralAnim),
            Func(toon.loop, 'phoneNeutral'),
            Func(base.playSfx, self.ringSfx))
return interval
def replacePhoneInterval(self, toon):
torso = TextEncoder.upper(toon.style.torso[0])
legs = TextEncoder.upper(toon.style.legs[0])
phoneBackAnim = '%s%s_phoneBack' % (torso, legs)
scaleTime = 1.0
        interval = Sequence(
            Parallel(
                ActorInterval(self.model, phoneBackAnim),
                ActorInterval(toon, 'phoneBack'),
                Sequence(
                    Wait(1.0),
                    Func(self.__receiverToPhone),
                    Func(base.playSfx, self.hangUpSfx))),
            self.scaleInterval(scaleTime,
                               localAvatar.getGeomNode().getScale()[2],
                               blendType='easeInOut'),
            Func(toon.loop, 'neutral'))
if self.origToonHpr:
interval.append(Func(toon.setHpr, self.origToonHpr))
self.origToonHpr = None
if toon == base.localAvatar:
interval.append(Func(self.freeAvatar))
return interval
def __receiverToHand(self, toon):
self.receiverGeom.reparentTo(toon.leftHand)
self.receiverGeom.setPosHpr(0.0906813, 0.380375, 0.1, 32.41, 70.68, 137.04)
def __receiverToPhone(self):
self.receiverGeom.reparentTo(self.receiverJoint)
self.receiverGeom.setPosHpr(0, 0, 0, 0, 0, 0)
def playInterval(self, interval, elapsed, avatar):
if self.interval != None:
self.interval.finish()
self.interval = None
self.interval = interval
self.interval.start(elapsed)
if self.intervalAvatar != avatar:
if self.intervalAvatar:
self.ignore(self.intervalAvatar.uniqueName('disable'))
if avatar:
self.accept(avatar.uniqueName('disable'), self.clearInterval)
self.intervalAvatar = avatar
def clearInterval(self):
if self.interval != None:
self.interval.finish()
self.interval = None
if self.intervalAvatar:
self.ignore(self.intervalAvatar.uniqueName('disable'))
self.intervalAvatar = None
self.__receiverToPhone()
self.model.pose('SS_phoneOut', 0)
self.phoneInUse = 0
def ringIfHasPhoneQuest(self, task):
if Quests.avatarHasPhoneQuest(base.localAvatar) and not Quests.avatarHasCompletedPhoneQuest(base.localAvatar):
self.ring()
return Task.done
def ring(self):
if self.phoneInUse:
return 0
phone = self.find('**/prop_phone')
r = 2.0
w = 0.05
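        # Waggle the handset between +r and -r degrees every 0.05s, sixteen
        # times, in sync with the ring sound effect.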
shakeOnce = Sequence(Func(phone.setR, r), Wait(w), Func(phone.setR, -r), Wait(w))
shakeSeq = Sequence()
for i in xrange(16):
shakeSeq.append(shakeOnce)
ringIval = Parallel(Func(base.playSfx, self.ringSfx), shakeSeq, Func(phone.setR, 0))
self.playInterval(ringIval, 0.0, None)
def purchaseItemComplete(self):
self.phoneGui.updateItems()
|
|
#!/usr/bin/python
import argparse
import re
import os
import sys
class Sample:
def __init__(self,id,file):
self.id = id
self.file = file
def process_input():
##############
    # INPUT PROCESSING
##############
calling_dir = os.path.dirname(os.path.realpath(__file__))
print "########################################################"
print "Take a list of samples and Illumina standard reports and convert"
print "to lgen format which can then be converted to a bed file"
print "########################################################"
parser = argparse.ArgumentParser(description='Convert Illumina standard reports to lgen format')
parser.add_argument('--out_prefix', help='the output prefix', required=True)
    parser.add_argument('--sample_file', help='A tab-separated file: column 1 is the sample ID and column 2 is the file path', type=str, required=True)
    parser.add_argument('--manifest_csv', help='Illumina manifest file in comma-separated (CSV) format', type=str, required=True)
parser.add_argument('--skip_indels', help='Skip indel variants in manifest', action='store_true', required=False)
parser.add_argument('--indel_regex', help='Indel regex definition', type=str, default="[DI]", required=False)
parser.add_argument('--snp_col_name_manifest', help='SNP Name in manifest', type=str, default="Name", required=False)
    parser.add_argument('--snp_change_col_name_manifest', help='SNP change column name; typically formatted as [A/G]', type=str, default="SNP", required=False)
parser.add_argument('--chrom_col_name_manifest', help='Chrom column name in manifest', type=str, default="Chr", required=False)
parser.add_argument('--pos_col_name_manifest', help='Position column name in manifest', type=str, default="MapInfo", required=False)
parser.add_argument('--snp_col_name_report', help='SNP Name in report', type=str, default="SNP Name", required=False)
parser.add_argument('--allele_1_col_name_report', help='allele 1 column name in report', type=str, default="Allele1 - Forward", required=False)
parser.add_argument('--allele_2_col_name_report', help='allele 2 column name in report', type=str, default="Allele2 - Forward", required=False)
    parser.add_argument('--sep_report', help='Report field delimiter; default is a tab', type=str, default="\t", required=False)
args = parser.parse_args()
print
print "########################################################"
print "OPTIONS"
print "########################################################"
print
for attr, value in args.__dict__.iteritems():
print "{0:>25}\t\t{1!s}".format( "--{0}".format(attr), str(value))
sample_file_h = open(args.sample_file,"r")
samples = []
for line in sample_file_h:
cols = line.rstrip().split("\t")
sample = Sample(cols[0],cols[1])
samples.append(sample)
sample_file_h.close()
args.samples = samples
return args
def main():
args = process_input()
skip_indels = args.skip_indels
indel_regex = args.indel_regex
samples = args.samples
manifest_csv = args.manifest_csv
snp_col_name_manifest = args.snp_col_name_manifest
snp_change_col_name_manifest = args.snp_change_col_name_manifest
chrom_col_name_manifest = args.chrom_col_name_manifest
pos_col_name_manifest = args.pos_col_name_manifest
snp_col_name_report = args.snp_col_name_report
allele_1_col_name_report = args.allele_1_col_name_report
allele_2_col_name_report = args.allele_2_col_name_report
out_prefix = args.out_prefix
map_out = "{0}.map".format(out_prefix)
sep_report = args.sep_report
"""
MAP COLS:
chromosome (1-22, X, Y or 0 if unplaced)
rs# or snp identifier
Genetic distance (morgans)
Base-pair position (bp units)
"""
lgen_out = "{0}.lgen".format(out_prefix)
"""
LGEN COLS:
family ID
individual ID
snp ID
allele 1 of this genotype
allele 2 of this genotype
"""
fam_out = "{0}.fam".format(out_prefix)
"""
FAM COLS:
first six columns of PED
Family ID
Individual ID
Paternal ID
Maternal ID
Sex (1=male; 2=female; other=unknown)
Phenotype
"""
print "Checking that all files exists"
if not os.path.isfile(manifest_csv):
print "Manifest {0} does not exists!".format(manifest_csv)
for sample in samples:
if not os.path.isfile(sample.file):
print "Sample {0} file {1} does not exists!".format(sample.id,sample.file)
print "All files appear to exists."
print "Writing out FAM file: {0}".format(fam_out)
fam_out_h = open(fam_out,"w")
for sample in samples:
line_out = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n".format(sample.id,sample.id,0,0,0,0)
fam_out_h.write(line_out)
fam_out_h.close()
print "Finished writing FAM file: {0}".format(fam_out)
print
print "Processing manifest file {0}...".format(manifest_csv)
print
manifest_h = open(manifest_csv,"r")
map_out_h = open(map_out,"w")
    # Discover the header
snp_col_number_manifest = None
snp_change_col_number_manifest = None
chrom_col_number_manifest = None
pos_col_number_manifest = None
manifest_header_col_number = None
###
# SNPs to keep from manifest
###
snps_to_keep = dict()
manifest_line_num = 0
for line in manifest_h:
manifest_line_num += 1
cols = line.rstrip().split(",")
if (snp_col_name_manifest in cols and
snp_change_col_name_manifest in cols and chrom_col_name_manifest in cols and
pos_col_name_manifest in cols):
print "Header for manifest discovered..."
snp_col_number_manifest = cols.index(snp_col_name_manifest)
snp_change_col_number_manifest = cols.index(snp_change_col_name_manifest)
chrom_col_number_manifest = cols.index(chrom_col_name_manifest)
pos_col_number_manifest = cols.index(pos_col_name_manifest)
manifest_header_col_number = len(cols)
break
if snp_col_number_manifest is None:
raise ValueError("Header not discovered in manifest file please check manifest column names...")
for line in manifest_h:
manifest_line_num += 1
cols = line.rstrip().split(",")
if len(cols) != manifest_header_col_number:
print "Number of columns no longer match header at line {0:d}".format(manifest_line_num)
break
snp_id = cols[snp_col_number_manifest]
chrom = cols[chrom_col_number_manifest]
pos = cols[pos_col_number_manifest]
snp_change = cols[snp_change_col_number_manifest]
if manifest_line_num % 50000 == 0:
print "At SNP: {0} and line: {1}".format(snp_id,manifest_line_num)
match = re.search(indel_regex,snp_change)
if skip_indels and match is not None:
continue
else:
# Write to marker file
map_out_line = "{chrom}\t{snp_id}\t{morgan}\t{pos}\n".format(chrom=chrom,snp_id=snp_id,morgan=0,pos=pos)
map_out_h.write(map_out_line)
snps_to_keep[snp_id] = None
manifest_h.close()
map_out_h.close()
print "Finished processing manifest file"
print "Finished writing: {0}".format(map_out)
print "Found {0} variants in manifest".format(len(snps_to_keep))
print
print "Processing {0:d} samples...".format(len(samples))
print
marker_lines = []
header = None
lgen_out_h = open(lgen_out,"w")
sample_count = 0
snp_col_number_report = None
allele_1_col_number_report = None
allele_2_col_number_report = None
    for sample in samples:
        sample_count += 1
        # Reset header detection for each sample; otherwise a report with no
        # recognisable header would silently reuse the previous sample's
        # column indexes.
        snp_col_number_report = None
        allele_1_col_number_report = None
        allele_2_col_number_report = None
print "Processing {0}".format(sample.id)
print "Processing file: {0}".format(sample.file)
file_handle = open(sample.file,"r")
        # Scan forward until the header line is found
lines_skipped = 0
for line in file_handle:
line = line.rstrip()
lines_skipped += 1
#Match header line
cols = line.split(sep_report)
if (snp_col_name_report in cols and
allele_1_col_name_report in cols and
allele_2_col_name_report in cols):
snp_col_number_report = cols.index(snp_col_name_report)
allele_1_col_number_report = cols.index(allele_1_col_name_report)
allele_2_col_number_report = cols.index(allele_2_col_name_report)
print "Header:"
print line
break
if snp_col_number_report is None:
raise ValueError("Header not discovered in report file please check report column names...")
print "Lines skipped for header: {0:d}".format(lines_skipped)
# read data
line_count = 0
for line in file_handle:
#print line
cols = line.rstrip().split(sep_report)
snp_id = cols[snp_col_number_report]
allele_1 = cols[allele_1_col_number_report]
allele_2 = cols[allele_2_col_number_report]
if line_count % 50000 == 0:
print "At SNP: {0} and line: {1} for sample: {2}".format(snp_id,line_count,sample.id)
            # Skip SNPs that were not kept from the manifest
if snp_id not in snps_to_keep:
continue
if allele_1 == "-":
allele_1 = "0"
if allele_2 == "-":
allele_2 = "0"
lgen_line = "{fam_id}\t{ind_id}\t{snp_id}\t{allele_1}\t{allele_2}\n".format(fam_id=sample.id, ind_id=sample.id, snp_id=snp_id,allele_1=allele_1,allele_2=allele_2)
lgen_out_h.write(lgen_line)
line_count += 1
print "Lines {0} processed for sample {1}".format(line_count,sample.id)
file_handle.close()
lgen_out_h.close()
print "Finished writing: {0}".format(lgen_out)
print
print "OUTPUT FILES"
print "FAM: {0}".format(fam_out)
print "LGEN: {0}".format(lgen_out)
print "MAP: {0}".format(map_out)
print "Run: plink --lfile {0} --make-bed --out {0}".format(out_prefix)
if __name__ == '__main__':
main()
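# Example invocation (hypothetical file names; the column-name defaults match
# Illumina's standard "Forward" report):
#   python illumina_report_to_lgen.py --out_prefix study1 \
#       --sample_file samples.tsv --manifest_csv chip_manifest.csv --skip_indels
# The resulting .lgen/.map/.fam trio can then be converted with:
#   plink --lfile study1 --make-bed --out study1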
|
|
"""kytos.utils.napps tests."""
import json
import re
import tempfile
import unittest
from pathlib import Path, PurePosixPath
from unittest.mock import MagicMock, Mock, PropertyMock, call, patch
from urllib.error import HTTPError
from kytos.utils.exceptions import KytosException
from kytos.utils.napps import NAppsManager
from kytos.utils.settings import SKEL_PATH
# pylint: disable=protected-access, too-many-public-methods
class TestNapps(unittest.TestCase):
"""Test the class NAppsManager."""
def setUp(self):
"""Execute steps before each tests."""
self.napps_manager = NAppsManager()
@staticmethod
def get_napps_response_mock(napps=None):
"""Get mock to napps response."""
if napps is None:
napps = [["kytos", "mef_eline"], ["kytos", "of_lldp"]]
response = json.dumps({"napps": napps})
mock_response = MagicMock()
mock_response.getcode.return_value = 200
mock_response.read.return_value = response
mock_response.__enter__.return_value = mock_response
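        # __enter__ returns the mock itself so the fake response also works
        # as a context manager (`with urlopen(...) as response:`).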
return mock_response
@patch('urllib.request.urlopen')
def test_enabled_property(self, mock_urlopen):
"""Test enabled property."""
data = MagicMock()
data.read.return_value = '{"napps": "ABC", "installed_napps": "DEF"}'
mock_urlopen.return_value = data
self.assertEqual(str(self.napps_manager._enabled), 'ABC')
def test_enabled_property__error(self):
"""Test enabled property to error case."""
with self.assertRaises(SystemExit):
# pylint: disable=pointless-statement
self.napps_manager._enabled
# pylint: enable=pointless-statement
self.assertIsNone(self.napps_manager._NAppsManager__local_enabled)
@patch('urllib.request.urlopen')
def test_installed_property(self, mock_urlopen):
"""Test installed property."""
data = MagicMock()
data.read.return_value = '{"napps": "ABC", "installed_napps": "DEF"}'
mock_urlopen.return_value = data
self.assertEqual(str(self.napps_manager._installed), 'DEF')
def test_installed_property__error(self):
"""Test installed property to error case."""
with self.assertRaises(SystemExit):
# pylint: disable=pointless-statement
self.napps_manager._installed
# pylint: enable=pointless-statement
self.assertIsNone(self.napps_manager._NAppsManager__local_installed)
def test_napp_id_property(self):
"""Test napp_id property."""
self.napps_manager.user = 'user'
self.napps_manager.napp = 'napp'
self.assertEqual(self.napps_manager.napp_id, 'user/napp')
def test_set_napp(self):
"""Test set_napp method."""
self.napps_manager.set_napp('user', 'napp', 'version')
self.assertEqual(self.napps_manager.user, 'user')
self.assertEqual(self.napps_manager.napp, 'napp')
self.assertEqual(self.napps_manager.version, 'version')
def test_get_napps(self):
"""Test method get_napps used to find
enabled and installed napps.
"""
mock_path = Mock()
def glob_side_effect(args):
"""Path.glob to mock finding paths with kytos.json file."""
self.assertEqual(args, "*/*/kytos.json")
mock_path1 = Mock()
mock_path1.parts = ['kytos', 'of_core', 'kytos.json']
mock_path2 = Mock()
mock_path2.parts = ['kytos', 'of_lldp', 'kytos.json']
return [mock_path1, mock_path2]
mock_path.glob = glob_side_effect
# pylint: disable=protected-access
get_return = self.napps_manager._get_napps(mock_path)
self.assertEqual(get_return[0][0], 'kytos')
self.assertEqual(get_return[0][1], 'of_core')
self.assertEqual(get_return[1][0], 'kytos')
self.assertEqual(get_return[1][1], 'of_lldp')
def test_get_enabled_local(self):
"""Test get_enabled_local used to find
enabled napps in local machine"""
# Mock kytos.json path
mock_path = Mock()
def glob_side_effect(args):
"""Path.glob to mock finding paths with kytos.json file."""
self.assertEqual(args, "*/*/kytos.json")
mock_path1 = Mock()
mock_path1.parts = ['kytos', 'of_core', 'kytos.json']
return [mock_path1]
mock_path.glob = glob_side_effect
mock_prop_enabled = PropertyMock()
with patch.object(NAppsManager, '_enabled', mock_prop_enabled):
mock_prop_enabled.return_value = mock_path
get_return = self.napps_manager.get_enabled_local()
self.assertEqual(get_return[0][0], 'kytos')
self.assertEqual(get_return[0][1], 'of_core')
self.assertEqual(mock_prop_enabled.call_count, 1)
def test_get_installed_local(self):
"""Test get_installed_local used to find
installed napps in local machine"""
# Mock kytos.json path
mock_path = Mock()
def glob_side_effect(args):
"""Path.glob to mock finding paths with kytos.json file."""
self.assertEqual(args, "*/*/kytos.json")
mock_path1 = Mock()
mock_path1.parts = ['kytos', 'of_core', 'kytos.json']
return [mock_path1]
mock_path.glob = glob_side_effect
mock_prop_installed = PropertyMock()
with patch.object(NAppsManager, '_installed', mock_prop_installed):
mock_prop_installed.return_value = mock_path
get_return = self.napps_manager.get_installed_local()
self.assertEqual(get_return[0][0], 'kytos')
self.assertEqual(get_return[0][1], 'of_core')
self.assertEqual(mock_prop_installed.call_count, 1)
@patch('urllib.request.urlopen')
def test_get_installed(self, mock_urlopen):
"""Test method get_installed to find all installed napps."""
mock_urlopen.return_value = self.get_napps_response_mock()
installed_napps = self.napps_manager.get_installed()
self.assertEqual(len(installed_napps), 2)
self.assertEqual(installed_napps[0], ("kytos", "mef_eline"))
self.assertEqual(installed_napps[1], ("kytos", "of_lldp"))
def test_get_installed__connection_error(self):
"""Test method get_installed to connection error case."""
with self.assertRaises(KytosException) as context:
self.napps_manager.get_installed()
self.assertEqual('<urlopen error [Errno 111] Connection refused>',
str(context.exception))
@patch('urllib.request.urlopen')
def test_get_installed__error(self, mock_urlopen):
"""Test method get_installed with API error."""
mock_response = MagicMock()
mock_response.getcode.return_value = 500
mock_urlopen.return_value = mock_response
with self.assertRaises(KytosException) as context:
self.napps_manager.get_installed()
self.assertEqual('Error calling Kytos to check installed NApps.',
str(context.exception))
@patch('urllib.request.urlopen')
def test_get_enabled(self, mock_urlopen):
"""Test method get_enabled to find all enabled napps."""
mock_urlopen.return_value = self.get_napps_response_mock()
installed_napps = self.napps_manager.get_enabled()
self.assertEqual(len(installed_napps), 2)
self.assertEqual(installed_napps[0], ("kytos", "mef_eline"))
self.assertEqual(installed_napps[1], ("kytos", "of_lldp"))
def test_get_enabled__connection_error(self):
"""Test method get_enabled to connection error case."""
with self.assertRaises(KytosException) as context:
self.napps_manager.get_enabled()
self.assertEqual('<urlopen error [Errno 111] Connection refused>',
str(context.exception))
@patch('urllib.request.urlopen')
def test_get_enabled__error(self, mock_urlopen):
"""Test method get_enabled with API error."""
mock_response = MagicMock()
mock_response.getcode.return_value = 500
mock_urlopen.return_value = mock_response
with self.assertRaises(KytosException) as context:
self.napps_manager.get_enabled()
self.assertEqual('Error calling Kytos to check enabled NApps.',
str(context.exception))
@patch('urllib.request.urlopen')
def test_is_enabled(self, mock_urlopen):
"""Test is_enabled method."""
mock_urlopen.return_value = self.get_napps_response_mock()
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
self.assertTrue(self.napps_manager.is_enabled())
@patch('urllib.request.urlopen')
def test_is_installed(self, mock_urlopen):
"""Test is_installed method."""
mock_urlopen.return_value = self.get_napps_response_mock()
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
self.assertTrue(self.napps_manager.is_installed())
@patch('urllib.request.urlopen')
def test_get_disabled(self, mock_urlopen):
"""Test get_disabled method."""
enabled = [["kytos", "mef_eline"]]
mock_urlopen.side_effect = [self.get_napps_response_mock(),
self.get_napps_response_mock(enabled)]
disabled = self.napps_manager.get_disabled()
self.assertEqual(disabled, [('kytos', 'of_lldp')])
@patch('urllib.request.urlopen')
def test_dependencies(self, mock_urlopen):
"""Test dependencies method."""
napps = {"napp_dependencies": ["kytos/mef_eline", "kytos/of_lldp"]}
data = MagicMock()
data.read.return_value = json.dumps(napps)
mock_urlopen.return_value = data
dependencies = self.napps_manager.dependencies()
expected_dependencies = [('kytos', 'mef_eline'), ('kytos', 'of_lldp')]
self.assertEqual(dependencies, expected_dependencies)
@patch('urllib.request.urlopen')
def test_get_description(self, mock_urlopen):
"""Test get_description method."""
data = MagicMock()
data.read.return_value = '{"description": "ABC"}'
mock_urlopen.return_value = data
description = self.napps_manager.get_description()
self.assertEqual(description, 'ABC')
@patch('urllib.request.urlopen')
def test_get_version(self, mock_urlopen):
"""Test get_version method."""
data = MagicMock()
data.read.return_value = '{"version": "123"}'
mock_urlopen.return_value = data
version = self.napps_manager.get_version()
self.assertEqual(version, '123')
@patch('urllib.request.urlopen')
def test_get_napp_key(self, mock_urlopen):
"""Test _get_napp_key method."""
data = MagicMock()
data.read.return_value = '{"key": "ABC"}'
mock_urlopen.return_value = data
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
meta_key = self.napps_manager._get_napp_key('key')
self.assertEqual(meta_key, 'ABC')
@patch('urllib.request.urlopen')
def test_disable(self, mock_urlopen):
"""Test disable method."""
data = MagicMock()
data.read.return_value = '{}'
mock_urlopen.return_value = data
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
self.napps_manager.disable()
uri = self.napps_manager._kytos_api + self.napps_manager._NAPP_DISABLE
uri = uri.format('kytos', 'mef_eline')
mock_urlopen.assert_called_with(uri)
@patch('kytos.utils.napps.LOG')
@patch('urllib.request.urlopen')
def test_disable__error(self, *args):
"""Test disable method to error case."""
(mock_urlopen, mock_logger) = args
http_errors = [HTTPError('url', 400, 'msg', 'hdrs', MagicMock()),
HTTPError('url', 500, 'msg', 'hdrs', MagicMock())]
mock_urlopen.side_effect = http_errors
self.napps_manager.disable()
self.napps_manager.disable()
self.assertEqual(mock_logger.error.call_count, 2)
@patch('urllib.request.urlopen')
def test_enable(self, mock_urlopen):
"""Test enable method."""
data = MagicMock()
data.read.return_value = '{}'
mock_urlopen.return_value = data
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
self.napps_manager.enable()
uri = self.napps_manager._kytos_api + self.napps_manager._NAPP_ENABLE
uri = uri.format('kytos', 'mef_eline')
mock_urlopen.assert_called_with(uri)
@patch('kytos.utils.napps.LOG')
@patch('urllib.request.urlopen')
def test_enable__error(self, *args):
"""Test enable method to error case."""
(mock_urlopen, mock_logger) = args
http_errors = [HTTPError('url', 400, 'msg', 'hdrs', MagicMock()),
HTTPError('url', 500, 'msg', 'hdrs', MagicMock())]
mock_urlopen.side_effect = http_errors
self.napps_manager.enable()
self.napps_manager.enable()
self.assertEqual(mock_logger.error.call_count, 2)
@patch('urllib.request.urlopen')
def test_enabled_dir(self, mock_urlopen):
"""Test enabled_dir method."""
data = MagicMock()
data.read.return_value = '{"napps": "ABC", "installed_napps": "DEF"}'
mock_urlopen.return_value = data
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
enabled_dir = self.napps_manager.enabled_dir()
self.assertEqual(str(enabled_dir), 'ABC/kytos/mef_eline')
@patch('urllib.request.urlopen')
def test_installed_dir(self, mock_urlopen):
"""Test installed_dir method."""
data = MagicMock()
data.read.return_value = '{"napps": "ABC", "installed_napps": "DEF"}'
mock_urlopen.return_value = data
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
installed_dir = self.napps_manager.installed_dir()
self.assertEqual(str(installed_dir), 'DEF/kytos/mef_eline')
@patch('urllib.request.urlopen')
def test_remote_uninstall(self, mock_urlopen):
"""Test remote_uninstall method."""
data = MagicMock()
data.read.return_value = '{}'
mock_urlopen.return_value = data
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
self.napps_manager.remote_uninstall()
uninstall_uri = self.napps_manager._NAPP_UNINSTALL
uri = self.napps_manager._kytos_api + uninstall_uri
uri = uri.format('kytos', 'mef_eline')
mock_urlopen.assert_called_with(uri)
@patch('kytos.utils.napps.LOG')
@patch('urllib.request.urlopen')
def test_remote_uninstall__error(self, *args):
"""Test remote_uninstall method to error case."""
(mock_urlopen, mock_logger) = args
http_errors = [HTTPError('url', 400, 'msg', 'hdrs', MagicMock()),
HTTPError('url', 500, 'msg', 'hdrs', MagicMock())]
mock_urlopen.side_effect = http_errors
self.napps_manager.remote_uninstall()
self.napps_manager.remote_uninstall()
self.assertEqual(mock_logger.error.call_count, 2)
@patch('urllib.request.urlopen')
def test_remote_install(self, mock_urlopen):
"""Test remote_install method."""
data = MagicMock()
data.read.return_value = '{}'
mock_urlopen.return_value = data
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
self.napps_manager.remote_install()
install_uri = self.napps_manager._NAPP_INSTALL
uri = self.napps_manager._kytos_api + install_uri
uri = uri.format('kytos', 'mef_eline')
mock_urlopen.assert_called_with(uri)
def test_valid_name(self):
"""Test valid_name method."""
valid_name = self.napps_manager.valid_name('username')
invalid_name = self.napps_manager.valid_name('_username')
self.assertTrue(valid_name)
self.assertFalse(invalid_name)
@patch('jinja2.Environment.get_template')
def test_render_template(self, mock_get_template):
"""Test render_template method."""
template = MagicMock()
mock_get_template.return_value = template
self.napps_manager.render_template('', 'filename', 'context')
mock_get_template.assert_called_with('filename')
template.render.assert_called_with('context')
@patch('kytos.utils.napps.NAppsClient')
def test_search(self, mock_napps_client):
"""Test search method."""
napp_1 = {'username': 'kytos', 'name': 'mef_eline', 'description': '',
'tags': ['A', 'B']}
napp_2 = {'username': '0_kytos', 'name': 'any', 'description': '',
'tags': ['A', 'B']}
napps_client = MagicMock()
napps_client.get_napps.return_value = [napp_1, napp_2]
mock_napps_client.return_value = napps_client
# pattern to match strings that start with letters
pattern = re.compile('^[a-z]+')
napps = self.napps_manager.search(pattern)
self.assertEqual(napps, [napp_1])
@patch('os.makedirs')
@patch('builtins.open')
@patch('builtins.input')
@patch('kytos.utils.napps.NAppsManager.render_template')
def test_create_napp(self, *args):
"""Test create_napp method."""
(mock_render_template, mock_input, _, mock_mkdirs) = args
mock_input.side_effect = ['username', 'napp', None]
self.napps_manager.create_napp()
tmpl_path = SKEL_PATH / 'napp-structure/username/napp'
description = '# TODO: <<<< Insert your NApp description here >>>>'
context = {'username': 'username', 'napp': 'napp',
'description': description}
calls = []
for tmp in ['__init__.py', 'main.py', '.gitignore', 'kytos.json',
'README.rst', 'settings.py']:
calls.append(call(tmpl_path, '{}.template'.format(tmp), context))
calls.append(call('{}/ui'.format(tmpl_path), 'README.rst.template',
context))
mock_mkdirs.assert_has_calls([call('username', exist_ok=True),
call('username/napp'),
call('username/napp/ui/k-info-panel'),
call('username/napp/ui/k-toolbar'),
call('username/napp/ui/k-action-menu')])
mock_render_template.assert_has_calls(calls, any_order=True)
def test_check_module(self):
"""Test _check_module method."""
folder = MagicMock()
folder.exists.return_value = False
self.napps_manager._check_module(folder)
folder.mkdir.assert_called()
(folder / '__init__.py').touch.assert_called()
@patch('pathspec.pathspec.PathSpec.match_tree')
@patch('tarfile.TarFile.add')
@patch('os.remove')
@patch('os.walk')
@patch('os.getcwd')
@patch('builtins.open')
def test_build_napp_package(self, *args):
"""Test build_napp_package method."""
(_, mock_getcwd, mock_walk, _, mock_add, mock_match_tree) = args
with tempfile.TemporaryDirectory() as tmp_dir:
mock_getcwd.return_value = tmp_dir
files = ['username/napp/A', 'username/napp/B', 'username/napp/C']
mock_walk.return_value = [(tmp_dir, ['username/napp/.git'], files)]
mock_match_tree.return_value = ['username/napp/C']
self.napps_manager.build_napp_package('username/napp')
calls = [call(PurePosixPath('username/napp/A')),
call(PurePosixPath('username/napp/B'))]
mock_add.assert_has_calls(calls)
@patch('ruamel.yaml.YAML.load', return_value='openapi')
@patch('pathlib.Path.open')
@patch('builtins.open')
def test_create_metadata(self, *args):
"""Test create_metadata method."""
(mock_open, _, _) = args
enter_file_1 = MagicMock()
enter_file_1.read.return_value = '{}'
enter_file_2 = MagicMock()
enter_file_2.read.return_value = 'readme'
mock_open.return_value.__enter__.side_effect = [enter_file_1,
enter_file_2]
metadata = self.napps_manager.create_metadata()
self.assertEqual(metadata, {'readme': 'readme',
'OpenAPI_Spec': '"openapi"'})
@patch('kytos.utils.napps.NAppsClient')
@patch('kytos.utils.napps.NAppsManager.build_napp_package')
@patch('kytos.utils.napps.NAppsManager.create_metadata')
@patch('kytos.utils.napps.NAppsManager.prepare')
def test_upload(self, *args):
"""Test upload method."""
(mock_prepare, mock_create, mock_build, mock_napps_client) = args
mock_create.return_value = {'name': 'ABC'}
mock_build.return_value = 'package'
napps_client = MagicMock()
mock_napps_client.return_value = napps_client
self.napps_manager.upload()
mock_prepare.assert_called()
mock_create.assert_called()
mock_build.assert_called_with('ABC')
napps_client.upload_napp.assert_called_with({'name': 'ABC'}, 'package')
@patch('kytos.utils.napps.NAppsClient')
def test_delete(self, mock_napps_client):
"""Test delete method."""
napps_client = MagicMock()
mock_napps_client.return_value = napps_client
self.napps_manager.user = 'kytos'
self.napps_manager.napp = 'mef_eline'
self.napps_manager.delete()
napps_client.delete.assert_called_with('kytos', 'mef_eline')
@patch('sys.exit')
@patch('kytos.utils.napps.OpenAPI')
@patch('kytos.utils.napps.NAppsManager._ask_openapi', return_value=True)
def test_prepare(self, *args):
"""Test prepare method."""
(_, mock_openapi, _) = args
self.napps_manager.prepare()
napp_path = Path()
tpl_path = SKEL_PATH / 'napp-structure/username/napp'
mock_openapi.assert_called_with(napp_path, tpl_path)
mock_openapi.return_value.render_template.assert_called()
@patch('pathlib.Path.exists')
@patch('builtins.input')
def test_ask_openapi(self, *args):
"""Test _ask_openapi method."""
(mock_input, mock_exists) = args
mock_input.side_effect = ['', '', 'yes', 'no']
mock_exists.side_effect = [True, False, False, False]
for expected in [False, True, True, False]:
response = self.napps_manager._ask_openapi()
self.assertEqual(response, expected)
@patch('kytos.utils.napps.NAppsClient')
def test_reload(self, mock_napps_client):
"""Test reload method."""
napps_client = MagicMock()
mock_napps_client.return_value = napps_client
napps = []
self.napps_manager.reload(napps)
napps_client.reload_napps.assert_called_with(napps)
|
|
#!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for lib.named_array."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import enum
import numpy as np
from pysc2.lib import named_array
class NamedDictTest(absltest.TestCase):
def test_named_dict(self):
a = named_array.NamedDict(a=2, b=(1, 2))
self.assertEqual(a["a"], a.a)
self.assertEqual(a["b"], a.b)
self.assertIs(a["b"], a.b)
self.assertNotEqual(a["a"], a.b)
a.c = 3
self.assertEqual(a["c"], 3)
class TestEnum(enum.IntEnum):
a = 0
b = 1
c = 2
class BadEnum(enum.IntEnum):
a = 1
b = 2
c = 3
class TestNamedTuple(collections.namedtuple("TestNamedTuple", ["a", "b", "c"])):
pass
class BadNamedTuple(collections.namedtuple("BadNamedTuple", ["a", "b"])):
pass
class NamedArrayTest(parameterized.TestCase):
def assertArrayEqual(self, a, b):
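    # Delegate to numpy's own comparison so a failure reports a full
    # element-wise diff instead of a bare boolean.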
np.testing.assert_array_equal(a, b)
@parameterized.named_parameters(
("none", None),
("none2", [None]),
("short_list", ["a"]),
("long_list", ["a", "b", "c", "d"]),
("long_list2", [["a", "b", "c", "d"]]),
("ints", [[1, "b", 3]]),
("bad_enum", [BadEnum]),
("bad_namedtuple", [BadNamedTuple]),
("dict", [{"a": 0, "b": 1, "c": 2}]),
("set", [{"a", "b", "c"}]),
)
def test_bad_names(self, names):
with self.assertRaises(ValueError):
named_array.NamedNumpyArray([1, 3, 6], names)
@parameterized.named_parameters(
("list", ["a", "b", "c"]),
("tuple", ("a", "b", "c")),
("list2", [["a", "b", "c"]]),
("tuple2", (("a", "b", "c"))),
("list_tuple", [("a", "b", "c")]),
("named_tuple", TestNamedTuple),
("named_tuple2", [TestNamedTuple]),
("int_enum", TestEnum),
("int_enum2", [TestEnum]),
)
def test_single_dimension(self, names):
a = named_array.NamedNumpyArray([1, 3, 6], names)
self.assertEqual(a[0], 1)
self.assertEqual(a[1], 3)
self.assertEqual(a[2], 6)
self.assertEqual(a[-1], 6)
self.assertEqual(a.a, 1)
self.assertEqual(a.b, 3)
self.assertEqual(a.c, 6)
with self.assertRaises(AttributeError):
a.d # pylint: disable=pointless-statement
self.assertEqual(a["a"], 1)
self.assertEqual(a["b"], 3)
self.assertEqual(a["c"], 6)
with self.assertRaises(KeyError):
a["d"] # pylint: disable=pointless-statement
# New axis = None
self.assertArrayEqual(a, [1, 3, 6])
self.assertArrayEqual(a[np.newaxis], [[1, 3, 6]])
self.assertArrayEqual(a[None], [[1, 3, 6]])
self.assertArrayEqual(a[None, :], [[1, 3, 6]])
self.assertArrayEqual(a[:, None], [[1], [3], [6]])
self.assertArrayEqual(a[None, :, None], [[[1], [3], [6]]])
self.assertArrayEqual(a[None, a % 3 == 0, None], [[[3], [6]]])
self.assertArrayEqual(a[None][None], [[[1, 3, 6]]])
self.assertArrayEqual(a[None][0], [1, 3, 6])
self.assertEqual(a[None, 0], 1)
self.assertEqual(a[None, "a"], 1)
self.assertEqual(a[None][0].a, 1)
self.assertEqual(a[None][0, "b"], 3)
# range slicing
self.assertArrayEqual(a[0:2], [1, 3])
self.assertArrayEqual(a[1:3], [3, 6])
self.assertArrayEqual(a[0:2:], [1, 3])
self.assertArrayEqual(a[0:2:1], [1, 3])
self.assertArrayEqual(a[::2], [1, 6])
self.assertArrayEqual(a[::-1], [6, 3, 1])
self.assertEqual(a[1:3][0], 3)
self.assertEqual(a[1:3].b, 3)
self.assertEqual(a[1:3].c, 6)
# list slicing
self.assertArrayEqual(a[[0, 0]], [1, 1])
self.assertArrayEqual(a[[0, 1]], [1, 3])
self.assertArrayEqual(a[[1, 0]], [3, 1])
self.assertArrayEqual(a[[1, 2]], [3, 6])
self.assertArrayEqual(a[np.array([0, 2])], [1, 6])
self.assertEqual(a[[1, 2]].b, 3)
self.assertEqual(a[[2, 0]].c, 6)
with self.assertRaises(TypeError):
# Duplicates lead to unnamed dimensions.
a[[0, 0]].a # pylint: disable=pointless-statement
a[1] = 4
self.assertEqual(a[1], 4)
self.assertEqual(a.b, 4)
self.assertEqual(a["b"], 4)
a[1:2] = 2
self.assertEqual(a[1], 2)
self.assertEqual(a.b, 2)
self.assertEqual(a["b"], 2)
a[[1]] = 3
self.assertEqual(a[1], 3)
self.assertEqual(a.b, 3)
self.assertEqual(a["b"], 3)
a.b = 5
self.assertEqual(a[1], 5)
self.assertEqual(a.b, 5)
self.assertEqual(a["b"], 5)
def test_empty_array(self):
named_array.NamedNumpyArray([], [None, ["a", "b"]])
with self.assertRaises(ValueError):
# Must be the right length.
named_array.NamedNumpyArray([], [["a", "b"]])
with self.assertRaises(ValueError):
# Returning an empty slice is not supported, and it's not clear how or
# even if it should be supported.
named_array.NamedNumpyArray([], [["a", "b"], None])
with self.assertRaises(ValueError):
# Scalar arrays are unsupported.
named_array.NamedNumpyArray(1, [])
def test_named_array_multi_first(self):
a = named_array.NamedNumpyArray([[1, 3], [6, 8]], [["a", "b"], None])
self.assertArrayEqual(a.a, [1, 3])
self.assertArrayEqual(a[1], [6, 8])
self.assertArrayEqual(a["b"], [6, 8])
self.assertArrayEqual(a[::-1], [[6, 8], [1, 3]])
self.assertArrayEqual(a[::-1][::-1], [[1, 3], [6, 8]])
self.assertArrayEqual(a[::-1, ::-1], [[8, 6], [3, 1]])
self.assertArrayEqual(a[::-1][0], [6, 8])
self.assertArrayEqual(a[::-1, 0], [6, 1])
self.assertArrayEqual(a[::-1, 1], [8, 3])
self.assertArrayEqual(a[::-1].a, [1, 3])
self.assertArrayEqual(a[::-1].a[0], 1)
self.assertArrayEqual(a[::-1].b, [6, 8])
self.assertArrayEqual(a[[0, 0]], [[1, 3], [1, 3]])
with self.assertRaises(TypeError):
a[[0, 0]].a # pylint: disable=pointless-statement
self.assertEqual(a[0, 1], 3)
self.assertEqual(a[(0, 1)], 3)
self.assertEqual(a["a", 0], 1)
self.assertEqual(a["b", 0], 6)
self.assertEqual(a["b", 1], 8)
self.assertEqual(a.a[0], 1)
self.assertArrayEqual(a[a > 2], [3, 6, 8])
self.assertArrayEqual(a[a % 3 == 0], [3, 6])
with self.assertRaises(TypeError):
a[0].a # pylint: disable=pointless-statement
# New axis = None
self.assertArrayEqual(a, [[1, 3], [6, 8]])
self.assertArrayEqual(a[np.newaxis], [[[1, 3], [6, 8]]])
self.assertArrayEqual(a[None], [[[1, 3], [6, 8]]])
self.assertArrayEqual(a[None, :], [[[1, 3], [6, 8]]])
self.assertArrayEqual(a[None, "a"], [[1, 3]])
self.assertArrayEqual(a[:, None], [[[1, 3]], [[6, 8]]])
self.assertArrayEqual(a[None, :, None], [[[[1, 3]], [[6, 8]]]])
self.assertArrayEqual(a[None, 0, None], [[[1, 3]]])
self.assertArrayEqual(a[None, "a", None], [[[1, 3]]])
self.assertArrayEqual(a[None][None], [[[[1, 3], [6, 8]]]])
self.assertArrayEqual(a[None][0], [[1, 3], [6, 8]])
self.assertArrayEqual(a[None][0].a, [1, 3])
self.assertEqual(a[None][0].a[0], 1)
self.assertEqual(a[None][0, "b", 1], 8)
def test_named_array_multi_second(self):
a = named_array.NamedNumpyArray([[1, 3], [6, 8]], [None, ["a", "b"]])
self.assertArrayEqual(a[0], [1, 3])
self.assertEqual(a[0, 1], 3)
self.assertEqual(a[0, "a"], 1)
self.assertEqual(a[0, "b"], 3)
self.assertEqual(a[1, "b"], 8)
self.assertEqual(a[0].a, 1)
self.assertArrayEqual(a[a > 2], [3, 6, 8])
self.assertArrayEqual(a[a % 3 == 0], [3, 6])
with self.assertRaises(TypeError):
a.a # pylint: disable=pointless-statement
self.assertArrayEqual(a[None, :, "a"], [[1, 6]])
def test_masking(self):
a = named_array.NamedNumpyArray([[1, 2, 3, 4], [5, 6, 7, 8]],
[None, list("abcd")])
self.assertArrayEqual(a[a > 2], [3, 4, 5, 6, 7, 8])
self.assertArrayEqual(a[a < 4], [1, 2, 3])
self.assertArrayEqual(a[a % 2 == 0], [2, 4, 6, 8])
self.assertArrayEqual(a[a % 3 == 0], [3, 6])
def test_slicing(self):
a = named_array.NamedNumpyArray([1, 2, 3, 4, 5], list("abcde"))
self.assertArrayEqual(a[:], [1, 2, 3, 4, 5])
self.assertArrayEqual(a[::], [1, 2, 3, 4, 5])
self.assertArrayEqual(a[::2], [1, 3, 5])
self.assertArrayEqual(a[::-1], [5, 4, 3, 2, 1])
self.assertEqual(a[:].a, 1)
self.assertEqual(a[::].b, 2)
self.assertEqual(a[::2].c, 3)
with self.assertRaises(AttributeError):
a[::2].d # pylint: disable=pointless-statement
self.assertEqual(a[::-1].e, 5)
self.assertArrayEqual(a[a % 2 == 0], [2, 4])
self.assertEqual(a[a % 2 == 0].b, 2)
a = named_array.NamedNumpyArray([[1, 2, 3, 4], [5, 6, 7, 8]],
[None, list("abcd")])
self.assertArrayEqual(a[:], [[1, 2, 3, 4], [5, 6, 7, 8]])
self.assertArrayEqual(a[::], [[1, 2, 3, 4], [5, 6, 7, 8]])
self.assertArrayEqual(a[:, :], [[1, 2, 3, 4], [5, 6, 7, 8]])
self.assertArrayEqual(a[:, ...], [[1, 2, 3, 4], [5, 6, 7, 8]])
self.assertArrayEqual(a[..., ::], [[1, 2, 3, 4], [5, 6, 7, 8]])
self.assertArrayEqual(a[:, ::2], [[1, 3], [5, 7]])
self.assertArrayEqual(a[::-1], [[5, 6, 7, 8], [1, 2, 3, 4]])
self.assertArrayEqual(a[..., ::-1], [[4, 3, 2, 1], [8, 7, 6, 5]])
self.assertArrayEqual(a[:, ::-1], [[4, 3, 2, 1], [8, 7, 6, 5]])
self.assertArrayEqual(a[:, ::-2], [[4, 2], [8, 6]])
self.assertArrayEqual(a[:, -2::-2], [[3, 1], [7, 5]])
self.assertArrayEqual(a[::-1, -2::-2], [[7, 5], [3, 1]])
self.assertArrayEqual(a[..., 0, 0], 1) # weird scalar arrays...
a = named_array.NamedNumpyArray(
[[[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
[[[8, 9], [10, 11]], [[12, 13], [14, 15]]]],
[["a", "b"], ["c", "d"], ["e", "f"], ["g", "h"]])
self.assertEqual(a.a.c.e.g, 0)
self.assertEqual(a.b.c.f.g, 10)
self.assertEqual(a.b.d.f.h, 15)
self.assertArrayEqual(a[0, ..., 0], [[0, 2], [4, 6]])
self.assertArrayEqual(a[0, ..., 1], [[1, 3], [5, 7]])
self.assertArrayEqual(a[0, 0, ..., 1], [1, 3])
self.assertArrayEqual(a[0, ..., 1, 1], [3, 7])
self.assertArrayEqual(a[..., 1, 1], [[3, 7], [11, 15]])
self.assertArrayEqual(a[1, 0, ...], [[8, 9], [10, 11]])
self.assertArrayEqual(a["a", ..., "g"], [[0, 2], [4, 6]])
self.assertArrayEqual(a["a", ...], [[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
self.assertArrayEqual(a[..., "g"], [[[0, 2], [4, 6]], [[8, 10], [12, 14]]])
self.assertArrayEqual(a["a", "c"], [[0, 1], [2, 3]])
self.assertArrayEqual(a["a", ...].c, [[0, 1], [2, 3]])
self.assertArrayEqual(a["a", ..., "g"].c, [0, 2])
with self.assertRaises(TypeError):
a[np.array([[0, 1], [0, 1]])] # pylint: disable=pointless-statement, expression-not-assigned
with self.assertRaises(IndexError):
a[..., 0, ...] # pylint: disable=pointless-statement
def test_string(self):
a = named_array.NamedNumpyArray([1, 3, 6], ["a", "b", "c"], dtype=np.int32)
self.assertEqual(str(a), "[1 3 6]")
self.assertEqual(repr(a), ("NamedNumpyArray([1, 3, 6], ['a', 'b', 'c'], "
"dtype=int32)"))
a = named_array.NamedNumpyArray([[1, 3], [6, 8]], [None, ["a", "b"]])
self.assertEqual(str(a), "[[1 3]\n [6 8]]")
self.assertEqual(repr(a), ("NamedNumpyArray([[1, 3],\n"
" [6, 8]], [None, ['a', 'b']])"))
a = named_array.NamedNumpyArray([[1, 3], [6, 8]], [["a", "b"], None])
self.assertEqual(str(a), "[[1 3]\n [6 8]]")
self.assertEqual(repr(a), ("NamedNumpyArray([[1, 3],\n"
" [6, 8]], [['a', 'b'], None])"))
a = named_array.NamedNumpyArray([0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[str(i) for i in range(13)], dtype=np.int32)
numpy_repr = np.array_repr(a)
if "\n" in numpy_repr: # ie numpy > 1.14
self.assertEqual(repr(a), """
NamedNumpyArray([ 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0],
['0', '1', '2', '3', '4', '...', '8', '9', '10', '11', '12'],
dtype=int32)""".strip()) # Keep the middle newlines.
else:
      self.assertEqual(repr(a), (
          "NamedNumpyArray("
          "[ 0,  0,  0, 50,  0,  0,  0,  0,  0,  0,  0,  0,  0], "
          "['0', '1', '2', '3', '4', '...', '8', '9', '10', '11', '12'], "
          "dtype=int32)"))  # Note the lack of newlines.
a = named_array.NamedNumpyArray([list(range(50))] * 50,
[None, ["a%s" % i for i in range(50)]])
self.assertIn("49", str(a))
self.assertIn("49", repr(a))
self.assertIn("a4", repr(a))
self.assertIn("a49", repr(a))
a = named_array.NamedNumpyArray([list(range(50))] * 50,
[["a%s" % i for i in range(50)], None])
self.assertIn("49", str(a))
self.assertIn("49", repr(a))
self.assertIn("a4", repr(a))
self.assertIn("a49", repr(a))
def test_pickle(self):
arr = named_array.NamedNumpyArray([1, 3, 6], ["a", "b", "c"])
pickled = pickle.loads(pickle.dumps(arr))
self.assertTrue(np.all(arr == pickled))
self.assertEqual(repr(pickled),
"NamedNumpyArray([1, 3, 6], ['a', 'b', 'c'])")
if __name__ == "__main__":
absltest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# fsfs-reshard.py REPOS_PATH MAX_FILES_PER_SHARD
#
# Perform an offline conversion of an FSFS repository between linear (format
# 2, usable by Subversion 1.4+) and sharded (format 3, usable by Subversion
# 1.5+) layouts.
#
# The MAX_FILES_PER_SHARD argument specifies the maximum number of files
# that will be stored in each shard (directory), or zero to specify a linear
# layout. Subversion 1.5 uses a default value of 1000 files per shard.
#
# As the repository will not be valid while the conversion is in progress,
# the repository administrator must ensure that access to the repository is
# blocked for the duration of the conversion.
#
# In the event that the conversion is interrupted, the repository will be in
# an inconsistent state. The repository administrator should then re-run
# this tool to completion.
#
#
# Note that, currently, resharding from one sharded layout to another is
# likely to be an extremely slow process. To reshard, we convert from a
# sharded to linear layout and then to the new sharded layout. The problem
# is that the initial conversion to the linear layout triggers exactly the
# same 'large number of files in a directory' problem that sharding is
# intended to solve.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# $HeadURL$
# $LastChangedDate$
# $LastChangedBy$
# $LastChangedRevision$
import os, stat, sys
from errno import EEXIST
def usage():
"""Print a usage message and exit."""
print("""usage: %s REPOS_PATH MAX_FILES_PER_SHARD [START END]
Perform an offline conversion of an FSFS repository between linear
(readable by Subversion 1.4 or later) and sharded (readable by
Subversion 1.5 or later) layouts.
The MAX_FILES_PER_SHARD argument specifies the maximum number of
files that will be stored in each shard (directory), or zero to
specify a linear layout. Subversion 1.5 uses a default value of
1000 files per shard.
Convert revisions START through END inclusive if specified, or all
revisions if unspecified.
""" % sys.argv[0])
sys.exit(1)
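# Example (hypothetical repository path): convert to shards of at most 1000
# files each with `fsfs-reshard.py /var/svn/repos 1000`, or back to a linear
# layout with `fsfs-reshard.py /var/svn/repos 0`.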
def incompatible_repos_format(repos_path, format):
"""Print an error saying that REPOS_PATH is a repository with an
incompatible repository format FORMAT, then exit."""
sys.stderr.write("""error: unable to convert repository '%s'.
This repository is not compatible with this tool. Valid
repository formats are '3' or '5'; this repository is
format '%s'.
""" % (repos_path, format))
sys.stderr.flush()
sys.exit(1)
def incompatible_fs_format(repos_path, format):
"""Print an error saying that REPOS_PATH is a repository with an
incompatible filesystem format FORMAT, then exit."""
sys.stderr.write("""error: unable to convert repository '%s'.
This repository contains a filesystem that is not compatible with
this tool. Valid filesystem formats are '1', '2', or '3'; this
repository contains a filesystem with format '%s'.
""" % (repos_path, format))
sys.stderr.flush()
sys.exit(1)
def unexpected_fs_format_options(repos_path):
"""Print an error saying that REPOS_PATH is a repository with
unexpected filesystem format options, then exit."""
sys.stderr.write("""error: unable to convert repository '%s'.
This repository contains a filesystem that appears to be invalid -
there is unexpected data after the filesystem format number.
""" % repos_path)
sys.stderr.flush()
sys.exit(1)
def incompatible_fs_format_option(repos_path, option):
"""Print an error saying that REPOS_PATH is a repository with an
incompatible filesystem format option OPTION, then exit."""
sys.stderr.write("""error: unable to convert repository '%s'.
This repository contains a filesystem that is not compatible with
this tool. This tool recognises the 'layout' option but the
filesystem uses the '%s' option.
""" % (repos_path, option))
sys.stderr.flush()
sys.exit(1)
def warn_about_fs_format_1(repos_path, format_path):
"""Print a warning saying that REPOS_PATH contains a format 1 FSFS
filesystem that we can't reconstruct, then exit."""
sys.stderr.write("""warning: conversion of '%s' will be one-way.
This repository is currently readable by Subversion 1.1 or later.
This tool can convert this repository to one that is readable by
either Subversion 1.4 (or later) or Subversion 1.5 (or later),
but it is not able to convert it back to the original format - a
separate dump/load step would be required.
If you would like to upgrade this repository anyway, delete the
file '%s' and re-run this tool.
""" % (repos_path, format_path))
sys.stderr.flush()
sys.exit(1)
def check_repos_format(repos_path):
"""Check that REPOS_PATH contains a repository with a suitable format;
print a message and exit if not."""
format_path = os.path.join(repos_path, 'format')
try:
format_file = open(format_path)
format = format_file.readline()
if not format.endswith('\n'):
incompatible_repos_format(repos_path, format + ' <missing newline>')
format = format.rstrip('\n')
if format == '3' or format == '5':
pass
else:
incompatible_repos_format(repos_path, format)
except IOError:
# In all likelihood, the file doesn't exist.
incompatible_repos_format(repos_path, '<unreadable>')
def check_fs_format(repos_path):
"""Check that REPOS_PATH contains a filesystem with a suitable format,
or that it contains no format file; print a message and exit if neither
is true. Return bool whether the filesystem is sharded."""
sharded = False
db_path = os.path.join(repos_path, 'db')
format_path = os.path.join(db_path, 'format')
try:
format_file = open(format_path)
format = format_file.readline()
if not format.endswith('\n'):
incompatible_fs_format(repos_path, format + ' <missing newline>')
format = format.rstrip('\n')
if format == '1':
# This is a format 1 (svndiff0 only) filesystem. We can upgrade it,
# but we can't downgrade again (since we can't uncompress any of the
# svndiff1 deltas that may have been written). Warn the user and exit.
warn_about_fs_format_1(repos_path, format_path)
if format == '2':
pass
elif format == '3':
pass
else:
incompatible_fs_format(repos_path, format)
for line in format_file:
if format == '2':
unexpected_fs_format_options(repos_path)
line = line.rstrip('\n')
if line == 'layout linear':
pass
elif line.startswith('layout sharded '):
sharded = True
else:
incompatible_fs_format_option(repos_path, line)
format_file.close()
except IOError:
# The format file might not exist if we've previously been interrupted,
# or if the user is following our advice about upgrading a format 1
# repository. In both cases, we'll just assume the format was
# compatible.
pass
return sharded
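# For reference, the db/format files read and written by this tool look
# like the following ('1000' is just an example shard size):
#
#   a format 2 (linear) filesystem:      a format 3 (sharded) filesystem:
#       2                                    3
#                                            layout sharded 1000
#
# The first line holds the filesystem format number; any further lines
# hold format options, of which only 'layout linear' and
# 'layout sharded <N>' are recognised above.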
def current_file(repos_path):
"""Return triple of (revision, next_node_id, next_copy_id) from
REPOS_PATH/db/current ."""
return open(os.path.join(repos_path, 'db', 'current')).readline().split()
def remove_fs_format(repos_path):
"""Remove the filesystem format file for repository REPOS_PATH.
Do not raise an error if the file is already missing."""
format_path = os.path.join(repos_path, 'db', 'format')
try:
statinfo = os.stat(format_path)
except OSError:
# The file probably doesn't exist.
return
# On Windows, we need to ensure the file is writable before we can
# remove it.
os.chmod(format_path, statinfo.st_mode | stat.S_IWUSR)
os.remove(format_path)
def write_fs_format(repos_path, contents):
"""Write a new filesystem format file for repository REPOS_PATH containing
CONTENTS."""
format_path = os.path.join(repos_path, 'db', 'format')
f = open(format_path, 'wb')
f.write(contents)
f.close()
os.chmod(format_path, stat.S_IRUSR | stat.S_IRGRP)
def linearise(path):
"""Move all the files in subdirectories of PATH into PATH, and remove the
subdirectories. Handle conflicts between subdirectory names and files
contained in subdirectories by ensuring subdirectories have a '.shard'
  suffix prior to moving (the files are assumed not to have this suffix).
Abort if a subdirectory is found to contain another subdirectory."""
# First enumerate all subdirectories of DIR and rename where necessary
# to include a .shard suffix.
for name in os.listdir(path):
if name.endswith('.shard'):
continue
subdir_path = os.path.join(path, name)
if not os.path.isdir(subdir_path):
continue
os.rename(subdir_path, subdir_path + '.shard')
# Now move all the subdirectory contents into the parent and remove
# the subdirectories.
for root_path, dirnames, filenames in os.walk(path):
if root_path == path:
continue
if len(dirnames) > 0:
sys.stderr.write("error: directory '%s' contains other unexpected directories.\n" \
% root_path)
sys.stderr.flush()
sys.exit(1)
for name in filenames:
from_path = os.path.join(root_path, name)
to_path = os.path.join(path, name)
os.rename(from_path, to_path)
os.rmdir(root_path)
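# Illustrative effect (example revision numbers only): linearise() turns
#
#   db/revs/0/0 ... db/revs/0/999, db/revs/1/1000 ... db/revs/1/1999
#
# into db/revs/0 ... db/revs/1999. Each shard directory is renamed to
# '<N>.shard' first, so a directory named '5' can never collide with a
# revision file named '5' while its contents are being moved up.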
def shard(path, max_files_per_shard, start, end):
"""Move the files for revisions START to END inclusive in PATH into
subdirectories of PATH named such that subdirectory '0' contains at most
MAX_FILES_PER_SHARD files, those named [0, MAX_FILES_PER_SHARD). Abort if
PATH is found to contain any entries with non-numeric names."""
tmp = path + '.reshard'
try:
os.mkdir(tmp)
except OSError, e:
if e.errno != EEXIST:
raise
# Move all entries into shards named N.shard.
for rev in range(start, end + 1):
name = str(rev)
shard = rev // max_files_per_shard
shard_name = str(shard) + '.shard'
from_path = os.path.join(path, name)
to_path = os.path.join(tmp, shard_name, name)
try:
os.rename(from_path, to_path)
except OSError:
# The most likely explanation is that the shard directory doesn't
# exist. Let's create it and retry the rename.
os.mkdir(os.path.join(tmp, shard_name))
os.rename(from_path, to_path)
# Now rename all the shards to remove the suffix.
skipped = 0
for name in os.listdir(tmp):
if not name.endswith('.shard'):
sys.stderr.write("warning: ignoring unexpected subdirectory '%s'.\n" \
% os.path.join(tmp, name))
sys.stderr.flush()
skipped += 1
continue
from_path = os.path.join(tmp, name)
to_path = os.path.join(path, os.path.basename(from_path)[:-6])
os.rename(from_path, to_path)
  if skipped == 0:
    os.rmdir(tmp)
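# Illustrative layout (example values only): with max_files_per_shard=1000,
# shard() turns
#   db/revs/0, db/revs/1, ..., db/revs/2499
# into
#   db/revs/0/0 ... db/revs/0/999
#   db/revs/1/1000 ... db/revs/1/1999
#   db/revs/2/2000 ... db/revs/2/2499
# since each revision N lands in shard N // max_files_per_shard.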
def main():
if len(sys.argv) < 3:
usage()
repos_path = sys.argv[1]
max_files_per_shard = sys.argv[2]
try:
start = int(sys.argv[3])
end = int(sys.argv[4])
except IndexError:
start = 0
end = int(current_file(repos_path)[0])
# Validate the command-line arguments.
db_path = os.path.join(repos_path, 'db')
current_path = os.path.join(db_path, 'current')
if not os.path.exists(current_path):
sys.stderr.write("error: '%s' doesn't appear to be a Subversion FSFS repository.\n" \
% repos_path)
sys.stderr.flush()
sys.exit(1)
try:
max_files_per_shard = int(max_files_per_shard)
  except (ValueError, OverflowError):
sys.stderr.write("error: maximum files per shard ('%s') is not a valid number.\n" \
% max_files_per_shard)
sys.stderr.flush()
sys.exit(1)
if max_files_per_shard < 0:
sys.stderr.write("error: maximum files per shard ('%d') must not be negative.\n" \
% max_files_per_shard)
sys.stderr.flush()
sys.exit(1)
# Check the format of the repository.
check_repos_format(repos_path)
sharded = check_fs_format(repos_path)
# Let the user know what's going on.
if max_files_per_shard > 0:
print("Converting '%s' to a sharded structure with %d files per directory" \
% (repos_path, max_files_per_shard))
if sharded:
print('(will convert to a linear structure first)')
else:
print("Converting '%s' to a linear structure" % repos_path)
# Prevent access to the repository for the duration of the conversion.
# There's no clean way to do this, but since the format of the repository
# is indeterminate, let's remove the format file while we're converting.
print('- marking the repository as invalid')
remove_fs_format(repos_path)
# First, convert to a linear scheme (this makes recovery easier because
# it's easier to reason about the behaviour on restart).
if sharded:
print('- linearising db/revs')
linearise(os.path.join(repos_path, 'db', 'revs'))
print('- linearising db/revprops')
linearise(os.path.join(repos_path, 'db', 'revprops'))
if max_files_per_shard == 0:
# We're done. Stamp the filesystem with a format 2 db/format file.
print('- marking the repository as a valid linear repository')
write_fs_format(repos_path, '2\n')
else:
print('- sharding db/revs')
shard(os.path.join(repos_path, 'db', 'revs'), max_files_per_shard,
start, end)
print('- sharding db/revprops')
shard(os.path.join(repos_path, 'db', 'revprops'), max_files_per_shard,
start, end)
# We're done. Stamp the filesystem with a format 3 db/format file.
print('- marking the repository as a valid sharded repository')
write_fs_format(repos_path, '3\nlayout sharded %d\n' % max_files_per_shard)
print('- done.')
sys.exit(0)
if __name__ == '__main__':
raise Exception("""This script is unfinished and not ready to be used on live data.
Trust us.""")
main()
|
|
# coding=utf-8
# Waltz
# Compare results between wild type and mutant
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
from scipy import stats
from pylab import plot, show, savefig, xlim, figure, \
hold, ylim, legend, boxplot, setp, axes
import pylab
from numpy import *
def getColumn(filename, column, deli):
    """Return the values of column COLUMN from FILENAME as a list of strings."""
    with open(filename) as f:
        return [row[column] for row in csv.reader(f, delimiter=deli)]
#import files
file_wt = 'waltzresults_wt.csv'
file_mut = 'waltzresults_mut.csv'
#------------------------------------
# AGGREGATION
#------------------------------------
#--------------------------------------
# SCATTER PLOT
pred_wt = getColumn(file_wt,3,'\t')
pred_mut = getColumn(file_mut,3,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
x,y=[],[]
for i in range(0,len(pred_wt)): #max=98.662207
if pred_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(pred_wt[i]))
for i in range(0,len(pred_mut)): #max=99.665552
if pred_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(pred_mut[i]))
fig = plt.figure()
a=b=[0,100]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(-1,101)
plt.ylim(-1,101)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('waltz_wtVSmut.jpg')
#----------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 100, 100)
x2 = np.linspace(xmin2, 100, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'r',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Aggregation conformation predicted values (amylogenic regions)')
plt.ylabel('Frequency')
plt.xlim(0,100)
#plt.ylim(0,0.0)
plt.legend(loc='upper right')
fig.savefig('histwaltz_missense.png')
#missense_wt - missense_mut
miss = [a_i - b_i for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMIRNOV:
stats.kstest(miss,'norm') # (D,pvalue) = (0.3552063996073398, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) # (T, pvalue) = (4898.0, 0.29548245005836105)
#So we do not reject H0 -> There is no significant difference between wt and mut
#--------------------------------------
# AGGREGATION ENVIRONMENT
#--------------------------------------
#--------------------------------------
# SCATTER PLOT
pred_wt = getColumn(file_wt,4,'\t')
pred_mut = getColumn(file_mut,4,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
x,y=[],[]
for i in range(0,len(pred_wt)): #max=98.662207
if pred_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(pred_wt[i]))
for i in range(0,len(pred_mut)): #max=98.996656
if pred_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(pred_mut[i]))
fig = plt.figure()
a=b=[0,100]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(-1,101)
plt.ylim(-1,101)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('waltz_envt_wtVSmut.jpg')
#--------------------------------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 100, 100)
x2 = np.linspace(xmin2, 100, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'r',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Aggregation conformation predicted values (amylogenic regions)')
plt.ylabel('Frequency')
plt.xlim(0,100)
plt.ylim(0,0.06)
plt.legend(loc='upper right')
fig.savefig('histwaltzenvt_missense.png')
#missense_wt - missense_mut
miss = [a_i - b_i for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMIRNOV:
stats.kstest(miss,'norm') # (D,pvalue) = (0.34964202670995748, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) #-> (T, pvalue) = (8711.0, 0.55024961096028457)
#So we do not reject H0 -> There is no significant difference between wt and mut
#-----------------------------------------------------------------------------
# OUTLIERS FOR AGGREGATION
#-----------------------------------------------------------------------------
pred_wt = getColumn(file_wt,3,'\t')
pred_mut = getColumn(file_mut,3,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
pred_envt_wt = getColumn(file_wt,4,'\t')
pred_envt_mut = getColumn(file_mut,4,'\t')
pred_envt_wt.pop(0)
pred_envt_mut.pop(0)
variant_liste = getColumn(file_wt,0,'\t')
output = open('waltz_outliers.csv','w')
output.write('ID,agg_wt,agg_mut,difference,agg_envt_wt,agg_envt_mut,difference_envt\n')
for i in range(0,len(pred_wt)):
    if pred_wt[i]!='NA' and pred_mut[i]!='NA':
        diff = abs(float(pred_wt[i])-float(pred_mut[i]))
        if diff > 20:
            diff_envt = abs(float(pred_envt_wt[i])-float(pred_envt_mut[i]))
            output.write(variant_liste[i+1] + ',' + pred_wt[i] + ',' + pred_mut[i] + ',' + str(diff) + ',' + pred_envt_wt[i] + ',' + pred_envt_mut[i] + ',' + str(diff_envt) + '\n')
output.close()
#-------------------------------------------------------------------------------
#COMPARISON WITH NETSURFP RSA
#-------------------------------------------------------------------------------
W_wt = pd.read_csv(file_wt, sep='\t')
W_mut = pd.read_csv(file_mut, sep='\t')
W_wt['DWaltz'] = ''
W_wt['DWaltz'] = W_wt.aggregation - W_mut.aggregation
W_wt['DWaltz_envt'] = ''
W_wt['DWaltz_envt'] = W_wt.aggregation_envt - W_mut.aggregation_envt
W_wt = W_wt.drop(['aggregation','aggregation_envt'], axis=1)
W_wt.to_csv('waltzresults_compare.csv', index=False)
#RESIDUE
waltz = getColumn('waltzresults_compare.csv',3,',')
waltz.pop(0)
netsurfp = getColumn('netsurfpresults_compare.csv',3,',')
netsurfp.pop(0)
x,y=[],[]
for i in range(0,len(netsurfp)): #min=-0.183 and max=0.302
if netsurfp[i]=='':
x.append(np.nan)
else:
x.append(float(netsurfp[i]))
for i in range(0,len(waltz)): #min=-98.862207 and max=98.327759
if waltz[i]=='':
y.append(np.nan)
else:
y.append(float(waltz[i]))
fig = plt.figure()
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.grid('on')
plt.xlim(-0.4,0.4)
plt.ylim(-100,100)
plt.xlabel('delta(Solvent accessibility prediction) by NetSurfP')
plt.ylabel('delta(Aggregation conformation prediction) by Waltz')
fig.savefig('WaltzVSnetsurfp.jpg')
#ENVIRONMENT
waltz_envt = getColumn('waltzresults_compare.csv',4,',')
waltz_envt.pop(0)
netsurfp_envt = getColumn('netsurfpresults_compare.csv',4,',')
netsurfp_envt.pop(0)
x,y=[],[]
for i in range(0,len(netsurfp_envt)): #min=-0.183 and max=0.302
if netsurfp_envt[i]=='':
x.append(np.nan)
else:
x.append(float(netsurfp_envt[i]))
for i in range(0,len(waltz_envt)): #min=-98.862207 and max=98.327759
if waltz_envt[i]=='':
y.append(np.nan)
else:
y.append(float(waltz_envt[i]))
fig = plt.figure()
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.grid('on')
plt.xlim(-0.4,0.4)
plt.ylim(-100,100)
plt.xlabel('delta(Solvent accessibility prediction) by NetSurfP')
plt.ylabel('delta(Aggregation conformation prediction) by Waltz')
fig.savefig('WaltzVSnetsurfp_envt.jpg')
|
|
"""
API for the command-line I{pyflakes} tool.
"""
from __future__ import with_statement
import ast
import os
import platform
import re
import sys
from pyflakes import checker, __version__
from pyflakes import reporter as modReporter
__all__ = ['check', 'checkPath', 'checkRecursive', 'iterSourceCode', 'main']
PYTHON_SHEBANG_REGEX = re.compile(br'^#!.*\bpython([23](\.\d+)?|w)?[dmu]?\s')
def check(codeString, filename, reporter=None):
"""
Check the Python source given by C{codeString} for flakes.
@param codeString: The Python source to check.
@type codeString: C{str}
@param filename: The name of the file the source came from, used to report
errors.
@type filename: C{str}
@param reporter: A L{Reporter} instance, where errors and warnings will be
reported.
@return: The number of warnings emitted.
@rtype: C{int}
"""
if reporter is None:
reporter = modReporter._makeDefaultReporter()
# First, compile into an AST and handle syntax errors.
try:
tree = ast.parse(codeString, filename=filename)
except SyntaxError:
value = sys.exc_info()[1]
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
if checker.PYPY:
if text is None:
lines = codeString.splitlines()
if len(lines) >= lineno:
text = lines[lineno - 1]
if sys.version_info >= (3, ) and isinstance(text, bytes):
try:
text = text.decode('ascii')
except UnicodeDecodeError:
text = None
offset -= 1
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
reporter.unexpectedError(filename, 'problem decoding source')
else:
reporter.syntaxError(filename, msg, lineno, offset, text)
return 1
except Exception:
reporter.unexpectedError(filename, 'problem decoding source')
return 1
# Okay, it's syntactically valid. Now check it.
file_tokens = checker.make_tokens(codeString)
w = checker.Checker(tree, file_tokens=file_tokens, filename=filename)
w.messages.sort(key=lambda m: m.lineno)
for warning in w.messages:
reporter.flake(warning)
return len(w.messages)
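# A minimal sketch of calling check() programmatically (illustrative only):
#
#   from pyflakes.api import check
#   num_warnings = check("import os\n", "<example>")
#
# The default reporter prints the "'os' imported but unused" warning and
# check() returns the number of warnings found, here 1.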
def checkPath(filename, reporter=None):
"""
Check the given path, printing out any warnings detected.
@param reporter: A L{Reporter} instance, where errors and warnings will be
reported.
@return: the number of warnings printed
"""
if reporter is None:
reporter = modReporter._makeDefaultReporter()
try:
with open(filename, 'rb') as f:
codestr = f.read()
except IOError:
msg = sys.exc_info()[1]
reporter.unexpectedError(filename, msg.args[1])
return 1
return check(codestr, filename, reporter)
def isPythonFile(filename):
"""Return True if filename points to a Python file."""
if filename.endswith('.py'):
return True
# Avoid obvious Emacs backup files
if filename.endswith("~"):
return False
max_bytes = 128
try:
with open(filename, 'rb') as f:
text = f.read(max_bytes)
if not text:
return False
except IOError:
return False
return PYTHON_SHEBANG_REGEX.match(text)
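# Examples of shebang lines matched by PYTHON_SHEBANG_REGEX (illustrative):
#   #!/usr/bin/python
#   #!/usr/bin/env python2.7
#   #!/usr/local/bin/python3 -u
# Non-Python shebangs such as "#!/bin/sh" do not match.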
def iterSourceCode(paths):
"""
Iterate over all Python source files in C{paths}.
@param paths: A list of paths. Directories will be recursed into and
any .py files found will be yielded. Any non-directories will be
yielded as-is.
"""
for path in paths:
if os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
full_path = os.path.join(dirpath, filename)
if isPythonFile(full_path):
yield full_path
else:
yield path
def checkRecursive(paths, reporter):
"""
Recursively check all source files in C{paths}.
@param paths: A list of paths to Python source files and directories
containing Python source files.
@param reporter: A L{Reporter} where all of the warnings and errors
will be reported to.
@return: The number of warnings found.
"""
warnings = 0
for sourcePath in iterSourceCode(paths):
warnings += checkPath(sourcePath, reporter)
return warnings
def _exitOnSignal(sigName, message):
"""Handles a signal with sys.exit.
Some of these signals (SIGPIPE, for example) don't exist or are invalid on
Windows. So, ignore errors that might arise.
"""
import signal
try:
sigNumber = getattr(signal, sigName)
except AttributeError:
# the signal constants defined in the signal module are defined by
# whether the C library supports them or not. So, SIGPIPE might not
# even be defined.
return
def handler(sig, f):
sys.exit(message)
try:
signal.signal(sigNumber, handler)
except ValueError:
# It's also possible the signal is defined, but then it's invalid. In
# this case, signal.signal raises ValueError.
pass
def _get_version():
"""
Retrieve and format package version along with python version & OS used
"""
return ('%s Python %s on %s' %
(__version__, platform.python_version(), platform.system()))
def main(prog=None, args=None):
"""Entry point for the script "pyflakes"."""
import argparse
# Handle "Keyboard Interrupt" and "Broken pipe" gracefully
_exitOnSignal('SIGINT', '... stopped')
_exitOnSignal('SIGPIPE', 1)
parser = argparse.ArgumentParser(prog=prog,
description='Check Python source files for errors')
parser.add_argument('-V', '--version', action='version', version=_get_version())
parser.add_argument('path', nargs='*',
help='Path(s) of Python file(s) to check. STDIN if not given.')
args = parser.parse_args(args=args).path
reporter = modReporter._makeDefaultReporter()
if args:
warnings = checkRecursive(args, reporter)
else:
warnings = check(sys.stdin.read(), '<stdin>', reporter)
raise SystemExit(warnings > 0)
|
|
# -*- coding: utf-8 -*-
""" Synchronisation - Controllers
@author: Amer Tahir
"""
module = "admin"
module_name = T("Synchronization")
log_table = "sync_log"
conflict_table = "sync_conflict"
sync_peer = None
sync_policy = None
import_export_format = "xml"
# Options Menu (available in all Functions' Views)
# - can Insert/Delete items from default menus within a function, if required.
response.menu_options = admin_menu_options
# Web2Py Tools functions
def call():
"Call an XMLRPC, JSONRPC or RSS service"
# Sync webservices don't use sessions, so avoid cluttering up the storage
session.forget()
return service()
# -----------------------------------------------------------------------------
# S3 framework functions
def index():
"Module's Home Page"
return dict(module_name=module_name)
import urllib2
# -----------------------------------------------------------------------------
class RequestWithMethod(urllib2.Request):
    """ urllib2.Request subclass which allows the HTTP method
        (e.g. PUT) to be specified at construction time
    """
    def __init__(self, method, *args, **kwargs):
        """ Store the method, then delegate to urllib2.Request """
        self._method = method
        urllib2.Request.__init__(self, *args, **kwargs)
    def get_method(self):
        """ Return the HTTP method set at construction time """
        return self._method
# -----------------------------------------------------------------------------
class Error(Exception):
    """ Indicates an HTTP error """
    def __init__(self, url, errcode, errmsg, headers, body=None):
        """ Store the URL, status code, reason, headers and body """
        self.url = url
        self.errcode = errcode
        self.errmsg = errmsg
        self.headers = headers
        self.body = body
    def __repr__(self):
        """ Human-readable representation of the error """
        return (
            "Error for %s: %s %s\n Response body: %s" %
            (self.url, self.errcode, self.errmsg, self.body)
        )
# -----------------------------------------------------------------------------
class FetchURL:
    """ Minimal HTTP client helper used for synchronisation requests """
    def fetch(self, request, host, path, data, cookie=None, username=None, password=None):
        """ Send REQUEST (the HTTP method) to HOST/PATH with optional session
            cookie and HTTP Basic credentials; return the response body, or
            raise Error for any non-200 response
        """
import httplib, base64
http = httplib.HTTPConnection(host)
# write header
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
if cookie:
headers["Cookie"] = cookie
# auth
if username:
base64string = base64.encodestring("%s:%s" % (username, password))[:-1]
authheader = "Basic %s" % base64string
headers["Authorization"] = authheader
http.request(request, path, data, headers)
# get response
response = http.getresponse()
retcode = response.status
retmsg = response.reason
retbody = None
if retcode != 200:
try:
retbody = response.read()
except:
retbody = None
raise Error(str(host) + str(path), retcode, retmsg, headers, retbody)
return response.read()
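# Illustrative use (hypothetical host and path):
#
#   body = FetchURL().fetch("GET", "sync.example.org",
#                           "/eden/sync/sync.xml/pr/person", None,
#                           username="admin", password="secret")
#
# fetch() returns the response body for a 200 response and raises Error
# (carrying the status code and body) for anything else.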
# -----------------------------------------------------------------------------
@auth.requires_login()
def now():
""" Manual synchronization """
import urllib, urlparse
import gluon.contrib.simplejson as json
final_status = ""
sync_start = False
if "start" in request.args:
sync_start = True
# retrieve sync now state
state = db().select(db.sync_now.ALL, limitby=(0, 1)).first()
if sync_start:
# retrieve sync partners from DB
peers = db().select(db.sync_partner.ALL)
# retrieve all scheduled jobs set to run manually
job = db.sync_schedule
jobs = db((job.period == "m") & (job.enabled == True)).select(job.ALL)
if not jobs:
final_status = "There are no scheduled jobs. Please schedule a sync operation (set to run manually).<br /><br /><a href=\"" + URL(r=request, c="sync", f="schedule") + "\">Click here</a> to go to Sync Schedules page.<br /><br />\n"
return dict(module_name=module_name,
sync_status=final_status,
sync_start=False,
sync_state=state)
# retrieve settings
settings = db().select(db.sync_setting.ALL, limitby=(0, 1)).first()
# url fetcher
fetcher = FetchURL()
final_status = ""
        # Find primary resources
modules = deployment_settings.modules
tables = []
for t in db.tables:
table = db[t]
if t.find("_") == -1:
continue
prefix, name = t.split("_", 1)
if prefix in modules and \
"modified_on" in table.fields and \
"uuid" in table.fields:
is_component = False
hook = s3xrc.model.components.get(name, None)
if hook:
link = hook.get("_component", None)
if link and link.tablename == t:
continue
for h in hook.values():
if isinstance(h, dict):
link = h.get("_component", None)
if link and link.tablename == t:
is_component = True
break
if is_component:
continue
# No component
tables.append(t)
if not state:
            # New now-job
            first_job = None
            job_cmd = None
            res_list = []   # resources to sync, taken from the first scheduled job below
if jobs:
first_job = jobs[0]
if first_job:
if first_job.job_type == 1:
job_cmd = json.loads(first_job.job_command)
res_list = job_cmd["resources"]
# begin new sync now session
sync_jobs_enabled_list = []
for job in jobs:
if job.enabled:
sync_jobs_enabled_list.append(str(job.id))
sync_now_id = db["sync_now"].insert(
sync_jobs = ", ".join(map(str, sync_jobs_enabled_list)),
started_on = request.utcnow,
job_resources_done = "",
job_resources_pending = ", ".join(map(str, res_list)),
job_sync_errors = ""
)
state = db(db.sync_now.id == sync_now_id).select(db.sync_now.ALL, limitby=(0, 1)).first()
final_status += "Sync Now started:<br /><br /><br />\n"
else:
# Now already started
sync_now_id = state.id
final_status += "Sync Now resumed (originally started on " + state.started_on.strftime("%x %H:%M:%S")+ "):<br /><br /><br />\n"
# unlock session - what for?
session._unlock(response)
# become super-user - what for?
session.s3.roles.append(1)
# get job from queue
sync_jobs_list = state.sync_jobs.split(", ")
if "" in sync_jobs_list:
sync_jobs_list.remove("")
sync_job = db(db.sync_schedule.id == int(sync_jobs_list[0])).select(db.sync_schedule.ALL, limitby=(0, 1)).first()
job_cmd = None
if sync_job:
job_cmd = json.loads(sync_job.job_command)
sync_job_partner = job_cmd["partner_uuid"]
peer = db(db.sync_partner.uuid == sync_job_partner).select(limitby=(0, 1)).first()
# Whether a push was successful
push_success = False
if sync_job and peer:
final_status += "<br />Syncing with: " + \
peer.name + ", " + \
peer.instance_url + \
" (" + peer.instance_type + "):<br />\n\n"
peer_sync_success = True
last_sync_on = sync_job.last_run
complete_sync = False
sync_mode = 1
if "complete" in job_cmd and str(job_cmd["complete"]) == "True":
complete_sync = True
if "policy" in job_cmd:
sync_policy = int(job_cmd["policy"])
if "mode" in job_cmd:
sync_mode = int(job_cmd["mode"])
sync_resources = []
sync_errors = ""
# Keep Session for local URLs
cookie = str(response.session_id_name) + "=" + str(response.session_id)
if sync_job.job_type == 1: # Eden <-> Eden
# Sahana Eden sync
# Build msince-query
if last_sync_on is not None and complete_sync == False:
msince = last_sync_on.strftime("%Y-%m-%dT%H:%M:%SZ")
msince_query = "?msince=" + msince
else:
msince = ""
msince_query = ""
# Find resources to sync
tablenames = state.job_resources_pending.split(",")
#for _module, _resource in job_res_list:
for tablename in tablenames:
if tablename not in db.tables:
continue
if tablename.find("||") == -1:
if tablename.find("_") == -1:
continue
else:
prefix, name = tablename.split("_", 1)
else:
prefix, name = tablename.split("||", 1)
resource = s3xrc.resource(prefix, name)
sync_path = "sync/sync/%s/%s.%s" % (prefix, name, import_export_format)
remote_url = urlparse.urlparse(peer.instance_url)
if remote_url.path[-1] != "/":
remote_path = "%s/%s" % (remote_url.path, sync_path)
else:
remote_path = "%s%s" % (remote_url.path, sync_path)
if sync_mode in [1, 3]: # pull
params = dict(msince=msince)
fetch_url = "%s://%s%s?%s" % (remote_url.scheme,
remote_url.netloc,
remote_path,
urllib.urlencode(params))
try:
result = resource.fetch_xml(fetch_url,
username=peer.username,
password=peer.password)
#proxy=settings.proxy)
except SyntaxError, e:
result = str(e)
try:
result_json = json.loads(result)
except:
error = str(result)
else:
if str(result_json["statuscode"]).startswith("2"):
error = None
else:
error = str(result_json["message"])
if error:
tablename_error = "%s (pull error)"
sync_errors = "%s\n%s" % (sync_errors,
"Error while synchronizing %s: %s" % (tablename, error))
sync_resources.append(tablename_error)
final_status += error + "<br /><br />\n"
else:
sync_resources.append(tablename)
final_status += ".........processed %s (Pull Sync)<br />\n" % push_url
if sync_mode in [2, 3]: # push
params = dict(sync_partner_uuid=settings.uuid)
push_url = "%s://%s%s?%s" % (remote_url.scheme,
remote_url.netloc,
remote_path,
urllib.urlencode(params))
try:
result = resource.push_xml(push_url,
username=peer.username,
password=peer.password,
#proxy=settings.proxy,
msince=msince)
except SyntaxError, e:
result = str(e)
try:
result_json = json.loads(result)
except:
error = str(result)
else:
if str(result_json["statuscode"]).startswith("2"):
error = None
else:
error = str(result_json["message"])
if error:
tablename_error = "%s (push error)"
sync_errors = "%s\n%s" % (sync_errors,
"Error while synchronizing %s: %s" % (tablename, error))
sync_resources.append(tablename_error)
final_status += error + "<br /><br />\n"
else:
sync_resources.append(tablename)
final_status += ".........processed %s (Push Sync)<br />\n" % push_url
else:
pass
# update sync now state
if state.job_resources_done:
state.job_resources_done += ","
state.job_resources_done += ",".join(map(str, sync_resources))
job_res_pending = state.job_resources_pending.split(",")
if "" in job_res_pending:
job_res_pending.remove("")
if sync_job.job_type == 1:
for tablename in tablenames:
job_res_pending.remove(tablename)
state.job_resources_pending = ",".join(map(str, job_res_pending))
state.job_sync_errors += sync_errors
vals = {"job_resources_done": state.job_resources_done,
"job_resources_pending": state.job_resources_pending,
"job_sync_errors": state.job_sync_errors}
db(db.sync_now.id == sync_now_id).update(**vals)
state = db(db.sync_now.id == sync_now_id).select(db.sync_now.ALL, limitby=(0, 1)).first()
# check if all resources are synced for the current job, i.e. is it done?
if (not state.job_resources_pending) or sync_job.job_type == 2:
# job completed, check if there are any more jobs, if not, then sync now completed
# log sync job
if sync_mode == 1:
sync_method = "Pull"
elif sync_mode == 2:
sync_method = "Push"
elif sync_mode == 3:
sync_method = "Pull-Push"
log_table_id = db[log_table].insert(
partner_uuid = sync_job_partner,
timestmp = datetime.datetime.utcnow(),
sync_resources = state.job_resources_done,
sync_errors = state.job_sync_errors,
sync_mode = "online",
sync_method = sync_method,
complete_sync = complete_sync
)
# remove this job from queue and process next
sync_jobs_list = state.sync_jobs.split(", ")
if "" in sync_jobs_list:
sync_jobs_list.remove("")
if len(sync_jobs_list) > 0:
sync_jobs_list.remove(sync_jobs_list[0])
state.sync_jobs = ", ".join(map(str, sync_jobs_list))
state.job_resources_done = ""
state.job_resources_pending = ""
if len(sync_jobs_list) > 0:
                next_job_sel = db(db.sync_schedule.id == int(sync_jobs_list[0])).select(db.sync_schedule.ALL)
if next_job_sel:
next_job = next_job_sel[0]
if next_job.job_type == 1:
next_job_cmd = json.loads(next_job.job_command)
state.job_resources_pending = ", ".join(map(str, next_job_cmd["resources"]))
state.job_sync_errors = ""
vals = {"sync_jobs": state.sync_jobs,
"job_resources_done": state.job_resources_done,
"job_resources_pending": state.job_resources_pending}
db(db.sync_now.id == sync_now_id).update(**vals)
state = db(db.sync_now.id == sync_now_id).select(db.sync_now.ALL, limitby=(0, 1)).first()
# update last_sync_on
vals = {"last_sync_on": datetime.datetime.utcnow()}
db(db.sync_partner.id == peer.id).update(**vals)
vals = {"last_run": datetime.datetime.utcnow()}
db(db.sync_schedule.id == sync_job.id).update(**vals)
if not state.sync_jobs:
# remove sync now session state
db(db.sync_now.id == sync_now_id).delete()
# we're done
final_status += "Sync completed successfully. Logs generated: " + str(A(T("Click here to open log"),_href=URL(r=request, c="sync", f="history"))) + "<br /><br />\n"
return dict(module_name=module_name,
sync_status=final_status,
sync_start=sync_start,
sync_state=state)
# -----------------------------------------------------------------------------
def sync():
""" Sync interface
allows PUT/GET of any resource (universal RESTful controller)
"""
# @todo: Do not use global variables
global sync_peer, sync_policy
import gluon.contrib.simplejson as json
if len(request.args) < 2:
# No resource specified
raise HTTP(501, body=s3xrc.ERROR.BAD_RESOURCE)
else:
prefix = request.args.pop(0)
name = request.args.pop(0)
if name.find(".") != -1:
name, extension = name.rsplit(".", 1)
request.extension = extension
# Get the sync partner
peer_uuid = request.vars.get("sync_partner_uuid", None)
if peer_uuid:
peer = db.sync_partner
sync_peer = db(peer.uuid == peer_uuid).select(limitby=(0,1)).first()
if sync_peer and not sync_policy:
sync_policy = sync_peer.policy
# remote push?
method = request.env.request_method
if method in ("PUT", "POST"):
remote_push = True
# Must be registered partner for push:
if not sync_peer:
raise HTTP(501, body=s3xrc.ERROR.NOT_PERMITTED)
elif method == "GET":
remote_push = False
else:
raise HTTP(501, body=s3xrc.ERROR.BAD_METHOD)
# Set the sync resolver
s3xrc.sync_resolve = lambda vector, peer=sync_peer: sync_res(vector, peer)
def prep(r):
# Do not allow interactive formats
if r.representation in ("html", "popup", "aadata"):
return False
# Do not allow URL methods
if r.method:
return False
# Neutralize push limit of the resource
r.resource.push_limit = None
return True
response.s3.prep = prep
def postp(r, output, sync_peer=sync_peer):
try:
output_json = Storage(json.loads(output))
except:
# No JSON response?
pass
else:
if r.http in ("PUT", "POST"):
resource = r.resource
sr = [c.component.tablename for c in resource.components.values()]
sr.insert(0, resource.tablename)
sync_resources = ",".join(sr)
if str(output_json["statuscode"]) != "200":
sync_resources += " (error)"
sync_errors = str(output)
else:
sync_errors = ""
db[log_table].insert(
partner_uuid = sync_peer.uuid,
timestmp = datetime.datetime.utcnow(),
sync_resources = sync_resources,
sync_errors = sync_errors,
sync_mode = "online",
sync_method = "Remote Push",
complete_sync = False)
return output
response.s3.postp = postp
# Execute the request
output = shn_rest_controller(prefix, name)
#return ret_data
return output
# -----------------------------------------------------------------------------
def sync_res(vector, peer):
""" Sync resolver
designed as callback for s3xrc.sync_resolve
"""
import cPickle
global sync_policy
sync_peer = peer
if not sync_policy:
sync_policy = sync_peer.policy
db_record = vector.db(vector.table.id==vector.id).select(vector.table.ALL, limitby=(0,1))
db_record_mtime = None
record_dump = cPickle.dumps(dict(vector.record), 0)
if db_record:
db_record = db_record.first()
if "modified_on" in vector.table.fields:
db_record_mtime = db_record.modified_on
# based on the sync_policy, make resolution
if sync_policy == 0: # No Sync
# don't import anything in this case
vector.strategy = []
elif sync_policy == 1: # Keep Local
vector.resolution = vector.RESOLUTION.THIS
if db_record_mtime and vector.mtime > db_record_mtime:
# log this as a conflict, local record is older
#print "Conflict: local record is kept and is older"
db[conflict_table].insert(
uuid = vector.record.uuid,
resource_table = vector.tablename,
remote_record = record_dump,
remote_modified_by = vector.element.get("modified_by"),
remote_modified_on = vector.mtime,
logged_on = datetime.datetime.utcnow(),
resolved = False
)
elif sync_policy == 2: # Replace with Remote
vector.resolution = vector.RESOLUTION.OTHER
if db_record_mtime and vector.mtime < db_record_mtime:
# log this as a conflict, remote record is older
#print "Conflict: remote record is imported and is older"
db[conflict_table].insert(
uuid = vector.record.uuid,
resource_table = vector.tablename,
remote_record = record_dump,
remote_modified_by = vector.element.get("modified_by"),
remote_modified_on = vector.mtime,
logged_on = datetime.datetime.utcnow(),
resolved = False
)
elif db_record_mtime and sync_peer.last_sync_on and db_record_mtime > sync_peer.last_sync_on:
# log this as a conflict, local record was modified too, but overwritten
#print "Conflict: local record was modified since last sync but overwritten by remote record"
db[conflict_table].insert(
uuid = vector.record.uuid,
resource_table = vector.tablename,
remote_record = record_dump,
remote_modified_by = vector.element.get("modified_by"),
remote_modified_on = vector.mtime,
logged_on = datetime.datetime.utcnow(),
resolved = False
)
elif sync_policy == 3: # Keep with Newer Timestamp
vector.resolution = vector.RESOLUTION.NEWER
if db_record_mtime and vector.mtime < db_record_mtime:
# log this as a conflict, remote record is older
#print "Conflict: remote record is imported and is older"
db[conflict_table].insert(
uuid = vector.record.uuid,
resource_table = vector.tablename,
remote_record = record_dump,
remote_modified_by = vector.element.get("modified_by"),
remote_modified_on = vector.mtime,
logged_on = datetime.datetime.utcnow(),
resolved = False
)
elif sync_policy == 4: # Role-based
# not implemented, defaulting to "Newer Timestamp"
vector.resolution = vector.RESOLUTION.NEWER
elif sync_policy == 5: # Choose Manually
if db_record_mtime and vector.mtime != db_record_mtime:
# just log and skip
vector.strategy = []
db[conflict_table].insert(
uuid = vector.record.uuid,
resource_table = vector.tablename,
remote_record = record_dump,
remote_modified_by = vector.element.get("modified_by"),
remote_modified_on = vector.mtime,
logged_on = datetime.datetime.utcnow(),
resolved = False
)
return
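# Summary of the sync policies handled above (values as used in job_command):
#   0 = No Sync               - skip the import entirely
#   1 = Keep Local            - keep this record; log a conflict if the
#                               remote record is newer
#   2 = Replace with Remote   - import; log a conflict if the remote record
#                               is older, or if the local record was modified
#                               since the last sync but gets overwritten
#   3 = Keep Newer Timestamp  - newer record wins; log a conflict if the
#                               remote record is older
#   4 = Role-based            - not implemented, falls back to newer-timestamp
#   5 = Choose Manually       - skip the import and log the conflict for the
#                               conflict resolution UI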
# -----------------------------------------------------------------------------
@auth.shn_requires_membership(1)
def partner():
""" Synchronisation Partners """
import gluon.contrib.simplejson as json
table = db.sync_partner
table.uuid.label = "UUID"
table.uuid.comment = DIV(SPAN("*", _class="req"), DIV(_class="tooltip",
_title="UUID|" + Tstr("The unique identifier of the sync partner. Leave blank if the instance type is not Sahana Eden, it will be auto-assigned in that case.")))
table.name.label = T("Name")
table.name.comment = DIV(_class="tooltip",
_title=Tstr("Name") + "|" + Tstr("The descriptive name of the sync partner."))
table.instance_url.label = T("Instance URL")
table.instance_url.comment = DIV(SPAN("*", _class="req"), DIV(_class="tooltip",
_title=Tstr("Instance URL") + "|" + Tstr("For Eden instances - this is the application URL, e.g. http://sync.sahanfoundation.org/eden. For non-Eden instances, this is the Full ")))
table.instance_type.label = T("Instance Type")
table.instance_type.comment = DIV(SPAN("*", _class="req"), DIV(_class="tooltip",
_title=Tstr("Instance Type") + "|" + Tstr("Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.")))
table.username.label = T("Sync Username")
table.username.comment = DIV(_class="tooltip",
_title=Tstr("Sync Username") + "|" + Tstr("Username used to login when synchronising with this partner. Note that only HTTP Basic authentication is supported."))
table.password.label = T("Sync Password")
table.password.comment = DIV(_class="tooltip",
_title=Tstr("Sync Password") + "|" + Tstr("Password used to login when synchronising with this partner. Note that only HTTP Basic authentication is supported."))
table.comments.label = T("Comments")
table.comments.comment = DIV(_class="tooltip",
_title=Tstr("Comments") + "|" + Tstr("Any comments about this sync partner."))
table.policy.label = T("Sync Policy")
table.policy.comment = DIV(SPAN("*", _class="req"), DIV(_class="tooltip",
_title=Tstr("Sync Policy") + "|" + Tstr("The policy to use while synchronising with this partner. All policies other than 'No Sync' come into effect when conflicts arise.")))
table.sync_pools.readable = False
table.sync_pools.writable = False
table.password.readable = False
table.last_sync_on.writable = False
# CRUD Strings - @todo: make new style
title_create = T("Add Partner")
title_display = T("Partner Details")
title_list = T("List Partners")
title_update = T("Edit Partner")
title_search = T("Search Partners")
subtitle_create = T("Add New Partner")
subtitle_list = T("Partners")
label_list_button = T("List Partners")
label_create_button = T("Add Partner")
label_search_button = T("Search Partners")
msg_record_created = T("Partner added")
msg_record_modified = T("Partner updated")
msg_record_deleted = T("Partner deleted")
msg_list_empty = T("No Partners currently registered")
    s3.crud_strings.sync_partner = Storage(title_create=title_create,
                                           title_display=title_display,
                                           title_list=title_list,
                                           title_update=title_update,
                                           title_search=title_search,
                                           subtitle_create=subtitle_create,
                                           subtitle_list=subtitle_list,
                                           label_list_button=label_list_button,
                                           label_create_button=label_create_button,
                                           msg_record_created=msg_record_created,
                                           msg_record_modified=msg_record_modified,
                                           msg_record_deleted=msg_record_deleted,
                                           msg_list_empty=msg_list_empty)
if "delete" in request.args:
peer_sel = db(db.sync_partner.id==int(request.args[0])).select(db.sync_partner.ALL)
peer_uuid = None
if peer_sel:
peer_uuid = peer_sel[0].uuid
if peer_uuid:
sch_jobs_del = []
sch_jobs = db().select(db.sync_schedule.ALL)
for sch_job in sch_jobs:
sch_job_cmd = json.loads(sch_job.job_command)
if sch_job_cmd["partner_uuid"] == peer_uuid:
sch_jobs_del.append(sch_job.id)
if sch_jobs_del:
db(db.sync_schedule.id.belongs(sch_jobs_del)).delete()
elif (not "update" in request.args) and len(request.vars) > 0:
# add new partner
random_uuid = str(uuid.uuid4())
new_instance_type = ""
if "instance_type" in request.vars:
new_instance_type = request.vars["instance_type"]
if new_instance_type != "Sahana Eden":
if "uuid" in request.vars:
request.vars["uuid"] = random_uuid
if "uuid" in request.get_vars:
request.get_vars["uuid"] = random_uuid
if "uuid" in request.post_vars:
request.post_vars["uuid"] = random_uuid
elif "uuid" in request.vars and request.vars["uuid"] and "instance_url" in request.vars and request.vars["instance_url"]:
# create new default scheduled job for this partner, it's a Sahana Eden instance
modules = deployment_settings.modules
_db_tables = db.tables
db_tables = []
for __table in _db_tables:
if "modified_on" in db[__table].fields and "uuid" in db[__table].fields:
db_tables.append(__table)
sch_resources = []
for _module in modules:
for _table in db_tables:
if _table.startswith(_module + "_"):
sch_resources.append(_module + "||" + _table[len(_module)+1:])
# add job to db
new_partner_uuid = request.vars["uuid"]
new_partner_instance_type = request.vars["instance_type"]
new_partner_policy = int(request.vars["policy"])
new_partner_name = None
if "name" in request.vars and request.vars["name"]:
new_partner_name = request.vars["name"]
sch_comments = "Default manually triggered schedule job for sync partner '"
if new_partner_name:
sch_comments += new_partner_name
else:
sch_comments += new_partner_uuid
sch_comments += "'"
sch_cmd = dict()
sch_cmd["partner_uuid"] = new_partner_uuid
sch_cmd["policy"] = new_partner_policy
sch_cmd["resources"] = sch_resources
sch_cmd["complete"] = False
sch_cmd["mode"] = 3
db["sync_schedule"].insert(
comments = sch_comments,
period = "m",
hours = None,
days_of_week = None,
time_of_day = None,
runonce_datetime = None,
job_type = 1,
job_command = json.dumps(sch_cmd),
last_run = None,
enabled = True,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now()
)
return shn_rest_controller("sync", "partner")
# -----------------------------------------------------------------------------
@auth.shn_requires_membership(1)
def setting():
"Synchronisation Settings"
if not "update" in request.args:
redirect(URL(r=request, args=["update", 1]))
db.sync_setting.uuid.writable = False
db.sync_setting.uuid.label = "UUID"
db.sync_setting.uuid.comment = DIV(_class="tooltip",
_title="UUID|" + Tstr("The unique identifier which identifies this instance to other instances."))
db.sync_setting.comments.label = T("Comments")
db.sync_setting.comments.comment = DIV(_class="tooltip",
_title=Tstr("Comments") + "|" + Tstr("Any comments for this instance."))
# db.sync_setting.beacon_service_url.label = T("Beacon Service URL")
# db.sync_setting.beacon_service_url.comment = DIV(_class="tooltip",
# _title=Tstr("Beacon Service URL") + "|" + Tstr("Beacon service allows searching for other instances that wish to synchronise. This is the URL of the beacon service this instance will use."))
db.sync_setting.sync_pools.readable = False
db.sync_setting.sync_pools.writable = False
db.sync_setting.beacon_service_url.readable = False
db.sync_setting.beacon_service_url.writable = False
title_update = T("Edit Sync Settings")
label_list_button = T("Sync Settings")
msg_record_modified = T("Sync Settings updated")
s3.crud_strings.sync_setting = Storage(title_update=title_update,label_list_button=label_list_button,msg_record_modified=msg_record_modified)
crud.settings.update_next = URL(r=request, args=["update", 1])
return shn_rest_controller("sync", "setting", deletable=False, listadd=False)
# -----------------------------------------------------------------------------
@auth.shn_requires_membership(1)
def schedule():
""" Synchronisation Schedules """
import gluon.contrib.simplejson as json
title = T("Syncronisation Schedules")
jobs = None
confirmation_msg = None
if "create" in request.args:
response.view = "sync/schedule_create.html"
if "form_action" in request.vars and request.vars["form_action"] == "submit":
# create new job - add it to database
sch_enabled = True
if "job_enabled" in request.vars and request.vars["job_enabled"] == "0":
sch_enabled = False
sch_comments = None
if "comments" in request.vars:
sch_comments = request.vars["comments"]
sch_source_type = "eden"
if "sync_data_source_type" in request.vars:
sch_source_type = request.vars["sync_data_source_type"]
sch_period = "h"
if "sync_schedule_period" in request.vars:
sch_period = request.vars["sync_schedule_period"]
sch_period_hours = 5
if "sync_schedule_period_hours" in request.vars:
sch_period_hours = request.vars["sync_schedule_period_hours"]
sch_days_of_week = []
if "sync_schedule_weekly_days" in request.vars and request.vars["sync_schedule_weekly_days"]:
sch_days_of_week = request.vars["sync_schedule_weekly_days"]
sch_time_of_day = None
if sch_period == "d":
sch_time_of_day = datetime.datetime.strptime(str(request.vars["sync_schedule_daily_time"]), "%H:%M").time()
elif sch_period == "w":
sch_time_of_day = datetime.datetime.strptime(str(request.vars["sync_schedule_weekly_time"]), "%H:%M").time()
sch_runonce_datetime = None
if "sync_schedule_once_datetime" in request.vars and request.vars["sync_schedule_once_datetime"]:
sch_runonce_datetime = datetime.datetime.strptime(str(request.vars["sync_schedule_once_datetime"]), "%Y-%m-%d %H:%M:%S")
sch_job_type = 1
sch_cmd = dict()
sch_cmd["partner_uuid"] = request.vars["sync_partner_uuid"]
sch_cmd["policy"] = int(request.vars["sync_policy"])
if sch_source_type == "eden":
# eden data source
if "sync_resources" in request.vars and request.vars["sync_resources"]:
sch_cmd["resources"] = request.vars["sync_resources"]
if type(sch_cmd["resources"]) == str:
sch_cmd["resources"] = [sch_cmd["resources"]]
else:
sch_cmd["resources"] = None
sch_cmd["complete"] = False
if "sync_complete" in request.vars and request.vars["sync_complete"] == "1":
sch_cmd["complete"] = True
sch_cmd["mode"] = 3
if "sync_mode" in request.vars and request.vars["sync_mode"]:
sch_cmd["mode"] = int(request.vars["sync_mode"])
else:
# custom data source
sch_job_type = 2
sch_cmd["custom_command"] = request.vars["sync_custom"]
# add job to db
db["sync_schedule"].insert(
comments = sch_comments,
period = sch_period,
hours = sch_period_hours,
days_of_week = ",".join(map(str, sch_days_of_week)),
time_of_day = sch_time_of_day,
runonce_datetime = sch_runonce_datetime,
job_type = sch_job_type,
job_command = json.dumps(sch_cmd),
last_run = None,
enabled = sch_enabled,
created_on = datetime.datetime.now(),
modified_on = datetime.datetime.now()
)
confirmation_msg = "New Scheduled job created"
response.view = "sync/schedule.html"
else:
if "form_action" in request.vars and "selected_jobs" in request.vars:
sel_jobs = request.vars["selected_jobs"]
if request.vars["form_action"] == "enable":
for s_job_id in sel_jobs:
vals = {"enabled": True}
db(db.sync_schedule.id==int(s_job_id)).update(**vals)
elif request.vars["form_action"] == "disable":
for s_job_id in sel_jobs:
vals = {"enabled": False}
db(db.sync_schedule.id==int(s_job_id)).update(**vals)
elif request.vars["form_action"] == "delete":
for s_job_id in sel_jobs:
db(db.sync_schedule.id==int(s_job_id)).delete()
jobs = db().select(db.sync_schedule.ALL)
return dict(title=title, jobs=jobs, confirmation_msg=confirmation_msg)
# -----------------------------------------------------------------------------
def schedule_cron():
# only accept requests from local machine
if not request.env.remote_addr == "127.0.0.1":
return
while True:
try:
            # look at each job and run it if its scheduled time has arrived
jobs = db(db.sync_schedule.enabled==True).select(db.sync_schedule.ALL)
for job in jobs:
last_run = job.last_run
if not last_run:
last_run = job.created_on - datetime.timedelta(days=2)
try:
if job.period == "h":
if datetime.datetime.now() >= (last_run + datetime.timedelta(hours=job.hours)):
schedule_process_job(job.id)
db.commit()
elif job.period == "d":
if job.time_of_day and last_run.date() != datetime.datetime.now().date() and datetime.datetime.now().hour >= job.time_of_day.hour and datetime.datetime.now().minute >= job.time_of_day.minute:
schedule_process_job(job.id)
db.commit()
elif job.period == "w":
days_of_week = None
last_run_weekday = last_run.weekday() + 1
if last_run_weekday == 8:
last_run_weekday = 1
now_weekday = datetime.datetime.now().weekday() + 1
if now_weekday == 8:
now_weekday = 1
if job.days_of_week:
days_of_week = map(int, job.days_of_week.split(","))
if job.time_of_day and now_weekday in days_of_week and last_run_weekday < now_weekday and datetime.datetime.now().hour >= job.time_of_day.hour and datetime.datetime.now().minute >= job.time_of_day.minute:
schedule_process_job(job.id)
db.commit()
elif job.period == "o":
if job.runonce_datetime and last_run < job.runonce_datetime and datetime.datetime.now() >= job.runonce_datetime:
schedule_process_job(job.id)
db.commit()
except Error, e:
# log scheduler error
try:
log_file = open("applications/" + request.application + "/cron/scheduler_errors.txt", "a")
log_file.write(str(datetime.datetime.now()) + " - error while running job " + str(job.id) + ":\n" + str(e) + "\n\n")
log_file.close()
except:
pass
#print "error while appending scheduler error log file!"
db.commit()
except Error, e:
# log scheduler error
try:
log_file = open("applications/" + request.application + "/cron/scheduler_errors.txt", "a")
log_file.write(str(datetime.datetime.now()) + " - error while running job " + str(job.id) + ":\n" + str(e) + "\n\n")
log_file.close()
except:
pass
#print "error while appending scheduler error log file!"
# pause for 15 seconds
time.sleep(15)
return
# -----------------------------------------------------------------------------
def schedule_process_job(job_id):
""" docstring??? """
import gluon.contrib.simplejson as json
import urllib, urlparse
global sync_policy
job_sel = db(db.sync_schedule.id==job_id).select(db.sync_schedule.ALL)
if not job_sel:
return
job = job_sel[0]
if not job:
return
if not job.enabled:
return
job_cmd = json.loads(job.job_command)
# url fetcher
fetcher = FetchURL()
# retrieve settings
settings = db().select(db.sync_setting.ALL)[0]
peer_sel = db(db.sync_partner.uuid==str(job_cmd["partner_uuid"])).select(db.sync_partner.ALL)
if not peer_sel:
return
peer = peer_sel[0]
peer_sync_success = True
last_sync_on = job.last_run
complete_sync = False
sync_mode = 1
if "complete" in job_cmd and str(job_cmd["complete"]) == "True":
complete_sync = True
if "policy" in job_cmd:
sync_policy = int(job_cmd["policy"])
if "mode" in job_cmd:
sync_mode = int(job_cmd["mode"])
sync_resources = []
sync_errors = ""
# Keep Session for local URLs
cookie = str(response.session_id_name) + "=" + str(response.session_id)
if job.job_type == 1:
        # Sahana Eden sync
if (not last_sync_on is None) and complete_sync == False:
last_sync_on_str = "?msince=" + last_sync_on.strftime("%Y-%m-%dT%H:%M:%SZ")
else:
last_sync_on_str = ""
log_file = open("applications/" + request.application + "/cron/scheduler_log.txt", "a")
log_file.write(str(datetime.datetime.now()) + " - running job " + str(job.id) + "\n")
log_file.close()
for res_item in job_cmd["resources"]:
_module, _resource = res_item.split("||")
_resource_name = _module + "_" + _resource
peer_instance_url = list(urlparse.urlparse(peer.instance_url))
if peer_instance_url[2].endswith("/")==False:
peer_instance_url[2] += "/"
resource_remote_pull_url = peer.instance_url
if resource_remote_pull_url.endswith("/")==False:
resource_remote_pull_url += "/"
resource_remote_pull_url += "sync/sync." + import_export_format + "/" + _module + "/" + _resource + last_sync_on_str
resource_remote_push_url = peer_instance_url[2] + "sync/sync." + import_export_format + "/push/" + _module + "/" + _resource + "?sync_partner_uuid=" + str(settings.uuid)
resource_local_pull_url = "/" + request.application + "/sync/sync." + import_export_format + "/" + _module + "/" + _resource + last_sync_on_str
resource_local_push_url = "/" + request.application + "/sync/sync." + import_export_format + "/create/" + _module + "/" + _resource
if sync_mode in [1, 3]:
# Sync -> Pull
_request_params = urllib.urlencode({"sync_partner_uuid": str(peer.uuid), "fetchurl": resource_remote_pull_url})
_request_vars_copy = request.vars
_request_get_vars_copy = request.get_vars
_request_post_vars_copy = request.post_vars
_request_args_copy = request.args
_request_extension_copy = request.extension
_request_env_request_method_copy = request.env.request_method
try:
#_response = fetcher.fetch("PUT", request.env.http_host, resource_local_push_url, _request_params, cookie)
request.vars = Storage()
request.vars["sync_partner_uuid"] = str(peer.uuid)
request.vars["fetchurl"] = resource_remote_pull_url
request.args = ["push", _module, _resource]
request.extension = import_export_format
request.env.request_method = "PUT"
session.auth = Storage()
session.auth["user"] = None
session.s3.roles.append(1)
_response = sync()
except Error, e:
if not _resource_name + " (error)" in sync_resources and not _resource_name in sync_resources:
sync_resources.append(_resource_name + " (error)")
error_str = str(e)
sync_errors += "Error while syncing => " + _resource_name + ": \n" + error_str + "\n\n"
#print "Error while syncing => " + _resource_name + ": \n" + error_str + "\n\n"
else:
if not _resource_name + " (error)" in sync_resources and not _resource_name in sync_resources:
sync_resources.append(_resource_name)
request.args = _request_args_copy
request.get_vars = _request_get_vars_copy
request.post_vars = _request_post_vars_copy
request.vars = _request_vars_copy
request.extension = _request_extension_copy
request.env.request_method = _request_env_request_method_copy
if sync_mode in [2, 3]:
# Sync -> Push
try:
_local_data = fetcher.fetch("GET", request.env.http_host, resource_local_pull_url, None, cookie)
_response = fetcher.fetch("PUT", peer_instance_url[1], resource_remote_push_url, _local_data, None, peer.username, peer.password)
except Error, e:
if not _resource_name + " (error)" in sync_resources and not _resource_name in sync_resources:
sync_resources.append(_resource_name + " (error)")
error_str = str(e)
sync_errors += "Error while syncing => " + _resource_name + ": \n" + error_str + "\n\n"
else:
if not _resource_name + " (error)" in sync_resources and not _resource_name in sync_resources:
sync_resources.append(_resource_name)
else:
# Custom sync
sync_mode = 1
_request_vars_copy = request.vars
_request_get_vars_copy = request.get_vars
_request_post_vars_copy = request.post_vars
_request_args_copy = request.args
_request_extension_copy = request.extension
try:
request.vars = Storage()
request.vars["sync_partner_uuid"] = str(peer.uuid)
request.vars["fetchurl"] = job_cmd["custom_command"]
request.args = ["create", "sync", "log"]
request.extension = import_export_format
_response = sync()
except Error, e:
error_str = str(e)
sync_errors = "Error while syncing job " + str(job.id) + ": \n" + error_str + "\n\n"
#print sync_errors
request.args = _request_args_copy
request.get_vars = _request_get_vars_copy
request.post_vars = _request_post_vars_copy
request.vars = _request_vars_copy
request.extension = _request_extension_copy
if sync_mode == 1:
sync_method = "Pull"
elif sync_mode == 2:
sync_method = "Push"
elif sync_mode == 3:
sync_method = "Pull-Push"
# log sync job
log_table_id = db[log_table].insert(
partner_uuid = peer.uuid,
timestmp = datetime.datetime.utcnow(),
sync_resources = ", ".join(map(str, sync_resources)),
sync_errors = sync_errors,
sync_mode = "online",
sync_method = sync_method,
complete_sync = complete_sync
)
# update last_sync_on
vals = {"last_sync_on": datetime.datetime.utcnow()}
db(db.sync_partner.id==peer.id).update(**vals)
vals = {"last_run": datetime.datetime.utcnow()}
db(db.sync_schedule.id==job_id).update(**vals)
return
# -----------------------------------------------------------------------------
@auth.requires_login()
def history():
""" Shows history of database synchronisations
@todo: argument list processing too vulnerable
"""
title = T("Synchronisation History")
table = db[log_table]
if len(request.args) > 0:
logs = db(table.id==int(request.args[0])).select(table.ALL, orderby=table.timestmp)
else:
logs = db().select(table.ALL, orderby=table.timestmp)
return dict(title=title, logs=logs)
# -----------------------------------------------------------------------------
@auth.shn_requires_membership(1)
def conflict():
""" Conflict Resolution UI """
import cPickle
title = T("Conflict Resolution")
def get_modified_by(user_email):
modified_by = user_email
user = db(db.auth_user.email == user_email).select().first()
if user:
modified_by = user.first_name
if user.last_name:
modified_by += " " + user.last_name
return modified_by
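    # e.g. get_modified_by("[email protected]") returns "Jane Doe" when a
    # matching auth_user row exists, otherwise the email is echoed back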
skip_fields = ["uuid", "id"]
field_errors = dict()
conflicts = db(db[conflict_table].resolved==False).select(db[conflict_table].ALL, orderby=db[conflict_table].logged_on)
    # iterate in reverse: deleting by index while walking forwards would
    # skip the row that slides into the deleted slot
    for idx in xrange(len(conflicts) - 1, -1, -1):
        if conflicts[idx].resource_table not in db.tables:
            del conflicts[idx]
record_nbr = 1
if "record_nbr" in request.vars:
record_nbr = int(request.vars["record_nbr"])
total_conflicts = len(conflicts)
if record_nbr < 1 or record_nbr > total_conflicts:
record_nbr = 1
if total_conflicts == 0:
conflict = None
else:
conflict = conflicts[record_nbr - 1]
remote_record = None
local_record = None
local_modified_by = None
remote_modified_by = None
if conflict:
remote_record = cPickle.loads(conflict.remote_record)
local_record = db(db[conflict.resource_table].uuid==conflict.uuid).select().first()
if conflict.remote_modified_by:
remote_modified_by = get_modified_by(conflict.remote_modified_by)
if "modified_by" in local_record:
local_modified_by = get_modified_by(local_record.modified_by.email)
if "form_action" in request.vars:
if request.vars["form_action"] == "resolve" and conflict:
            if local_record:
                # update local record: collect and validate every field value
                # first, then write them in a single update (the previous
                # per-field vals dict only ever held the last field)
                vals = {}
                for field in remote_record:
                    if field not in skip_fields and field in db[conflict.resource_table].fields:
                        if "final_" + str(field) in request.vars:
                            vals[field] = request.vars["final_" + str(field)]
                        elif db[conflict.resource_table][field].type == "boolean":
                            vals[field] = "False"
                        else:
                            vals[field] = ""
                        field_error = db[conflict.resource_table][field].validate(vals[field])[1]
                        if field_error:
                            field_errors[field] = field_error
                # update only if no errors
                if len(field_errors) == 0:
                    db(db[conflict.resource_table].uuid == conflict.uuid).update(**vals)
                    # undelete record
                    if "deleted" in db[conflict.resource_table].fields:
                        db(db[conflict.resource_table].uuid == conflict.uuid).update(deleted=False)
else:
# insert record
new_rec = dict()
                for field in remote_record:
                    if field in db[conflict.resource_table].fields:
                        if "final_" + field in request.vars:
                            new_rec[field] = request.vars["final_" + field]
                        else:
                            new_rec[field] = remote_record[field]
                        field_error = db[conflict.resource_table][field].validate(new_rec[field])[1]
                        if field_error:
                            field_errors[field] = field_error
                # insert only if no errors
                if len(field_errors) == 0:
                    db[conflict.resource_table].insert(**new_rec)
# set status to resolved if no errors
if len(field_errors) == 0:
conflict.update_record(resolved = True)
# next conflict
conflicts = db(db[conflict_table].resolved==False).select(db[conflict_table].ALL, orderby=db[conflict_table].logged_on)
            # iterate in reverse: deleting by index while walking forwards
            # would skip the row that slides into the deleted slot
            for idx in xrange(len(conflicts) - 1, -1, -1):
                if conflicts[idx].resource_table not in db.tables:
                    del conflicts[idx]
total_conflicts = len(conflicts)
if record_nbr < 1 or record_nbr > total_conflicts:
record_nbr = 1
if total_conflicts == 0:
conflict = None
else:
conflict = conflicts[record_nbr - 1]
remote_record = None
local_record = None
if conflict:
remote_record = cPickle.loads(conflict.remote_record)
local_record = db(db[conflict.resource_table].uuid==conflict.uuid).select().first()
if conflict.remote_modified_by:
remote_modified_by = get_modified_by(conflict.remote_modified_by)
if "modified_by" in local_record:
local_modified_by = get_modified_by(local_record.modified_by.email)
form = None
if conflict:
form = SQLFORM.factory(db[conflict.resource_table])
return dict(title=title,
skip_fields=skip_fields,
total_conflicts=total_conflicts,
conflict=conflict,
record_nbr=record_nbr,
local_record=local_record,
remote_record=remote_record,
local_modified_by=local_modified_by,
remote_modified_by=remote_modified_by,
form=form,
field_errors=field_errors)
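# Illustrative only: the resolution form posts one "final_<fieldname>" value
# per column, so a hypothetical resolve request for a record with fields
# "name" and "deleted" would carry vars like:
#
#     Storage(form_action="resolve", record_nbr="1",
#             final_name="Nairobi Office", final_deleted="False")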
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Email'
db.create_table('sentry_email', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('email', self.gf('sentry.db.models.fields.citext.CIEmailField')(unique=True, max_length=75)),
('date_added', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('sentry', ['Email'])
def backwards(self, orm):
# Deleting model 'Email'
db.delete_table('sentry_email')
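    # The 'models' dict below is South's frozen ORM: a snapshot of every
    # sentry.* model at the time this migration was generated. Callable
    # defaults were evaluated during freezing, which is why client_id,
    # token and expires_at values appear as literals rather than as the
    # live defaults.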
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'ac8eec20ab4f4f9cba2fcfcb6caace5102d30921086f4857b917135d667d1399'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'b8242a9eb79141a7a3eca88d9ff98f39e0ab40bbe5ac44418dcc68992d0c3bde'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Chief Badger'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
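        # each field is frozen as (dotted class path, args, kwargs); the
        # sentry.db.models.fields.* paths name Sentry's custom field classes
        # so South can rebuild the model without importing sentry.models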
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'9c373033c7964af6a649e4bf8b7e3199'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 9, 8, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 10, 7, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'2645fefe1c5b4762b6bdfd60b8c62bbc0702ed6f239e4892b60dd2ad1ead799a'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'cb00e6048c2a4b13bab8603c18b8f6e3cdf4c87428d5479f901a853fe171875a'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 9, 14, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 10, 7, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'2bb47b23f4f14c288d9ca0b8eea5cdb6'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'eKXH0O19GnlrYRLwYuXw8f6cCLOGb3ky'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry']
|
|
# No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration
-------------
Central module for storing and working with a live configuration object. This
module instantiates :class:`.ConfigurationWithCallbacks` onto :const:`.config`.
Attempting to reload this module will not re-instantiate the :const:`.config`
object.
The :const:`.config` object should be directly imported from this
module to be used:
>>> from pyfarm.agent.config import config
"""
import os
from datetime import datetime
from os.path import join, abspath, dirname
from logging import _levelNames, getLogger as _getLogger
from pyfarm.core.enums import NOTSET, STRING_TYPES
from pyfarm.core.config import Configuration
from pyfarm.agent.logger import getLogger
from pyfarm.agent.sysinfo import memory, cpu, network
logger = getLogger("agent.config")
class LoggingConfiguration(Configuration):
"""
Special configuration object which logs when a key is changed in
a dictionary. If the reactor is not running then log messages will
be queued until they can be emitted so they are not lost.
.. automethod:: _expandvars
"""
MODIFIED = "modified"
CREATED = "created"
DELETED = "deleted"
def __init__(self, data=None, environment=None, load=True):
super(LoggingConfiguration, self).__init__("pyfarm.agent")
assert data is None or isinstance(data, dict)
assert environment is None or isinstance(environment, dict)
if environment is None:
environment = os.environ
if load:
self.load(environment=environment)
self.update(
# A mapping of UUIDs to job type instances.
jobtypes={},
# A mapping of tasks to job type instances.
current_assignments={},
# The last time we were in touch with the master,
# or the last time it was in touch with us.
last_master_contact=None,
# The last time we announced ourselves to the master. This
# may be longer than --master-reannounce if
# `last_master_contact` caused us to skip an announcement.
last_announce=None)
if data is not None:
self.update(data)
# Load configuration file(s) for jobtypes and then
# update the local instance
if load:
jobtypes_config = Configuration(
"pyfarm.jobtypes", version=self.version)
jobtypes_config.load(environment=environment)
self.update(jobtypes_config)
def _map_value(self, key, value):
"""
Some configuration values have keywords associated with
        them; this function is responsible for returning the 'fixed'
value.
"""
if value == "auto":
if key == "agent_ram":
return memory.total_ram()
if key == "agent_cpus":
return cpu.total_cpus()
if key == "agent_hostname":
return network.hostname()
if key == "agent_static_root":
return abspath(
join(dirname(__file__), "http", "static"))
return value
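    # Illustrative example (values assumed, not part of the original
    # module): assigning the "auto" keyword stores the detected value
    # rather than the literal string:
    #
    #     config["agent_cpus"] = "auto"      # stored as cpu.total_cpus()
    #     config["agent_hostname"] = "auto"  # stored as network.hostname()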
def __setitem__(self, key, value):
value = self._map_value(key, value)
if key not in self:
self.changed(self.CREATED, key, value, NOTSET)
elif self[key] != value:
self.changed(self.MODIFIED, key, value, self[key])
# Run the base class's method after the above otherwise
# the value would already be in the data we're comparing
# against
super(LoggingConfiguration, self).__setitem__(key, value)
def __delitem__(self, key):
"""
Deletes the provided ``key`` and triggers a ``delete`` event
using :meth:`.changed`.
"""
old_value = self[key] if key in self else NOTSET
super(LoggingConfiguration, self).__delitem__(key)
self.changed(self.DELETED, key, NOTSET, old_value)
def pop(self, key, *args):
"""
Deletes the provided ``key`` and triggers a ``delete`` event
using :meth:`.changed`.
"""
old_value = self[key] if key in self else NOTSET
        value = super(LoggingConfiguration, self).pop(key, *args)
        self.changed(self.DELETED, key, NOTSET, old_value)
        return value
def clear(self):
"""
Deletes all keys in this object and triggers a ``delete`` event
using :meth:`.changed` for each one.
"""
keys = list(self.keys())
        # Not quite the same as dict.clear(), but the effect is the
        # same and pop() emits a DELETED event for each key as it is
        # removed.
        for key in keys:
            self.pop(key, NOTSET)
def update(self, data=None, **kwargs):
"""
Updates the data held within this object and triggers the
appropriate events with :meth:`.changed`.
"""
def trigger_changed(changed_object):
try:
items = changed_object.iteritems()
except AttributeError: # pragma: no cover
items = changed_object.items()
for key, value in items:
if key not in self:
self.changed(self.CREATED, key, value, NOTSET)
elif self[key] != value:
self.changed(self.MODIFIED, key, value, self[key])
if isinstance(data, dict):
for key, value in data.items():
data[key] = self._map_value(key, value)
trigger_changed(data)
elif data is not None:
raise TypeError("Expected None or dict for `data`")
elif data is None:
data = {}
if kwargs:
for key, value in kwargs.items():
kwargs[key] = self._map_value(key, value)
trigger_changed(kwargs)
super(LoggingConfiguration, self).update(data, **kwargs)
def changed(self, change_type, key, new_value=NOTSET, old_value=NOTSET):
"""
This method is run whenever one of the keys in this object
changes.
"""
assert new_value is not NOTSET if change_type != self.DELETED else True
assert old_value is NOTSET if change_type == self.CREATED else True
if change_type == self.MODIFIED:
logger.debug("Modified %r = %r", key, new_value)
elif change_type == self.CREATED:
logger.debug("Set %r = %r", key, new_value)
elif change_type == self.DELETED:
logger.debug("Deleted %r", key)
else:
raise NotImplementedError(
"Don't know how to handle change_type %r" % change_type)
def master_contacted(self, update=True, announcement=False):
"""
Simple method that will update the ``last_master_contact`` and then
return the result.
:param bool update:
Setting this value to False will just return the current value
instead of updating the value too.
"""
if not update and "last_master_contact" not in self:
return None
if announcement:
self["last_announce"] = datetime.utcnow()
if update:
self["last_master_contact"] = datetime.utcnow()
return self["last_master_contact"]
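    # Illustrative usage (assumed, not part of the original module):
    #
    #     config.master_contacted(update=False)       # peek without updating
    #     config.master_contacted(announcement=True)  # also records
    #                                                 # last_announce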
class ConfigurationWithCallbacks(LoggingConfiguration):
"""
    Subclass of :class:`.LoggingConfiguration` that provides the ability to
run a function when a value is changed.
"""
callbacks = {}
@classmethod
def register_callback(cls, key, callback, append=False):
"""
        Register a function as a callback for ``key``. When ``key``
        is set, the given ``callback`` will be run by :meth:`.changed`.
:param string key:
the key which when changed in any way will execute
``callback``
:param callable callback:
the function or method to register
:param boolean append:
            by default attempting to register a callback which has
            already been registered will do nothing; setting this
            to ``True`` overrides that behavior.
"""
assert callable(callback)
callbacks = cls.callbacks.setdefault(key, [])
if callback in callbacks and not append:
logger.debug(
"%r is already a registered callback for %r", callback, key)
return
callbacks.append(callback)
logger.debug("Registered callback %r for %r", callback, key)
@classmethod
def deregister_callback(cls, key, callback):
"""
Removes any callback(s) that are registered with the provided ``key``
"""
results = cls.callbacks.pop(key, None)
if results is None: # pragma: no cover
logger.debug(
"%r is not a registered callback for %r", callback, key)
def clear(self, callbacks=False):
"""
Performs the same operations as :meth:`dict.clear` except
this method can also clear any registered callbacks if
requested.
"""
super(ConfigurationWithCallbacks, self).clear()
if callbacks:
self.callbacks.clear()
def changed(self, change_type, key, new_value=NOTSET, old_value=NOTSET):
"""
This method is called internally whenever a given ``key``
changes which in turn will pass off the change to any
registered callback(s).
"""
super(ConfigurationWithCallbacks, self).changed(
change_type, key, new_value=new_value, old_value=old_value)
if key in self.callbacks:
for callback in self.callbacks[key]:
callback(change_type, key, new_value, old_value)
logger.debug(
"Key %r was %r, calling callback %s",
key, change_type, callback)
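# Usage sketch (hypothetical callback, not part of the original module):
# a callback registered for a key runs on every create, modify, or delete
# of that key.
#
#     def on_ram_change(change_type, key, new_value, old_value):
#         logger.info("%s %r: %r -> %r", change_type, key, old_value, new_value)
#
#     ConfigurationWithCallbacks.register_callback("agent_ram", on_ram_change)
#     config["agent_ram"] = 2048  # fires on_ram_change with "created" or
#                                 # "modified" depending on prior state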
def configure_logger_level():
"""
When called this will set the root logger level based
on the ``agent_global_logger_level`` configuration
variable.
"""
# Import here to prevent circular imports and because we
# don't want CONFIGURATION in the namespace of this module.
from pyfarm.agent.logger.twistd import CONFIGURATION
root_level = config["agent_global_logger_level"]
if isinstance(root_level, STRING_TYPES):
root_level = _levelNames[root_level.upper()]
assert isinstance(root_level, int)
levels = CONFIGURATION["levels"]
for index, (name, level) in enumerate(levels):
if name == "":
levels[index] = ("", root_level)
break
else:
levels.insert(0, ("", root_level))
# Just to be safe, we also set pf's root level
pf = _getLogger("pf")
pf.setLevel(root_level)
# Prevent a call to reload() from dumping the config object
try:
config
except NameError:
config = ConfigurationWithCallbacks()
configure_logger_level()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments, no-self-use, too-many-locals, broad-except, too-many-lines, unnecessary-pass
"""numpy interface for operators."""
import traceback
import warnings
import collections
from array import array
from threading import Lock
import ctypes
from ctypes import CFUNCTYPE, POINTER, Structure, pointer
from ctypes import c_void_p, c_int, c_char, c_char_p, cast, c_bool
from .base import _LIB, check_call, MXCallbackList, c_array, c_array_buf, mx_int, OpHandle
from .base import c_str, mx_uint, mx_float, ctypes2numpy_shared, NDArrayHandle, py_str
from . import symbol, context
from .ndarray import NDArray, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
from .ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID, _STORAGE_TYPE_ID_TO_STR
from .ndarray.ndarray import _STORAGE_TYPE_UNDEFINED, _STORAGE_TYPE_DEFAULT
from .ndarray.ndarray import _STORAGE_TYPE_CSR, _STORAGE_TYPE_ROW_SPARSE
from .ndarray import _ndarray_cls
from .numpy.multiarray import _np_ndarray_cls
from .util import is_np_array
c_int_p = POINTER(c_int)
class PythonOp(object):
"""Base class for operators implemented in Python.
Parameters
----------
need_top_grad : bool
the default need_top_grad() function returns this value.
"""
_ref_holder = []
def __init__(self, need_top_grad=True):
self.info_ = None
self.need_top_grad_ = need_top_grad
warnings.warn('PythonOp has been deprecated. Please use CustomOp')
def __call__(self, *args, **kwargs):
return self.get_symbol(*args, **kwargs)
def get_symbol(self, *args, **kwargs):
"""Create a symbol from numpy operator.
This should only be called once per instance if the operator contains
internal states.
Parameters
----------
args : list
a list of input arguments (symbols).
Returns
-------
sym : mxnet.symbol.Symbol
"""
raise NotImplementedError("Must override this")
def forward(self, in_data, out_data):
"""Forward interface. Override to create new operators.
Parameters
----------
in_data, out_data: list
input and output for forward. See document for
corresponding arguments of Operator::Forward
"""
out_data[0][:] = in_data[0]
def backward(self, out_grad, in_data, out_data, in_grad):
"""Backward interface. Can override when creating new operators.
Parameters
----------
out_grad, in_data, out_data, in_grad : list
input and output for backward. See document for
corresponding arguments of Operator::Backward
"""
# pylint: disable=W0613
in_grad[0][:] = 1.0
def infer_shape(self, in_shape):
"""Interface for ``infer_shape``. Can override when creating new operators.
Parameters
----------
in_shape : list
List of argument shapes in the same order as
declared in list_arguments.
Returns
-------
in_shape : list
List of argument shapes. Can be modified from in_shape.
out_shape : list
List of output shapes calculated from in_shape,
            in the same order as declared in list_outputs.
"""
return in_shape, [in_shape[0]]
def list_outputs(self):
"""Interface for ``list_outputs``. Can override when creating new operators.
Returns
-------
outputs : list
List of output blob names.
"""
return ['output']
def list_arguments(self):
"""Interface for ``list_arguments``. Can override when creating new operators.
Returns
-------
        arguments : list
            List of argument blob names.
"""
return ['data']
def need_top_grad(self):
"""Whether this operator needs out_grad for backward.
Returns
-------
need_top_grad : bool
Whether this operator needs out_grad for backward.
Should be set to False for loss layers.
"""
return self.need_top_grad_
class NumpyOp(PythonOp):
"""Base class for numpy operators. numpy operators allow parts
of computation in symbolic graph to be writen in numpy. This feature
is intended for quickly hacking out a solution for non performance
critical parts. Please consider write a c++ implementation if it becomes
a bottleneck.
Note that if your operator contains internal states (like arrays),
it cannot be used for multi-gpu training.
"""
def __init__(self, need_top_grad=True):
super(NumpyOp, self).__init__(need_top_grad)
warnings.warn('NumpyOp has been deprecated. Please use CustomOp')
def get_symbol(self, *args, **kwargs):
fb_functype = CFUNCTYPE(None, c_int, POINTER(POINTER(mx_float)), POINTER(c_int),
POINTER(POINTER(mx_uint)), POINTER(c_int), c_void_p)
infer_functype = CFUNCTYPE(None, c_int, POINTER(c_int),
POINTER(POINTER(mx_int)), c_void_p)
list_functype = CFUNCTYPE(None, POINTER(POINTER(POINTER(c_char))), c_void_p)
class NumpyOpInfo(Structure):
"""Structure that holds Callback information. Passed to NumpyOpProp"""
_fields_ = [
('forward', fb_functype),
('backward', fb_functype),
('infer_shape', infer_functype),
('list_outputs', list_functype),
('list_arguments', list_functype),
('p_forward', c_void_p),
('p_backward', c_void_p),
('p_infer_shape', c_void_p),
('p_list_outputs', c_void_p),
('p_list_arguments', c_void_p),
]
def forward_entry(num_tensor, tensor_ptrs, tensor_dims,
tensor_shapes, tensor_tags, _):
"""C Callback for NumpyOp::Forward"""
tensors = [[] for i in range(4)]
for i in range(num_tensor):
shape = [tensor_shapes[i][j] for j in range(tensor_dims[i])]
buff = ctypes2numpy_shared(tensor_ptrs[i], shape)
tensors[tensor_tags[i]].append(buff)
self.forward(in_data=tensors[0], out_data=tensors[1])
def backward_entry(num_tensor, tensor_ptrs, tensor_dims,
tensor_shapes, tensor_tags, _):
"""C Callback for NumpyOp::Backward"""
tensors = [[] for i in range(4)]
for i in range(num_tensor):
shape = [tensor_shapes[i][j] for j in range(tensor_dims[i])]
buff = ctypes2numpy_shared(tensor_ptrs[i], shape)
tensors[tensor_tags[i]].append(buff)
self.backward(in_data=tensors[0], out_data=tensors[1],
in_grad=tensors[2], out_grad=tensors[3])
def infer_shape_entry(num_tensor, tensor_dims,
tensor_shapes, _):
"""C Callback for NumpyOpProp::InferShape"""
n_in = len(self.list_arguments())
n_out = len(self.list_outputs())
assert num_tensor == n_in + n_out
shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])] for i in range(n_in)]
ishape, oshape = self.infer_shape(shapes)
assert len(oshape) == n_out
assert len(ishape) == n_in
rshape = list(ishape) + list(oshape)
for i in range(n_in+n_out):
tensor_shapes[i] = cast(c_array_buf(mx_int,
array('i', rshape[i])),
POINTER(mx_int))
tensor_dims[i] = len(rshape[i])
def list_outputs_entry(out, _):
"""C Callback for NumpyOpProp::ListOutputs"""
ret = self.list_outputs()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
def list_arguments_entry(out, _):
"""C Callback for NumpyOpProp::ListArguments"""
ret = self.list_arguments()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
self.info_ = NumpyOpInfo(fb_functype(forward_entry),
fb_functype(backward_entry),
infer_functype(infer_shape_entry),
list_functype(list_outputs_entry),
list_functype(list_arguments_entry),
None, None, None, None, None)
cb_ptr = format(cast(pointer(self.info_), c_void_p).value, 'x')
# pylint: disable=E1101
sym = symbol._internal._Native(*args,
info=cb_ptr,
need_top_grad=self.need_top_grad(),
**kwargs)
# keep a reference of ourself in PythonOp so we don't get garbage collected.
PythonOp._ref_holder.append(self)
return sym
class NDArrayOp(PythonOp):
"""Base class for numpy operators. numpy operators allow parts
of computation in symbolic graph to be writen in numpy. This feature
is intended for quickly hacking out a solution for non performance
critical parts. Please consider write a c++ implementation if it becomes
a bottleneck.
Note that if your operator contains internal states (like arrays),
it cannot be used for multi-gpu training.
"""
def __init__(self, need_top_grad=True):
super(NDArrayOp, self).__init__(need_top_grad)
warnings.warn('NDArrayOp has been deprecated. Please use CustomOp')
def get_symbol(self, *args, **kwargs):
fb_functype = CFUNCTYPE(c_bool, c_int, POINTER(c_void_p), POINTER(c_int), c_void_p)
infer_functype = CFUNCTYPE(c_bool, c_int, POINTER(c_int),
POINTER(POINTER(mx_int)), c_void_p)
list_functype = CFUNCTYPE(c_bool, POINTER(POINTER(POINTER(c_char))), c_void_p)
deps_functype = CFUNCTYPE(c_bool, c_int_p, c_int_p, c_int_p,
c_int_p, POINTER(c_int_p), c_void_p)
class NDArrayOpInfo(Structure):
"""Structure that holds Callback information. Passed to NDArrayOpProp"""
_fields_ = [
('forward', fb_functype),
('backward', fb_functype),
('infer_shape', infer_functype),
('list_outputs', list_functype),
('list_arguments', list_functype),
('declare_backward_dependency', deps_functype),
('p_forward', c_void_p),
('p_backward', c_void_p),
('p_infer_shape', c_void_p),
('p_list_outputs', c_void_p),
('p_list_arguments', c_void_p),
('p_declare_backward_dependency', c_void_p)
]
def forward_entry(num_ndarray, ndarraies, tags, _):
"""C Callback for NDArrayOp::Forward"""
try:
tensors = [[] for i in range(4)]
for i in range(num_ndarray):
if tags[i] == 1:
tensors[tags[i]].append(NDArray(cast(ndarraies[i], NDArrayHandle),
writable=True))
else:
tensors[tags[i]].append(NDArray(cast(ndarraies[i], NDArrayHandle),
writable=False))
self.forward(in_data=tensors[0], out_data=tensors[1])
except Exception:
print('Error in NDArrayOp.forward: %s' % traceback.format_exc())
return False
return True
def backward_entry(num_ndarray, ndarraies, tags, _):
"""C Callback for NDArrayOp::Backward"""
try:
tensors = [[] for i in range(4)]
for i in range(num_ndarray):
if tags[i] == 2:
tensors[tags[i]].append(NDArray(cast(ndarraies[i], NDArrayHandle),
writable=True))
else:
tensors[tags[i]].append(NDArray(cast(ndarraies[i], NDArrayHandle),
writable=False))
self.backward(in_data=tensors[0], out_data=tensors[1],
in_grad=tensors[2], out_grad=tensors[3])
except Exception:
print('Error in NDArrayOp.backward: %s' % traceback.format_exc())
return False
return True
def infer_shape_entry(num_tensor, tensor_dims,
tensor_shapes, _):
"""C Callback for NDArrayOpProp::InferShape"""
try:
n_in = len(self.list_arguments())
n_out = len(self.list_outputs())
assert num_tensor == n_in + n_out
shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])] for i in range(n_in)]
ishape, oshape = self.infer_shape(shapes)
assert len(oshape) == n_out
assert len(ishape) == n_in
rshape = list(ishape) + list(oshape)
for i in range(n_in+n_out):
tensor_shapes[i] = cast(c_array_buf(mx_int,
array('i', rshape[i])),
POINTER(mx_int))
tensor_dims[i] = len(rshape[i])
except Exception:
print('Error in NDArrayOp.infer_shape: %s' % traceback.format_exc())
return False
return True
def list_outputs_entry(out, _):
"""C Callback for NDArrayOpProp::ListOutputs"""
try:
ret = self.list_outputs()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
except Exception:
print('Error in NDArrayOp.list_outputs: %s' % traceback.format_exc())
return False
return True
def list_arguments_entry(out, _):
"""C Callback for NDArrayOpProp::ListArguments"""
try:
ret = self.list_arguments()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
except Exception:
print('Error in NDArrayOp.list_arguments: %s' % traceback.format_exc())
return False
return True
def declare_backward_dependency(out_grad, in_data, out_data, num_dep, deps, _):
"""C Callback for NDArrayOpProp::DeclareBacwardDependency"""
try:
out_grad = [out_grad[i] for i in range(len(self.list_outputs()))]
in_data = [in_data[i] for i in range(len(self.list_arguments()))]
out_data = [out_data[i] for i in range(len(self.list_outputs()))]
rdeps = self.declare_backward_dependency(out_grad, in_data, out_data)
num_dep[0] = len(rdeps)
rdeps = cast(c_array_buf(c_int, array('i', rdeps)), c_int_p)
deps[0] = rdeps
except Exception:
print('Error in NDArrayOp.declare_backward_dependency: %s' % traceback.format_exc())
return False
return True
self.info_ = NDArrayOpInfo(fb_functype(forward_entry),
fb_functype(backward_entry),
infer_functype(infer_shape_entry),
list_functype(list_outputs_entry),
list_functype(list_arguments_entry),
deps_functype(declare_backward_dependency),
None, None, None, None, None, None)
cb_ptr = format(cast(pointer(self.info_), c_void_p).value, 'x')
# pylint: disable=E1101
sym = symbol._internal._NDArray(*args,
info=cb_ptr,
**kwargs)
# keep a reference of ourself in PythonOp so we don't get garbage collected.
PythonOp._ref_holder.append(self)
return sym
def declare_backward_dependency(self, out_grad, in_data, out_data):
"""Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
"""
deps = []
if self.need_top_grad():
deps.extend(out_grad)
deps.extend(in_data)
deps.extend(out_data)
return deps
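    # Example (illustrative): for an operator with one input and one output,
    # this default implementation returns [out_grad[0], in_data[0],
    # out_data[0]] when need_top_grad() is True, and [in_data[0],
    # out_data[0]] otherwise (e.g. for loss layers).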
class CustomOp(object):
"""Base class for operators implemented in python"""
def __init__(self):
pass
def forward(self, is_train, req, in_data, out_data, aux):
"""Forward interface. Can override when creating new operators.
Parameters
----------
is_train : bool
whether this is for training
req : list of str
how to assign to out_data. can be 'null', 'write', or 'add'.
You can optionally use self.assign(dst, req, src) to handle this.
in_data, out_data, aux: list of NDArrays
input, output, and auxiliary states for forward. See document for
corresponding arguments of Operator::Forward
"""
# pylint: disable=W0613
pass
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
"""Backward interface. Can override when creating new operators.
Parameters
----------
req : list of str
how to assign to in_grad. can be 'null', 'write', or 'add'.
You can optionally use self.assign(dst, req, src) to handle this.
out_grad, in_data, out_data, in_grad, aux : list of NDArrays
input and output for backward. See document for
corresponding arguments of Operator::Backward
"""
# pylint: disable=W0613
pass
def assign(self, dst, req, src):
"""Helper function for assigning into dst depending on requirements."""
if req == 'null':
return
elif req in ('write', 'inplace'):
if is_np_array():
dst[()] = src
else:
dst[:] = src
elif req == 'add':
if is_np_array():
dst[()] += src
else:
dst[:] += src
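    # Minimal sketch (illustrative, class name assumed): a CustomOp that
    # copies its input through, using assign() so the 'null', 'write' and
    # 'add' request modes are all honored:
    #
    #     class Identity(CustomOp):
    #         def forward(self, is_train, req, in_data, out_data, aux):
    #             self.assign(out_data[0], req[0], in_data[0])
    #
    #         def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
    #             self.assign(in_grad[0], req[0], out_grad[0])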
class CustomOpProp(object):
"""Base class for operator property class implemented in python.
Parameters
----------
need_top_grad : bool
The default declare_backward_dependency function. Use this value
to determine whether this operator needs gradient input.
"""
def __init__(self, need_top_grad=True):
self.need_top_grad_ = need_top_grad
def infer_shape(self, in_shape):
"""infer_shape interface. Can override when creating new operators.
Parameters
----------
in_shape : list
List of argument shapes in the same order as
declared in list_arguments.
Returns
-------
in_shape : list
List of argument shapes. Can be modified from in_shape.
out_shape : list
List of output shapes calculated from in_shape,
in the same order as declared in list_outputs.
aux_shape : Optional, list
List of aux shapes calculated from in_shape,
in the same order as declared in list_auxiliary_states.
"""
return in_shape, (in_shape[0],)*len(self.list_outputs()), ()
def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states())
def infer_storage_type(self, in_stype):
"""infer_storage_type interface. Used to infer storage type of
inputs and outputs in the forward pass. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
in_stype : list of stypes, valid stypes are default, row_sparse and
csr
Returns
-------
in_stype : list
list of argument stypes.
out_stype : list
list of output types calculated from in_stype,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_stype,
in the same order as declared in list_auxiliary_states.
"""
for i, stype in enumerate(in_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type implementation doesnt allow non default stypes: " \
"found non default stype '%s' for in_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input/output stypes" % (stype, i)
return in_stype, \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_outputs()), \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_auxiliary_states())
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
"""infer_storage_type_backward interface. Used to infer storage
type of inputs and outputs in the backward pass.
Will raise an error if undefined storage type is returned.
Returned lists have to be the same size as the input lists to infer_storage_type_backward,
otherwise an exception will be thrown. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
ograd_stype : list
list of output gradient storage types
in_stype : list
list of input storage types
out_stype : list
list of output storage types
igrad_stype : list
list of input gradient storage types
aux_stype : list
list of auxiliary storage types
Returns
-------
ograd_stype : list
list of inferred output gradient storage types
in_stype : list
list of inferred input storage types
out_stype : list
list of inferred output storage types
igrad_stype : list
list of inferred input gradient storage types
aux_stype : list
list of inferred storage types for auxiliary states
"""
for i, stype in enumerate(ograd_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for ograd_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default output gradient stypes" % (stype, i)
for i, stype in enumerate(igrad_stype):
if stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED]:
stype = _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for igrad_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input gradient stypes" % (stype, i)
stype_lists = [ograd_stype, in_stype, out_stype, igrad_stype, aux_stype]
for stype_list in stype_lists:
stype_list[:] = len(stype_list) * [_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]
return stype_lists[0], stype_lists[1], stype_lists[2], stype_lists[3], stype_lists[4]
def list_outputs(self):
"""list_outputs interface. Can override when creating new operators.
Returns
-------
outputs : list
List of output blob names.
"""
return ['output']
def list_arguments(self):
"""list_arguments interface. Can override when creating new operators.
Returns
-------
arguments : list
List of argument blob names.
"""
return ['data']
def list_auxiliary_states(self):
"""list_auxiliary_states interface. Can override when creating new operators.
Returns
-------
auxs : list
list of auxiliary state blob names.
"""
return []
def declare_backward_dependency(self, out_grad, in_data, out_data):
"""Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
"""
deps = []
if self.need_top_grad_:
deps.extend(out_grad)
deps.extend(in_data)
deps.extend(out_data)
return deps
def create_operator(self, ctx, in_shapes, in_dtypes):
"""Create an operator that carries out the real computation
given the context, input shapes, and input data types."""
# pylint: disable=W0613
return CustomOp()
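    # Sketch of a matching property class (illustrative; pairs with the
    # Identity sketch above and the register() decorator defined below):
    #
    #     @register("identity")
    #     class IdentityProp(CustomOpProp):
    #         def list_arguments(self):
    #             return ['data']
    #
    #         def list_outputs(self):
    #             return ['output']
    #
    #         def infer_shape(self, in_shape):
    #             return in_shape, [in_shape[0]], []
    #
    #         def create_operator(self, ctx, in_shapes, in_dtypes):
    #             return Identity()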
class _Registry(object):
"""CustomOp registry."""
def __init__(self):
self.ref_holder = {}
self.counter = 0
self.result_deps = set()
self.lock = Lock()
def inc(self):
"""Get index for new entry."""
self.lock.acquire()
cur = self.counter
self.counter += 1
self.lock.release()
return cur
_registry = _Registry()
def register(reg_name):
"""Register a subclass of CustomOpProp to the registry with name reg_name."""
def do_register(prop_cls):
"""Register a subclass of CustomOpProp to the registry."""
fb_functype = CFUNCTYPE(c_int, c_int, POINTER(c_void_p), POINTER(c_int),
POINTER(c_int), c_int, c_void_p)
del_functype = CFUNCTYPE(c_int, c_void_p)
infershape_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int),
POINTER(POINTER(mx_int)), c_void_p)
infertype_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), c_void_p)
inferstorage_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), c_void_p)
inferstorage_backward_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), \
POINTER(c_int), c_void_p)
list_functype = CFUNCTYPE(c_int, POINTER(POINTER(POINTER(c_char))), c_void_p)
deps_functype = CFUNCTYPE(c_int, c_int_p, c_int_p, c_int_p,
c_int_p, POINTER(c_int_p), c_void_p)
createop_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(POINTER(mx_uint)),
POINTER(c_int), POINTER(c_int),
POINTER(MXCallbackList), c_void_p)
req_enum = ('null', 'write', 'inplace', 'add')
create_ndarray_fn = _np_ndarray_cls if is_np_array() else _ndarray_cls
def creator(op_type, argc, keys, vals, ret):
"""internal function"""
assert py_str(op_type) == reg_name
kwargs = dict([(py_str(keys[i]), py_str(vals[i])) for i in range(argc)])
op_prop = prop_cls(**kwargs)
def infer_shape_entry(num_tensor, tensor_dims,
tensor_shapes, _):
"""C Callback for ``CustomOpProp::InferShape``."""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])]
for i in range(n_in)]
ret = op_prop.infer_shape(shapes)
if len(ret) == 2:
ishape, oshape = ret
ashape = []
elif len(ret) == 3:
ishape, oshape, ashape = ret
else:
raise AssertionError("infer_shape must return 2 or 3 lists")
assert len(oshape) == n_out, \
"InferShape Error: expecting %d entries in returned output " \
"shapes, got %d."%(n_out, len(oshape))
assert len(ishape) == n_in, \
"InferShape Error: expecting %d entries in returned input " \
"shapes, got %d."%(n_in, len(ishape))
assert len(ashape) == n_aux, \
"InferShape Error: expecting %d entries in returned aux state " \
"shapes, got %d."%(n_aux, len(ashape))
rshape = list(ishape) + list(oshape) + list(ashape)
for i in range(n_in+n_out+n_aux):
tensor_shapes[i] = cast(c_array_buf(mx_int,
array('i', rshape[i])),
POINTER(mx_int))
tensor_dims[i] = len(rshape[i])
infer_shape_entry._ref_holder = [tensor_shapes]
except Exception:
print('Error in %s.infer_shape: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_storage_type_backward_entry(num_tensor, tensor_stypes, tags, _):
# pylint: disable=C0301
"""C Callback for CustomOpProp::InferStorageTypeBackward"""
try:
tensors = [[] for i in range(5)]
for i in range(num_tensor):
tensors[tags[i]].append(_STORAGE_TYPE_ID_TO_STR[tensor_stypes[i]])
# Ordering of stypes: ograd, input, output, igrad, aux
tensors = [tensors[3], tensors[0], tensors[1], tensors[2], tensors[4]]
ret = op_prop.infer_storage_type_backward(tensors[0],
tensors[1],
tensors[2],
tensors[3],
tensors[4])
                    if len(ret) == 4:
                        # The aux stype list is optional; append an empty one
                        # so the length checks below can safely index ret[4].
                        ret = list(ret) + [[]]
elif len(ret) == 5:
pass
else:
raise AssertionError("infer_storage_type_backward must return 4 or 5 lists")
assert len(ret[0]) == len(tensors[0]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned output gradient " \
"stypes, got %d."%(len(tensors[0]), len(ret[0]))
assert len(ret[1]) == len(tensors[1]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned input stypes, " \
"got %d."%(len(tensors[1]), len(ret[1]))
assert len(ret[2]) == len(tensors[2]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned output stypes, " \
"got %d."%(len(tensors[2]), len(ret[2]))
assert len(ret[3]) == len(tensors[3]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned input gradient stypes, " \
"got %d."%(len(tensors[3]), len(ret[3]))
assert len(ret[4]) == len(tensors[4]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned aux stypes, " \
"got %d."%(len(tensors[4]), len(ret[4]))
rstype = []
for i, ret_list in enumerate(ret):
rstype.extend(ret_list)
for i, stype in enumerate(rstype):
assert stype != _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED], \
"stype should not be undefined"
assert stype in _STORAGE_TYPE_STR_TO_ID, \
"Provided stype: %s is not valid " \
"valid stypes are %s, %s, %s"%(stype,
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT],
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_ROW_SPARSE],
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_CSR])
tensor_stypes[i] = _STORAGE_TYPE_STR_TO_ID[stype]
infer_storage_type_backward_entry._ref_holder = [tensor_stypes]
except Exception:
                    print('Error in %s.infer_storage_type_backward: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_storage_type_entry(num_tensor, tensor_stypes, _):
"""C Callback for CustomOpProp::InferStorageType"""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
stypes = [_STORAGE_TYPE_ID_TO_STR[tensor_stypes[i]] for i in range(n_in)]
ret = op_prop.infer_storage_type(stypes)
if len(ret) == 2:
istype, ostype = ret
astype = []
elif len(ret) == 3:
istype, ostype, astype = ret
else:
raise AssertionError("infer_storage_type must return 2 or 3 lists")
assert len(ostype) == n_out, \
"InferStorageType Error: expecting %d entries in returned output " \
"stypes, got %d."%(n_out, len(ostype))
assert len(istype) == n_in, \
"InferStorageType Error: expecting %d entries in returned input " \
"stypes, got %d."%(n_in, len(istype))
assert len(astype) == n_aux, \
"InferStorageType Error: expecting %d entries in returned aux state " \
"stypes, got %d."%(n_aux, len(astype))
rstype = list(istype) + list(ostype) + list(astype)
for i, stype in enumerate(rstype):
    tensor_stypes[i] = _STORAGE_TYPE_STR_TO_ID[stype]
infer_storage_type_entry._ref_holder = [tensor_stypes]
except Exception:
print('Error in %s.infer_storage_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_type_entry(num_tensor, tensor_types, _):
"""C Callback for CustomOpProp::InferType"""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
types = [_DTYPE_MX_TO_NP[tensor_types[i]] for i in range(n_in)]
ret = op_prop.infer_type(types)
if len(ret) == 2:
itype, otype = ret
atype = []
elif len(ret) == 3:
itype, otype, atype = ret
else:
raise AssertionError("infer_type must return 2 or 3 lists")
assert len(otype) == n_out, \
"InferType Error: expecting %d entries in returned output " \
"types, got %d."%(n_out, len(otype))
assert len(itype) == n_in, \
"InferType Error: expecting %d entries in returned input " \
"types, got %d."%(n_in, len(itype))
assert len(atype) == n_aux, \
"InferType Error: expecting %d entries in returned aux state " \
"types, got %d."%(n_aux, len(atype))
rtype = list(itype) + list(otype) + list(atype)
for i, dtype in enumerate(rtype):
tensor_types[i] = _DTYPE_NP_TO_MX[dtype]
infer_type_entry._ref_holder = [tensor_types]
except Exception:
print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
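# Sketch: the matching CustomOpProp.infer_type convention. Propagating the
# first input's dtype to every output and aux state (which mirrors MXNet's
# documented default behaviour) would look like:
#
#     def infer_type(self, in_type):
#         t = in_type[0]
#         return in_type, [t] * len(self.list_outputs()), \
#                [t] * len(self.list_auxiliary_states())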
def list_outputs_entry(out, _):
"""C Callback for CustomOpProp::ListOutputs"""
try:
ret = op_prop.list_outputs()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_outputs_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_outputs: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_arguments_entry(out, _):
"""C Callback for CustomOpProp::ListArguments"""
try:
ret = op_prop.list_arguments()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_arguments_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_arguments: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_auxiliary_states_entry(out, _):
"""C Callback for CustomOpProp::ListAuxiliaryStates"""
try:
ret = op_prop.list_auxiliary_states()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_auxiliary_states_entry._ref_holder = [out]
except Exception:
tb = traceback.format_exc()
print('Error in %s.list_auxiliary_states: %s' % (reg_name, tb))
return False
return True
def declare_backward_dependency_entry(out_grad, in_data, out_data, num_dep, deps, _):
"""C Callback for CustomOpProp::DeclareBacwardDependency"""
try:
out_grad = [out_grad[i] for i in range(len(op_prop.list_outputs()))]
in_data = [in_data[i] for i in range(len(op_prop.list_arguments()))]
out_data = [out_data[i] for i in range(len(op_prop.list_outputs()))]
rdeps = op_prop.declare_backward_dependency(out_grad, in_data, out_data)
num_dep[0] = len(rdeps)
_registry.result_deps = set()
for dep in rdeps:
_registry.result_deps.add(dep)
rdeps = cast(c_array_buf(c_int, array('i', rdeps)), c_int_p)
deps[0] = rdeps
declare_backward_dependency_entry._ref_holder = [deps]
except Exception:
tb = traceback.format_exc()
print('Error in %s.declare_backward_dependency: %s' % (reg_name, tb))
return False
return True
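# Sketch: declare_backward_dependency returns the subset of the (out_grad,
# in_data, out_data) dependency ids that the backward pass actually reads,
# letting the engine free the rest early. A conservative implementation
# simply keeps everything:
#
#     def declare_backward_dependency(self, out_grad, in_data, out_data):
#         return out_grad + in_data + out_data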
def create_operator_entry(ctx, num_inputs, shapes, ndims, dtypes, ret, _):
"""C Callback for CustomOpProp::CreateOperator"""
try:
ctx = py_str(ctx)
sep = ctx.find('(')
ctx = context.Context(ctx[:sep], int(ctx[sep+1:-1]))
ndims = [ndims[i] for i in range(num_inputs)]
shapes = [[shapes[i][j] for j in range(ndims[i])] for i in range(num_inputs)]
dtypes = [dtypes[i] for i in range(num_inputs)]
op = op_prop.create_operator(ctx, shapes, dtypes)
def forward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Forward"""
try:
tensors = [[] for i in range(5)]
for i in range(num_ndarray):
if tags[i] == 1 or tags[i] == 4:
tensors[tags[i]].append(
create_ndarray_fn(cast(ndarraies[i], NDArrayHandle), writable=True)
)
else:
tensors[tags[i]].append(
create_ndarray_fn(cast(ndarraies[i], NDArrayHandle), writable=False)
)
reqs = [req_enum[reqs[i]] for i in range(len(tensors[1]))]
with ctx:
op.forward(is_train=is_train, req=reqs,
in_data=tensors[0], out_data=tensors[1],
aux=tensors[4])
except Exception:
print('Error in CustomOp.forward: %s' % traceback.format_exc())
return False
return True
def backward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Backward"""
# pylint: disable=W0613
try:
tensors = [[] for i in range(5)]
num_outputs = len(op_prop.list_outputs())
num_args = len(op_prop.list_arguments())
for i in range(num_ndarray):
if i in _registry.result_deps or i >= (num_outputs * 2 + num_args):
# If it is a backward dependency or output or aux:
# Set stype as undefined so that it returns
# ndarray based on existing stype
stype = _STORAGE_TYPE_UNDEFINED
else:
# If it is some input, output or out grad ndarray not part of
# backward dependency it is empty and thus the ndarray should
# be set to default
stype = _STORAGE_TYPE_DEFAULT
if tags[i] == 2 or tags[i] == 4:
tensors[tags[i]].append(
create_ndarray_fn(cast(ndarraies[i], NDArrayHandle),
writable=True, stype=stype)
)
else:
tensors[tags[i]].append(
create_ndarray_fn(cast(ndarraies[i], NDArrayHandle),
writable=False, stype=stype)
)
reqs = [req_enum[reqs[i]] for i in range(len(tensors[2]))]
with ctx:
op.backward(req=reqs,
in_data=tensors[0], out_data=tensors[1],
in_grad=tensors[2], out_grad=tensors[3],
aux=tensors[4])
except Exception:
print('Error in CustomOp.backward: %s' % traceback.format_exc())
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
fb_functype(forward_entry),
fb_functype(backward_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None, None, None]
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op._ref_holder = [ret]
_registry.ref_holder[cur] = op
except Exception:
print('Error in %s.create_operator: %s' % (reg_name, traceback.format_exc()))
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOpProp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOpProp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
list_functype(list_arguments_entry),
list_functype(list_outputs_entry),
list_functype(list_auxiliary_states_entry),
infershape_functype(infer_shape_entry),
deps_functype(declare_backward_dependency_entry),
createop_functype(create_operator_entry),
infertype_functype(infer_type_entry),
inferstorage_functype(infer_storage_type_entry),
inferstorage_backward_functype(infer_storage_type_backward_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None]*len(callbacks)
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op_prop._ref_holder = [ret]
_registry.ref_holder[cur] = op_prop
return True
creator_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(c_char_p),
POINTER(c_char_p), POINTER(MXCallbackList))
creator_func = creator_functype(creator)
check_call(_LIB.MXCustomOpRegister(c_str(reg_name), creator_func))
cur = _registry.inc()
_registry.ref_holder[cur] = creator_func
return prop_cls
return do_register
register("custom_op")(CustomOpProp)
def get_all_registered_operators():
"""Get all registered MXNet operator names.
Returns
-------
operator_names : list of string
"""
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
ctypes.byref(plist)))
mx_registered_operator_names = [py_str(plist[i]) for i in range(size.value)]
return mx_registered_operator_names
OperatorArguments = collections.namedtuple('OperatorArguments', ['narg', 'names', 'types'])
def get_operator_arguments(op_name):
"""Given operator name, fetch operator arguments - number of arguments,
argument names, argument types.
Parameters
----------
op_name: str
Handle for the operator
Returns
-------
operator_arguments : OperatorArguments, namedtuple with number of arguments, names and types
"""
op_handle = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(op_name), ctypes.byref(op_handle)))
real_name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
key_var_num_args = ctypes.c_char_p()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
op_handle, ctypes.byref(real_name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(key_var_num_args),
ctypes.byref(ret_type)))
narg = int(num_args.value)
arg_names = [py_str(arg_names[i]) for i in range(narg)]
arg_types = [py_str(arg_types[i]) for i in range(narg)]
return OperatorArguments(narg, arg_names, arg_types)
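# Example (sketch): introspecting a built-in operator. The printed values are
# illustrative only and depend on the MXNet build.
#
#     args = get_operator_arguments('Activation')
#     print(args.narg, args.names, args.types)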
#!/usr/bin/env python
"""
@package mi.dataset.parser.flort_dj_dcl
@file marine-integrations/mi/dataset/parser/flort_dj_dcl.py
@author Steve Myerson
@brief Parser for the flort_dj_dcl dataset driver
This file contains code for the flort_dj_dcl parsers and code to produce data particles.
For telemetered data, there is one parser which produces one type of data particle.
For recovered data, there is one parser which produces one type of data particle.
The input files and the content of the data particles are the same for both
recovered and telemetered.
Only the names of the output particle streams are different.
The input file is ASCII and contains 2 types of records.
Records are separated by a newline.
All records start with a timestamp.
Metadata records: timestamp [text] more text newline.
Sensor Data records: timestamp sensor_data newline.
Only sensor data records produce particles if properly formed.
Malformed sensor data records and all metadata records produce no particles.
Release notes:
Initial Release
"""
__author__ = 'Steve Myerson'
__license__ = 'Apache 2.0'
import calendar
import copy
from functools import partial
import re
from mi.core.instrument.chunker import \
StringChunker
from mi.core.log import get_logger; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.exceptions import \
DatasetParserException, \
UnexpectedDataException
from mi.core.instrument.data_particle import \
DataParticle, \
DataParticleKey, \
DataParticleValue
from mi.dataset.dataset_parser import BufferLoadingParser
# Basic patterns
ANY_CHARS = r'.*' # Any characters excluding a newline
NEW_LINE = r'(?:\r\n|\n)' # any type of new line
UINT = r'(\d*)' # unsigned integer as a group
SPACE = ' '
TAB = '\t'
START_GROUP = '('
END_GROUP = ')'
# Timestamp at the start of each record: YYYY/MM/DD HH:MM:SS.mmm
# Metadata fields: [text] more text
# Sensor data has tab-delimited fields (date, time, integers)
# All records end with one of the newlines.
DATE = r'(\d{4})/(\d{2})/(\d{2})' # Date: YYYY/MM/DD
TIME = r'(\d{2}):(\d{2}):(\d{2})\.\d{3}' # Time: HH:MM:SS.mmm
SENSOR_DATE = r'(\d{2}/\d{2}/\d{2})' # Sensor Date: MM/DD/YY
SENSOR_TIME = r'(\d{2}:\d{2}:\d{2})' # Sensor Time: HH:MM:SS
TIMESTAMP = START_GROUP + DATE + SPACE + TIME + END_GROUP
START_METADATA = r'\['
END_METADATA = r'\]'
# All flort records are ASCII characters separated by a newline.
FLORT_RECORD_PATTERN = ANY_CHARS # Any number of ASCII characters
FLORT_RECORD_PATTERN += NEW_LINE # separated by a new line
FLORT_RECORD_MATCHER = re.compile(FLORT_RECORD_PATTERN)
# Metadata record:
# Timestamp [Text]MoreText newline
METADATA_PATTERN = TIMESTAMP + SPACE # dcl controller timestamp
METADATA_PATTERN += START_METADATA # Metadata record starts with '['
METADATA_PATTERN += ANY_CHARS # followed by text
METADATA_PATTERN += END_METADATA # followed by ']'
METADATA_PATTERN += ANY_CHARS # followed by more text
METADATA_PATTERN += r'\n' # metadata record ends with LF
METADATA_MATCHER = re.compile(METADATA_PATTERN)
# Sensor data record:
# Timestamp Date<tab>Time<tab>SensorData
# where SensorData are tab-separated unsigned integer numbers
SENSOR_DATA_PATTERN = TIMESTAMP + SPACE # dcl controller timestamp
SENSOR_DATA_PATTERN += SENSOR_DATE + TAB # sensor date
SENSOR_DATA_PATTERN += SENSOR_TIME + TAB # sensor time
SENSOR_DATA_PATTERN += UINT + TAB # measurement wavelength beta
SENSOR_DATA_PATTERN += UINT + TAB # raw signal beta
SENSOR_DATA_PATTERN += UINT + TAB # measurement wavelength chl
SENSOR_DATA_PATTERN += UINT + TAB # raw signal chl
SENSOR_DATA_PATTERN += UINT + TAB # measurement wavelength cdom
SENSOR_DATA_PATTERN += UINT + TAB # raw signal cdom
SENSOR_DATA_PATTERN += UINT # raw internal temperature
SENSOR_DATA_PATTERN += r'\r\n' # sensor data ends with CR-LF
SENSOR_DATA_MATCHER = re.compile(SENSOR_DATA_PATTERN)
# SENSOR_DATA_MATCHER produces the following groups.
# The following are indices into groups() produced by SENSOR_DATA_MATCHER.
# i.e., match.groups()[INDEX]
SENSOR_GROUP_TIMESTAMP = 0
SENSOR_GROUP_YEAR = 1
SENSOR_GROUP_MONTH = 2
SENSOR_GROUP_DAY = 3
SENSOR_GROUP_HOUR = 4
SENSOR_GROUP_MINUTE = 5
SENSOR_GROUP_SECOND = 6
SENSOR_GROUP_SENSOR_DATE = 7
SENSOR_GROUP_SENSOR_TIME = 8
SENSOR_GROUP_WAVELENGTH_BETA = 9
SENSOR_GROUP_RAW_SIGNAL_BETA = 10
SENSOR_GROUP_WAVELENGTH_CHL = 11
SENSOR_GROUP_RAW_SIGNAL_CHL = 12
SENSOR_GROUP_WAVELENGTH_CDOM = 13
SENSOR_GROUP_RAW_SIGNAL_CDOM = 14
SENSOR_GROUP_INTERNAL_TEMPERATURE = 15
# This table is used in the generation of the instrument data particle.
# Column 1 - particle parameter name
# Column 2 - group number (index into raw_data)
# Column 3 - data encoding function (conversion required - int, float, etc)
INSTRUMENT_PARTICLE_MAP = [
('dcl_controller_timestamp', SENSOR_GROUP_TIMESTAMP, str),
('date_string', SENSOR_GROUP_SENSOR_DATE, str),
('time_string', SENSOR_GROUP_SENSOR_TIME, str),
('measurement_wavelength_beta', SENSOR_GROUP_WAVELENGTH_BETA, int),
('raw_signal_beta', SENSOR_GROUP_RAW_SIGNAL_BETA, int),
('measurement_wavelength_chl', SENSOR_GROUP_WAVELENGTH_CHL, int),
('raw_signal_chl', SENSOR_GROUP_RAW_SIGNAL_CHL, int),
('measurement_wavelength_cdom', SENSOR_GROUP_WAVELENGTH_CDOM, int),
('raw_signal_cdom', SENSOR_GROUP_RAW_SIGNAL_CDOM, int),
('raw_internal_temp', SENSOR_GROUP_INTERNAL_TEMPERATURE, int)
]
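# Sketch (not part of the driver): how a hypothetical raw DCL sensor line maps
# onto the particle fields above. Defining this helper is side-effect free.
def _example_sensor_record_mapping():
    sample = ('2014/01/02 03:04:05.678 '
              '01/02/14\t03:04:05\t700\t100\t695\t200\t460\t300\t500\r\n')
    match = SENSOR_DATA_MATCHER.match(sample)
    if match is None:
        return None
    groups = match.groups()
    # e.g. ('measurement_wavelength_beta', 700), ('raw_internal_temp', 500), ...
    return [(name, encode(groups[index]))
            for name, index, encode in INSTRUMENT_PARTICLE_MAP]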
class FlortStateKey(BaseEnum):
POSITION = 'position' # position within the input file
class DataParticleType(BaseEnum):
REC_INSTRUMENT_PARTICLE = 'flort_dj_dcl_instrument_recovered'
TEL_INSTRUMENT_PARTICLE = 'flort_dj_dcl_instrument'
class FlortDjDclInstrumentDataParticle(DataParticle):
"""
Class for generating the Flort_dj instrument particle.
"""
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
quality_flag=DataParticleValue.OK,
new_sequence=None):
super(FlortDjDclInstrumentDataParticle, self).__init__(raw_data,
port_timestamp,
internal_timestamp,
preferred_timestamp,
quality_flag,
new_sequence)
# The particle timestamp is the DCL Controller timestamp.
# The individual fields have already been extracted by the parser.
timestamp = (
int(self.raw_data[SENSOR_GROUP_YEAR]),
int(self.raw_data[SENSOR_GROUP_MONTH]),
int(self.raw_data[SENSOR_GROUP_DAY]),
int(self.raw_data[SENSOR_GROUP_HOUR]),
int(self.raw_data[SENSOR_GROUP_MINUTE]),
int(self.raw_data[SENSOR_GROUP_SECOND]),
0, 0, 0)
elapsed_seconds = calendar.timegm(timestamp)
self.set_internal_timestamp(unix_time=elapsed_seconds)
def _build_parsed_values(self):
"""
Build parsed values for Recovered and Telemetered Instrument Data Particle.
"""
# Generate a particle by calling encode_value for each entry
# in the Instrument Particle Mapping table,
# where each entry is a tuple containing the particle field name,
# an index into the match groups (which is what has been stored in raw_data),
# and a function to use for data conversion.
return [self._encode_value(name, self.raw_data[group], function)
for name, group, function in INSTRUMENT_PARTICLE_MAP]
class FlortDjDclRecoveredInstrumentDataParticle(FlortDjDclInstrumentDataParticle):
"""
Class for generating Offset Data Particles from Recovered data.
"""
_data_particle_type = DataParticleType.REC_INSTRUMENT_PARTICLE
class FlortDjDclTelemeteredInstrumentDataParticle(FlortDjDclInstrumentDataParticle):
"""
Class for generating Offset Data Particles from Telemetered data.
"""
_data_particle_type = DataParticleType.TEL_INSTRUMENT_PARTICLE
class FlortDjDclParser(BufferLoadingParser):
"""
Parser for Flort_dj_dcl data.
In addition to the standard constructor parameters,
this constructor takes an additional parameter particle_class.
"""
def __init__(self,
config,
stream_handle,
state,
state_callback,
publish_callback,
exception_callback,
particle_class,
*args, **kwargs):
# No fancy sieve function needed for this parser.
# File is ASCII with records separated by newlines.
super(FlortDjDclParser, self).__init__(config,
stream_handle,
state,
partial(StringChunker.regex_sieve_function,
regex_list=[FLORT_RECORD_MATCHER]),
state_callback,
publish_callback,
exception_callback,
*args,
**kwargs)
# Default the position within the file to the beginning.
self._read_state = {FlortStateKey.POSITION: 0}
self.input_file = stream_handle
self.particle_class = particle_class
# If there's an existing state, update to it.
if state is not None:
self.set_state(state)
def handle_non_data(self, non_data, non_end, start):
"""
Handle any non-data that is found in the file
"""
# Handle non-data here.
# Increment the position within the file.
# Use the _exception_callback.
if non_data is not None and non_end <= start:
self._increment_position(len(non_data))
self._exception_callback(UnexpectedDataException(
"Found %d bytes of un-expected non-data %s" %
(len(non_data), non_data)))
def _increment_position(self, bytes_read):
"""
Increment the position within the file.
@param bytes_read The number of bytes just read
"""
self._read_state[FlortStateKey.POSITION] += bytes_read
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker.
If it is valid data, build a particle.
Go until the chunker has no more valid data.
@retval a list of tuples with sample particles encountered in this
parsing, plus the state.
"""
result_particles = []
(nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)
self.handle_non_data(non_data, non_end, start)
while chunk is not None:
self._increment_position(len(chunk))
# If this is a valid sensor data record,
# use the extracted fields to generate a particle.
sensor_match = SENSOR_DATA_MATCHER.match(chunk)
if sensor_match is not None:
particle = self._extract_sample(self.particle_class,
None,
sensor_match.groups(),
None)
if particle is not None:
result_particles.append((particle, copy.copy(self._read_state)))
# It's not a sensor data record, see if it's a metadata record.
else:
# If it's a valid metadata record, ignore it.
# Otherwise generate warning for unknown data.
meta_match = METADATA_MATCHER.match(chunk)
if meta_match is None:
error_message = 'Unknown data found in chunk %s' % chunk
log.warn(error_message)
self._exception_callback(UnexpectedDataException(error_message))
(nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
(timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)
self.handle_non_data(non_data, non_end, start)
return result_particles
def set_state(self, state_obj):
"""
Set the value of the state object for this parser
@param state_obj The object to set the state to.
@throws DatasetParserException if there is a bad state structure
"""
if not isinstance(state_obj, dict):
raise DatasetParserException("Invalid state structure")
if not (FlortStateKey.POSITION in state_obj):
raise DatasetParserException('%s missing in state keys' %
FlortStateKey.POSITION)
self._record_buffer = []
self._state = state_obj
self._read_state = state_obj
self.input_file.seek(state_obj[FlortStateKey.POSITION])
class FlortDjDclRecoveredParser(FlortDjDclParser):
"""
This is the entry point for the Recovered Flort_dj_dcl parser.
"""
def __init__(self,
config,
stream_handle,
state,
state_callback,
publish_callback,
exception_callback,
*args, **kwargs):
super(FlortDjDclRecoveredParser, self).__init__(config,
stream_handle,
state,
state_callback,
publish_callback,
exception_callback,
FlortDjDclRecoveredInstrumentDataParticle,
*args,
**kwargs)
class FlortDjDclTelemeteredParser(FlortDjDclParser):
"""
This is the entry point for the Telemetered Flort_dj_dcl parser.
"""
def __init__(self,
config,
stream_handle,
state,
state_callback,
publish_callback,
exception_callback,
*args, **kwargs):
super(FlortDjDclTelemeteredParser, self).__init__(config,
stream_handle,
state,
state_callback,
publish_callback,
exception_callback,
FlortDjDclTelemeteredInstrumentDataParticle,
*args,
**kwargs)
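# Sketch (hypothetical wiring, not part of the driver): feeding a file through
# the recovered parser. The empty config, the no-op callbacks, and the
# get_records() batch size are placeholders; a real driver supplies its own
# config keys and callback implementations.
def _example_parse_file(path):
    with open(path, 'rb') as stream:
        parser = FlortDjDclRecoveredParser(
            {},                                   # driver-specific parser config
            stream,
            None,                                 # no prior state: start at byte 0
            lambda state, ingested=None: None,    # state callback (no-op)
            lambda particles: None,               # publish callback (no-op)
            lambda exception: log.warn(exception))  # exception callback
        return parser.get_records(100)            # up to 100 particles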
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
import numpy as np
import pickle
import time
import sys
import os
import nltk
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
from dnc import DNC
from recurrent_controller import StatelessRecurrentController
def bleu_score(target_batch, predict_batch, print_prob=0.995):
s=[]
for b in range(target_batch.shape[0]):
trim_target = []
trim_predict = []
for t in target_batch[b]:
if t >1:
trim_target.append(t)
for t in predict_batch[b]:
if t >1:
trim_predict.append(t)
if np.random.rand()>print_prob:
print('{} vs {}'.format(trim_target, trim_predict))
BLEUscore = nltk.translate.bleu_score.sentence_bleu([trim_target], trim_predict, weights=[0.5,0.5])
s.append(BLEUscore)
return np.mean(s)
def set_score_pre(target_batch, predict_batch):
s = []
s2 = []
for b in range(target_batch.shape[0]):
trim_target = []
trim_predict = []
for t in target_batch[b]:
if t > 1:
trim_target.append(t)
for t in predict_batch[b]:
if t > 1:
trim_predict.append(t)
if np.random.rand()>0.99:
print('{} vs {}'.format(trim_target, trim_predict))
acc = 0
if len(set(trim_target)) > 0:
    acc = len(set(trim_target).intersection(set(trim_predict))) / len(set(trim_target))
acc2=0
if len(set(trim_predict))>0:
acc2 = len(set(trim_target).intersection(set(trim_predict))) / len(trim_predict)
s.append(acc)
s2.append(acc2)
return np.mean(s), np.mean(s2)
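# Worked example (sketch): targets [3, 4, 5] vs predictions [4, 5, 6] share
# {4, 5}, so set_score_pre returns (2/3, 2/3) -- recall against the target set
# and precision against the prediction list.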
def set_score_hist(target_batch, predict_batch):
acc_label={}
guess_label={}
count_label={}
for b in range(target_batch.shape[0]):
for t, t2 in zip(target_batch[b], predict_batch[b]):
# print('{} ----- {}'.format(t, t2))
trim_target = []
for tt in t:
if tt > 1:
trim_target.append(tt)
for l in trim_target:
if l not in count_label:
count_label[l]=0
count_label[l]+=1
trim_predict = []
for tt in t2:
if tt > 1:
trim_predict.append(tt)
if np.random.rand()>0.99:
print('{} vs {}'.format(trim_target, trim_predict))
for l in trim_predict:
if l not in guess_label:
guess_label[l]=0
guess_label[l]+=1
correct = list(set(trim_target).intersection(set(trim_predict)))
for c in correct:
if c not in acc_label:
acc_label[c]=0
acc_label[c]+=1
recall=[]
precision=[]
fscore=[]
for k,v in sorted(count_label.items()):
if k in acc_label:
rec = acc_label[k] / count_label[k]
prec= acc_label[k] / guess_label[k]
recall.append(rec)
precision.append(prec)
fscore.append(2*rec*prec/(rec+prec))
else:
recall.append(0)
precision.append(0)
fscore.append(0)
return recall, precision, fscore
import editdistance as ed
def batch_norm_edit_score(reals, preds, pprint=0.999):
avgs=0
c=0
for real, pred in zip(reals, preds):
    avgs += norm_edit_score(real, pred, pprint)
    c += 1
return avgs/c
def norm_edit_score(real, pred, pprob=0.999):
trimpred=[]
for p in pred:
if p>1:
trimpred.append(p)
trimreal=[]
for r in real:
if r>1:
trimreal.append(r)
if np.random.rand() > pprob:
print('{} vs {}'.format(trimreal, trimpred))
if not trimpred:
return 1
#print(trimreal)
return ed.eval(trimpred,trimreal)/max(len(trimpred),len(trimreal))
def norm_edit_score_raw(real, pred, pprob=0.999):
if np.random.rand() > pprob:
print('{} vs {}'.format(real, pred))
return ed.eval(pred,real)/max(len(pred),len(real))
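# Worked example (sketch): tokens <= 1 are treated as padding/EOS and trimmed,
# so norm_edit_score([0, 5, 6, 7], [5, 6, 8, 0]) compares [5, 6, 7] with
# [5, 6, 8]: one substitution over max length 3 gives 1/3.
def _example_norm_edit():
    return norm_edit_score([0, 5, 6, 7], [5, 6, 8, 0])  # -> 0.333...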
def llprint(message):
sys.stdout.write(message)
sys.stdout.flush()
def write_predict(wfile, list_pred):
with open(wfile,'w') as f:
for p in list_pred:
f.write('[')
for n in p[:-1]:
f.write(str(n-1)+' ')
f.write(str(p[-1]-1))
f.write(']')
f.write('\n')
def load(path):
return pickle.load(open(path, 'rb'))
def onehot(index, size):
# print('-----')
# print(index)
vec = np.zeros(size, dtype=np.float32)
vec[int(index)] = 1.0
return vec
def prepare_sample(dig_list, proc_list, word_space_size_input, word_space_size_output, index=-1):
if index<0:
index = int(np.random.choice(len(dig_list),1))
# print('\n{}'.format(index))
ins=dig_list[index]
ose=proc_list[index]
seq_len = len(ins) + 1 + len(ose)
input_vec = np.zeros(seq_len)
for iii, token in enumerate(ins):
input_vec[iii] = token
input_vec[len(ins)] = 1
output_vec = np.zeros(seq_len)
decoder_point = len(ins) + 1
for iii, token in enumerate(ose):
output_vec[decoder_point + iii] = token
input_vec = np.array([[onehot(code, word_space_size_input) for code in input_vec]])
output_vec = np.array([[onehot(code, word_space_size_output) for code in output_vec]])
return input_vec, output_vec, seq_len, decoder_point, index
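# Sketch of the layout built above (hypothetical token ids): prefix [3, 4] and
# suffix [5, 6] yield seq_len 5 and decoder_point 3, i.e.
#   input : 3 4 1 0 0   (token 1 separates encoder and decoder phases)
#   output: 0 0 0 5 6   (targets occupy the decoder positions)
def _example_prepare_sample():
    inp, out, seq_len, decoder_point, _ = prepare_sample(
        [[3, 4]], [[5, 6]], word_space_size_input=8,
        word_space_size_output=8, index=0)
    return inp.shape, out.shape, seq_len, decoder_point  # ((1,5,8), (1,5,8), 5, 3)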
def prepare_sample_batch(dig_list,proc_list,word_space_size_input,word_space_size_output, bs, lm_train=False):
if isinstance(bs, int):
indexs = np.random.choice(len(dig_list),bs,replace=False)
else:
#print('from {} to {}'.format(bs[0],bs[1]))
indexs=list(range(bs[0],bs[1]))
max_in_len = 0   # longest input prefix in the batch
max_out_len = 0  # longest output suffix (incl. terminating 0) in the batch
for index in indexs:
    max_in_len = max(len(dig_list[index]), max_in_len)
    max_out_len = max(len(proc_list[index] + [0]), max_out_len)
# max_out_len *= 2
input_vecs = []
output_vecs = []
seq_len = max_in_len + 1 + max_out_len
decoder_point = max_in_len + 1
out_list=[]
masks=[]
for index in indexs:
# print('\n{}'.format(index))
ins=dig_list[index]
ose=proc_list[index]+[0]
out_list.append(ose)
input_vec = np.zeros(seq_len)
output_vec = np.zeros(seq_len)
mask = np.zeros(seq_len, dtype=bool)
for iii, token in enumerate(ins):
    input_vec[max_in_len - len(ins) + iii] = token  # right-align the prefix
    if lm_train:
        output_vec[max_in_len - len(ins) + iii + 1] = token
        mask[max_in_len - len(ins) + iii + 1] = True
input_vec[max_in_len] = 1  # separator token marks the decoder point
for iii, token in enumerate(ose):
output_vec[decoder_point + iii] = token
mask[decoder_point + iii]=True
# print(ins)
# print(ose)
# print(input_vec)
# print(output_vec)
# print('====')
input_vec = [onehot(code, word_space_size_input) for code in input_vec]
output_vec = [onehot(code, word_space_size_output) for code in output_vec]
input_vecs.append(input_vec)
output_vecs.append(output_vec)
masks.append(mask)
# raise False
return np.asarray(input_vecs), np.asarray(output_vecs), seq_len, decoder_point, np.asarray(masks), out_list
def load_dict(dir='./data/BusinessProcess/Moodle'):
return pickle.load(open(dir+'/event_vocab.pkl','rb'))
def load_single_sequence(fname):
seqs=[]
rl=''
for l in open(fname):
if l.strip() != '' and l.strip()[-1] == ']':
if rl!='':
l=rl
s=l.strip()[1:-1].strip().split()
seqs.append([int(x)+1 for x in s])
rl=''
else:
rl+=l+' '
return seqs
def load_sequence(dir='./data/BusinessProcess/Moodle/'):
train_in=dir+'/train_prefixes.txt'
train_out=dir+'/train_suffixes.txt'
test_in = dir + '/test_prefixes.txt'
test_out = dir + '/test_suffixes.txt'
str_in=load_single_sequence(train_in)
strain_out = load_single_sequence(train_out)
stest_in = load_single_sequence(test_in)
stest_out = load_single_sequence(test_out)
return str_in, strain_out, stest_in, stest_out
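# Sketch of the expected file format: one bracketed, space-separated integer
# sequence per line (long sequences may wrap across lines), e.g. a file with
#   [0 2 5]
#   [1 3]
# is turned by load_single_sequence into [[1, 3, 6], [2, 4]] after the +1 shift.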
def moodle_train():
dirname = os.path.dirname(os.path.abspath(__file__)) + '/data/'
print(dirname)
ckpts_dir = os.path.join(dirname, 'checkpoints_moodle')
_,_,_,char2label=load_dict()
str_in, strain_out, stest_in, stest_out = load_sequence()
print(str_in[:10])
print(strain_out[:10])
print(stest_in[:10])
print(stest_out[:10])
print('num train {}'.format(len(str_in)))
print('num test {}'.format(len(stest_in)))
print('dim in {}'.format(len(char2label)))
print('dim out {}'.format(len(char2label)))
input_size = len(char2label)+1
output_size = len(char2label)+1
sequence_max_length = 100  # placeholder; actual sequence lengths are fed dynamically
batch_size = 10
words_count = 64
word_size = 100
read_heads = 1
iterations = 150000
max_train_iterations = 100000
start_step = 0
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
llprint("Building Computational Graph ... ")
ncomputer = DNC(
StatelessRecurrentController,
input_size,
output_size,
sequence_max_length,
words_count,
word_size,
read_heads,
batch_size,
hidden_controller_dim=100,
use_emb=False,
use_mem=False,
decoder_mode=True,
dual_controller=False,
write_protect=False,
dual_emb=True
)
output,prob,loss,apply_gradients=ncomputer.build_loss_function_mask()
llprint("Done!\n")
llprint("Initializing Variables ... ")
session.run(tf.global_variables_initializer())
llprint("Done!\n")
last_100_losses = []
start = 0 if start_step == 0 else start_step + 1
end = start_step + iterations + 1
minscore=1000
start_time_100 = time.time()
avg_100_time = 0.
avg_counter = 0
train_writer = tf.summary.FileWriter('./data/log_moddle/', session.graph)
for i in range(start, end + 1):
try:
llprint("\rIteration %d/%d" % (i, end))
input_vec, output_vec, seq_len, decoder_point, masks, _ = \
prepare_sample_batch(str_in, strain_out, input_size, output_size, bs=batch_size, lm_train=False)
summarize = (i % 500 == 0)
if i<=max_train_iterations:
loss_value, _ = session.run([
loss,
apply_gradients
], feed_dict={
ncomputer.input_data: input_vec,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks
})
else:
loss_value = session.run(loss, feed_dict={
ncomputer.input_data: input_vec,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
})
last_100_losses.append(loss_value)
if summarize:
llprint("\n\t episode %d -->Avg. Cross-Entropy: %.7f\n" % (i, np.mean(last_100_losses)))
summary = tf.Summary()
summary.value.add(tag='batch_train_loss', simple_value=np.mean(last_100_losses))
trscores = []
for ii in range(10):
input_vec, output_vec, seq_len, decoder_point, masks, rout_list =\
prepare_sample_batch(str_in, strain_out, input_size, output_size, bs=batch_size, lm_train=False)
out = session.run([prob], feed_dict={ncomputer.input_data: input_vec,
ncomputer.decoder_point: decoder_point,
ncomputer.sequence_length: seq_len,
ncomputer.mask:masks})
out = np.reshape(np.asarray(out), [-1, seq_len,output_size])
out = np.argmax(out, axis=-1)
bout_list=[]
# print('{} vs {}'.format(seq_len,out.shape[1]))
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
if out[b][io]==0:
break
out_list.append(out[b][io])
bout_list.append(out_list)
trscores.append(batch_norm_edit_score(rout_list, bout_list,0.95))
print('-----')
tescores = []
losses = []
ntb=len(stest_in)//batch_size+1
for ii in range(ntb):
if ii*batch_size==len(stest_in):
break
bs=[ii*batch_size, min((ii+1)*batch_size,len(stest_in))]
rs = bs[1] - bs[0]
if bs[1]>=len(stest_in):
bs=[len(stest_in)-batch_size, len(stest_in)]
input_data, target_output, seq_len, decoder_point, masks, rout_list = \
prepare_sample_batch(stest_in, stest_out, input_size, output_size, bs, lm_train=False)
out, loss_v = session.run([prob, loss], feed_dict={ncomputer.input_data: input_data,
ncomputer.target_output: target_output,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks
})
losses.append(loss_v)
out = np.reshape(np.asarray(out), [-1, seq_len, output_size])
out = np.argmax(out, axis=-1)
bout_list = []
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
if out[b][io]==0:
break
out_list.append(out[b][io])
bout_list.append(out_list)
tescores.append(batch_norm_edit_score(rout_list[:rs], bout_list[:rs], 0.995))
#pre, rec = set_score_pre(np.asarray([stest_out[ii]]), np.asarray([out_list]))
#tescores2.append(pre)
#tescores3.append(rec)
# print(pro_list_test)
# print(big_out_list)
# rec, pre, fsc = set_score_hist(np.asarray([pro_list_test]),np.asarray([big_out_list]))
tloss = np.mean(losses)
tscore = np.mean(tescores)
print('tr edit {} vs te edit {}'.format(np.mean(trscores), np.mean(tescores)))
#print('test rec {} prec {}'.format(np.mean(tescores2), np.mean(tescores3)))
print('test loss {}'.format(tloss))
summary.value.add(tag='train_edit', simple_value=np.mean(trscores))
summary.value.add(tag='test_edit', simple_value=np.mean(tescores))
summary.value.add(tag='test_loss', simple_value=tloss)
#summary.value.add(tag='test_recall', simple_value=np.mean(tescores2))
#summary.value.add(tag='test_precision', simple_value=np.mean(tescores3))
train_writer.add_summary(summary, i)
train_writer.flush()
end_time_100 = time.time()
elapsed_time = (end_time_100 - start_time_100) / 60
avg_counter += 1
avg_100_time += (1. / avg_counter) * (elapsed_time - avg_100_time)
estimated_time = (avg_100_time * ((end - i) / 100.)) / 60.
print("\tAvg. 100 iterations time: %.2f minutes" % (avg_100_time))
print("\tApprox. time to completion: %.2f hours" % (estimated_time))
start_time_100 = time.time()
last_100_losses = []
if minscore>tscore:
minscore=tscore
llprint("\nSaving Checkpoint ... "),
ncomputer.save(session, ckpts_dir, ncomputer.print_config())
except KeyboardInterrupt:
    llprint("\nSaving Checkpoint ... ")
    ncomputer.save(session, ckpts_dir, ncomputer.print_config())
    break  # stop training on Ctrl-C after saving
def moodle_test():
dirname = os.path.dirname(os.path.abspath(__file__)) + '/data/'
print(dirname)
ckpts_dir = os.path.join(dirname, 'checkpoints_moodle')
batch_size = 10
main_dir='./data/BusinessProcess/Moodle/'
_,_,_,char2label=load_dict()
str_in, strain_out, stest_in, stest_out = load_sequence(main_dir)
print(str_in[:10])
print(strain_out[:10])
print(stest_in[:10])
print(stest_out[:10])
print('num train {}'.format(len(str_in)))
print('num test {}'.format(len(stest_in)))
print('dim in {}'.format(len(char2label)))
print('dim out {}'.format(len(char2label)))
input_size = len(char2label)+1
output_size = len(char2label)+1
sequence_max_length = 100
words_count = 64
word_size = 100
read_heads = 1
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
llprint("Building Computational Graph ... ")
ncomputer = DNC(
StatelessRecurrentController,
input_size,
output_size,
sequence_max_length,
words_count,
word_size,
read_heads,
batch_size,
hidden_controller_dim=100,
use_emb=False,
use_mem=True,
decoder_mode=True,
dual_controller=False,
write_protect=False,
dual_emb=True
)
output, prob, loss, apply_gradients = ncomputer.build_loss_function_mask()
llprint("Done!\n")
llprint("Initializing Variables ... ")
session.run(tf.global_variables_initializer())
llprint("Done!\n")
ncomputer.restore(session, ckpts_dir, ncomputer.print_config())
print('-----')
tescores = []
losses = []
ntb=len(stest_in)//batch_size+1
all_preds=[]
for ii in range(ntb):
if ii*batch_size==len(stest_in):
break
bs=[ii*batch_size, min((ii+1)*batch_size,len(stest_in))]
rs = bs[1] - bs[0]
if bs[1]>=len(stest_in):
bs=[len(stest_in)-batch_size, len(stest_in)]
input_data, target_output, seq_len, decoder_point, masks, rout_list = \
prepare_sample_batch(stest_in, stest_out, input_size, output_size, bs)
out, loss_v = session.run([prob, loss], feed_dict={ncomputer.input_data: input_data,
ncomputer.target_output: target_output,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks
})
losses.append(loss_v)
out = np.reshape(np.asarray(out), [-1, seq_len, output_size])
out = np.argmax(out, axis=-1)
bout_list = []
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
if out[b][io]==0:
break
out_list.append(out[b][io])
bout_list.append(out_list)
for bb in bout_list[:rs]:
all_preds.append(bb)
tescores.append(batch_norm_edit_score(rout_list[:rs], bout_list[:rs], 0.995))
prefile=main_dir+'/{}.test_predict.txt'.format(ncomputer.print_config())
write_predict(prefile, all_preds)
predict_seq=load_single_sequence(prefile)
tescores2=[]
for rseq,pseq in zip(stest_out, predict_seq):
s=norm_edit_score(rseq,pseq)
tescores2.append(s)
tloss = np.mean(losses)
print('test ed {}'.format(np.mean(tescores)))
print('test ed {}'.format(np.mean(tescores2)))
#print('test rec {} prec {}'.format(np.mean(tescores2), np.mean(tescores3)))
print('test loss {}'.format(tloss))
def financial_log_train():
dirname = os.path.dirname(os.path.abspath(__file__)) + '/data/'
print(dirname)
ckpts_dir = os.path.join(dirname, 'checkpoints_financial_log')
batch_size = 10
_,_,_,char2label=load_dict('./data/BusinessProcess/Financial_Log/')
str_in, strain_out, stest_in, stest_out = load_sequence('./data/BusinessProcess/Financial_Log/')
print(str_in[:10])
print(strain_out[:10])
print(stest_in[:10])
print(stest_out[:10])
print('num train {}'.format(len(str_in)))
print('num test {}'.format(len(stest_in)))
print('dim in {}'.format(len(char2label)))
print('dim out {}'.format(len(char2label)))
input_size = len(char2label)+1
output_size = len(char2label)+1
sequence_max_length = 100
words_count = 64
word_size = 64
read_heads = 1
iterations = 10000
max_train_iterations = 100000
start_step = 0
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
llprint("Building Computational Graph ... ")
ncomputer = DNC(
StatelessRecurrentController,
input_size,
output_size,
sequence_max_length,
words_count,
word_size,
read_heads,
batch_size,
hidden_controller_dim = 100,
use_emb=False,
use_mem=True,
decoder_mode=True,
dual_controller=True,
write_protect=True,
)
output, prob, loss, apply_gradients = ncomputer.build_loss_function_mask()
llprint("Done!\n")
llprint("Initializing Variables ... ")
session.run(tf.global_variables_initializer())
llprint("Done!\n")
last_100_losses = []
start = 0 if start_step == 0 else start_step + 1
end = start_step + iterations + 1
minscore=1000
start_time_100 = time.time()
avg_100_time = 0.
avg_counter = 0
train_writer = tf.summary.FileWriter('./data/log_financial_log/', session.graph)
for i in range(start, end + 1):
try:
llprint("\rIteration %d/%d" % (i, end))
input_vec, output_vec, seq_len, decoder_point, masks, _ = \
prepare_sample_batch(str_in, strain_out, input_size, output_size, bs=batch_size, lm_train=False)
summarize = (i % 100 == 0)
if i<=max_train_iterations:
loss_value, _ = session.run([
loss,
apply_gradients
], feed_dict={
ncomputer.input_data: input_vec,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks,
})
else:
loss_value = session.run(loss, feed_dict={
ncomputer.input_data: input_vec,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks,
})
last_100_losses.append(loss_value)
if summarize:
llprint("\n\t episode %d -->Avg. Cross-Entropy: %.7f\n" % (i, np.mean(last_100_losses)))
summary = tf.Summary()
summary.value.add(tag='batch_train_loss', simple_value=np.mean(last_100_losses))
trscores = []
for ii in range(10):
input_vec, output_vec, seq_len, decoder_point, masks, rout_list =\
prepare_sample_batch(str_in, strain_out, input_size, output_size, bs=batch_size, lm_train=False)
out = session.run([prob], feed_dict={ncomputer.input_data: input_vec,
ncomputer.decoder_point: decoder_point,
ncomputer.sequence_length: seq_len,
ncomputer.mask:masks,
ncomputer.target_output: output_vec,
ncomputer.teacher_force: ncomputer.get_bool_rand(seq_len,0)})
out = np.reshape(np.asarray(out), [-1, seq_len,output_size])
out = np.argmax(out, axis=-1)
bout_list=[]
# print('{} vs {}'.format(seq_len,out.shape[1]))
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
if out[b][io]==0:
break
out_list.append(out[b][io])
bout_list.append(out_list)
trscores.append(batch_norm_edit_score(rout_list, bout_list,0.95))
print('-----')
tescores = []
losses = []
ntb=len(stest_in)//batch_size+1
for ii in range(ntb):
if ii*batch_size==len(stest_in):
break
bs=[ii*batch_size, min((ii+1)*batch_size,len(stest_in))]
rs = bs[1] - bs[0]
if bs[1]>=len(stest_in):
bs=[len(stest_in)-batch_size, len(stest_in)]
input_data, target_output, seq_len, decoder_point, masks, rout_list = \
prepare_sample_batch(stest_in, stest_out, input_size, output_size, bs, lm_train=False)
out, loss_v = session.run([prob, loss], feed_dict={ncomputer.input_data: input_data,
ncomputer.target_output: target_output,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks,
})
losses.append(loss_v)
out = np.reshape(np.asarray(out), [-1, seq_len, output_size])
out = np.argmax(out, axis=-1)
bout_list = []
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
if out[b][io]==0:
break
out_list.append(out[b][io])
bout_list.append(out_list)
tescores.append(batch_norm_edit_score(rout_list[:rs], bout_list[:rs], 0.995))
#pre, rec = set_score_pre(np.asarray([stest_out[ii]]), np.asarray([out_list]))
#tescores2.append(pre)
#tescores3.append(rec)
# print(pro_list_test)
# print(big_out_list)
# rec, pre, fsc = set_score_hist(np.asarray([pro_list_test]),np.asarray([big_out_list]))
tloss = np.mean(losses)
tscore = np.mean(tescores)
print('tr edit {} vs te edit {}'.format(np.mean(trscores), np.mean(tescores)))
#print('test rec {} prec {}'.format(np.mean(tescores2), np.mean(tescores3)))
print('test loss {}'.format(tloss))
summary.value.add(tag='train_edit', simple_value=np.mean(trscores))
summary.value.add(tag='test_edit', simple_value=np.mean(tescores))
summary.value.add(tag='test_loss', simple_value=tloss)
#summary.value.add(tag='test_recall', simple_value=np.mean(tescores2))
#summary.value.add(tag='test_precision', simple_value=np.mean(tescores3))
train_writer.add_summary(summary, i)
train_writer.flush()
end_time_100 = time.time()
elapsed_time = (end_time_100 - start_time_100) / 60
avg_counter += 1
avg_100_time += (1. / avg_counter) * (elapsed_time - avg_100_time)
estimated_time = (avg_100_time * ((end - i) / 100.)) / 60.
print("\tAvg. 100 iterations time: %.2f minutes" % (avg_100_time))
print("\tApprox. time to completion: %.2f hours" % (estimated_time))
start_time_100 = time.time()
last_100_losses = []
if minscore>tscore:
minscore=tscore
llprint("\nSaving Checkpoint ... "),
ncomputer.save(session, ckpts_dir, ncomputer.print_config())
except KeyboardInterrupt:
    llprint("\nSaving Checkpoint ... ")
    ncomputer.save(session, ckpts_dir, ncomputer.print_config())
    break  # stop training on Ctrl-C after saving
def financial_log_test():
dirname = os.path.dirname(os.path.abspath(__file__)) + '/data/'
print(dirname)
ckpts_dir = os.path.join(dirname, 'checkpoints_financial_log')  # must match financial_log_train()
batch_size = 10
main_dir='./data/BusinessProcess/Financial_Log/'
_, _, _, char2label = load_dict('./data/BusinessProcess/Financial_Log/')
str_in, strain_out, stest_in, stest_out = load_sequence(main_dir)
print(str_in[:10])
print(strain_out[:10])
print(stest_in[:10])
print(stest_out[:10])
print('num train {}'.format(len(str_in)))
print('num test {}'.format(len(stest_in)))
print('dim in {}'.format(len(char2label)))
print('dim out {}'.format(len(char2label)))
input_size = len(char2label)+1
output_size = len(char2label)+1
sequence_max_length = 100
words_count = 64
word_size = 64
read_heads = 1
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
llprint("Building Computational Graph ... ")
ncomputer = DNC(
StatelessRecurrentController,
input_size,
output_size,
sequence_max_length,
words_count,
word_size,
read_heads,
batch_size,
hidden_controller_dim=100,
use_emb=False,
use_mem=True,
decoder_mode=True,
dual_controller=True,
write_protect=True,
dual_emb=True
)
output, prob, loss, apply_gradients = ncomputer.build_loss_function_mask()
llprint("Done!\n")
llprint("Initializing Variables ... ")
session.run(tf.global_variables_initializer())
llprint("Done!\n")
ncomputer.restore(session, ckpts_dir, ncomputer.print_config())
print('-----')
tescores = []
losses = []
ntb=len(stest_in)//batch_size+1
all_preds=[]
for ii in range(ntb):
if ii*batch_size==len(stest_in):
break
bs=[ii*batch_size, min((ii+1)*batch_size,len(stest_in))]
rs = bs[1] - bs[0]
if bs[1]>=len(stest_in):
bs=[len(stest_in)-batch_size, len(stest_in)]
input_data, target_output, seq_len, decoder_point, masks, rout_list = \
prepare_sample_batch(stest_in, stest_out, input_size, output_size, bs)
out, loss_v = session.run([prob, loss], feed_dict={ncomputer.input_data: input_data,
ncomputer.target_output: target_output,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks
})
losses.append(loss_v)
out = np.reshape(np.asarray(out), [-1, seq_len, output_size])
out = np.argmax(out, axis=-1)
bout_list = []
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
if out[b][io]==0:
break
out_list.append(out[b][io])
bout_list.append(out_list)
for bb in bout_list[:rs]:
all_preds.append(bb)
tescores.append(batch_norm_edit_score(rout_list[:rs], bout_list[:rs], 0.995))
prefile=main_dir+'/{}.test_predict.txt'.format(ncomputer.print_config())
write_predict(prefile, all_preds)
predict_seq=load_single_sequence(prefile)
tescores2=[]
for rseq,pseq in zip(stest_out, predict_seq):
s=norm_edit_score(rseq,pseq)
tescores2.append(s)
tloss = np.mean(losses)
print('test ed {}'.format(np.mean(tescores)))
print('test ed {}'.format(np.mean(tescores2)))
#print('test rec {} prec {}'.format(np.mean(tescores2), np.mean(tescores3)))
print('test loss {}'.format(tloss))
def ibm_train():
dirname = os.path.dirname(os.path.abspath(__file__)) + '/data/'
print(dirname)
ckpts_dir = os.path.join(dirname, 'checkpoints_ibm')
batch_size = 10
_,_,_,char2label=load_dict('./data/BusinessProcess/IBM_Anonymous/')
str_in, strain_out, stest_in, stest_out = load_sequence('./data/BusinessProcess/IBM_Anonymous/')
print(str_in[:10])
print(strain_out[:10])
print(stest_in[:10])
print(stest_out[:10])
print('num train {}'.format(len(str_in)))
print('num test {}'.format(len(stest_in)))
print('dim in {}'.format(len(char2label)))
print('dim out {}'.format(len(char2label)))
input_size = len(char2label)+1
output_size = len(char2label)+1
sequence_max_length = 100
words_count = 64
word_size = 100
read_heads = 1
iterations = 50000
max_train_iterations = 100000
start_step = 0
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
llprint("Building Computational Graph ... ")
ncomputer = DNC(
StatelessRecurrentController,
input_size,
output_size,
sequence_max_length,
words_count,
word_size,
read_heads,
batch_size,
hidden_controller_dim = 100,
use_emb=False,
use_mem=False,
decoder_mode=True,
dual_controller=False,
write_protect=False,
)
output, prob, loss, apply_gradients = ncomputer.build_loss_function_mask()
llprint("Done!\n")
llprint("Initializing Variables ... ")
session.run(tf.global_variables_initializer())
llprint("Done!\n")
last_100_losses = []
start = 0 if start_step == 0 else start_step + 1
end = start_step + iterations + 1
minscore=1000
start_time_100 = time.time()
avg_100_time = 0.
avg_counter = 0
train_writer = tf.summary.FileWriter('./data/log_ibm/', session.graph)
for i in range(start, end + 1):
try:
llprint("\rIteration %d/%d" % (i, end))
input_vec, output_vec, seq_len, decoder_point, masks, _ = \
prepare_sample_batch(str_in, strain_out, input_size, output_size, bs=batch_size, lm_train=True)
summarize = (i % 100 == 0)
if i<=max_train_iterations:
loss_value, _ = session.run([
loss,
apply_gradients
], feed_dict={
ncomputer.input_data: input_vec,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks
})
else:
loss_value = session.run(loss, feed_dict={
ncomputer.input_data: input_vec,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks
})
last_100_losses.append(loss_value)
if summarize:
llprint("\n\t episode %d -->Avg. Cross-Entropy: %.7f\n" % (i, np.mean(last_100_losses)))
summary = tf.Summary()
summary.value.add(tag='batch_train_loss', simple_value=np.mean(last_100_losses))
trscores = []
for ii in range(10):
input_vec, output_vec, seq_len, decoder_point, masks, rout_list =\
prepare_sample_batch(str_in, strain_out, input_size, output_size, bs=batch_size, lm_train=True)
out = session.run([prob], feed_dict={ncomputer.input_data: input_vec,
ncomputer.decoder_point: decoder_point,
ncomputer.sequence_length: seq_len,
ncomputer.mask:masks})
out = np.reshape(np.asarray(out), [-1, seq_len,output_size])
out = np.argmax(out, axis=-1)
bout_list=[]
# print('{} vs {}'.format(seq_len,out.shape[1]))
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
if out[b][io]==0:
break
out_list.append(out[b][io])
bout_list.append(out_list)
trscores.append(batch_norm_edit_score(rout_list, bout_list,0.95))
print('-----')
tescores = []
losses = []
ntb=len(stest_in)//batch_size+1
for ii in range(ntb):
if ii*batch_size==len(stest_in):
break
bs=[ii*batch_size, min((ii+1)*batch_size,len(stest_in))]
rs = bs[1] - bs[0]
if bs[1]>=len(stest_in):
bs=[len(stest_in)-batch_size, len(stest_in)]
input_data, target_output, seq_len, decoder_point, masks, rout_list = \
prepare_sample_batch(stest_in, stest_out, input_size, output_size, bs, lm_train=True)
out, loss_v = session.run([prob, loss], feed_dict={ncomputer.input_data: input_data,
ncomputer.target_output: target_output,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks
})
losses.append(loss_v)
out = np.reshape(np.asarray(out), [-1, seq_len, output_size])
out = np.argmax(out, axis=-1)
bout_list = []
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
if out[b][io]==0:
break
out_list.append(out[b][io])
bout_list.append(out_list)
tescores.append(batch_norm_edit_score(rout_list[:rs], bout_list[:rs], 0.995))
#pre, rec = set_score_pre(np.asarray([stest_out[ii]]), np.asarray([out_list]))
#tescores2.append(pre)
#tescores3.append(rec)
# print(pro_list_test)
# print(big_out_list)
# rec, pre, fsc = set_score_hist(np.asarray([pro_list_test]),np.asarray([big_out_list]))
tloss = np.mean(losses)
tscore = np.mean(tescores)
print('tr edit {} vs te edit {}'.format(np.mean(trscores), np.mean(tescores)))
#print('test rec {} prec {}'.format(np.mean(tescores2), np.mean(tescores3)))
print('test loss {}'.format(tloss))
summary.value.add(tag='train_edit', simple_value=np.mean(trscores))
summary.value.add(tag='test_edit', simple_value=np.mean(tescores))
summary.value.add(tag='test_loss', simple_value=tloss)
#summary.value.add(tag='test_recall', simple_value=np.mean(tescores2))
#summary.value.add(tag='test_precision', simple_value=np.mean(tescores3))
train_writer.add_summary(summary, i)
train_writer.flush()
end_time_100 = time.time()
elapsed_time = (end_time_100 - start_time_100) / 60
avg_counter += 1
avg_100_time += (1. / avg_counter) * (elapsed_time - avg_100_time)
estimated_time = (avg_100_time * ((end - i) / 100.)) / 60.
print("\tAvg. 100 iterations time: %.2f minutes" % (avg_100_time))
print("\tApprox. time to completion: %.2f hours" % (estimated_time))
start_time_100 = time.time()
last_100_losses = []
if minscore>tscore:
minscore=tscore
llprint("\nSaving Checkpoint ... "),
ncomputer.save(session, ckpts_dir, ncomputer.print_config())
except KeyboardInterrupt:
    llprint("\nSaving Checkpoint ... ")
    ncomputer.save(session, ckpts_dir, ncomputer.print_config())
    break  # stop training on Ctrl-C after saving
def ibm_test():
dirname = os.path.dirname(os.path.abspath(__file__)) + '/data/'
print(dirname)
ckpts_dir = os.path.join(dirname, 'checkpoints_ibm')
batch_size = 10
main_dir='./data/BusinessProcess/IBM_Anonymous/'
_, _, _, char2label = load_dict('./data/BusinessProcess/IBM_Anonymous/')
str_in, strain_out, stest_in, stest_out = load_sequence(main_dir)
print(str_in[:10])
print(strain_out[:10])
print(stest_in[:10])
print(stest_out[:10])
print('num train {}'.format(len(str_in)))
print('num test {}'.format(len(stest_in)))
print('dim in {}'.format(len(char2label)))
print('dim out {}'.format(len(char2label)))
input_size = len(char2label)+1
output_size = len(char2label)+1
sequence_max_length = 100
words_count = 64
word_size = 100
read_heads = 1
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
llprint("Building Computational Graph ... ")
ncomputer = DNC(
StatelessRecurrentController,
input_size,
output_size,
sequence_max_length,
words_count,
word_size,
read_heads,
batch_size,
hidden_controller_dim=100,
use_emb=False,
use_mem=True,
decoder_mode=True,
dual_controller=False,
write_protect=False,
dual_emb=True
)
output, prob, loss, apply_gradients = ncomputer.build_loss_function_mask()
llprint("Done!\n")
llprint("Initializing Variables ... ")
session.run(tf.global_variables_initializer())
llprint("Done!\n")
ncomputer.restore(session, ckpts_dir, ncomputer.print_config())
print('-----')
tescores = []
losses = []
ntb=len(stest_in)//batch_size+1
all_preds=[]
for ii in range(ntb):
if ii*batch_size==len(stest_in):
break
bs=[ii*batch_size, min((ii+1)*batch_size,len(stest_in))]
rs = bs[1] - bs[0]
if bs[1]>=len(stest_in):
bs=[len(stest_in)-batch_size, len(stest_in)]
input_data, target_output, seq_len, decoder_point, masks, rout_list = \
prepare_sample_batch(stest_in, stest_out, input_size, output_size, bs)
out, loss_v = session.run([prob, loss], feed_dict={ncomputer.input_data: input_data,
ncomputer.target_output: target_output,
ncomputer.sequence_length: seq_len,
ncomputer.decoder_point: decoder_point,
ncomputer.mask:masks
})
losses.append(loss_v)
out = np.reshape(np.asarray(out), [-1, seq_len, output_size])
out = np.argmax(out, axis=-1)
bout_list = []
for b in range(out.shape[0]):
out_list = []
for io in range(decoder_point, out.shape[1]):
if out[b][io]==0:
break
out_list.append(out[b][io])
bout_list.append(out_list)
for bb in bout_list[:rs]:
all_preds.append(bb)
tescores.append(batch_norm_edit_score(rout_list[:rs], bout_list[:rs], 0.995))
prefile=main_dir+'/{}.test_predict.txt'.format(ncomputer.print_config())
write_predict(prefile, all_preds)
predict_seq=load_single_sequence(prefile)
tescores2=[]
for rseq,pseq in zip(stest_out, predict_seq):
s=norm_edit_score(rseq,pseq)
tescores2.append(s)
tloss = np.mean(losses)
print('test ed {}'.format(np.mean(tescores)))
print('test ed {}'.format(np.mean(tescores2)))
#print('test rec {} prec {}'.format(np.mean(tescores2), np.mean(tescores3)))
print('test loss {}'.format(tloss))
if __name__ == '__main__':
moodle_train()
# moodle_test()
# financial_log_train()
# financial_log_test()
# ibm_train()
# ibm_test()
# Copyright 2012 IBM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import availability_zone
from nova import availability_zones
from nova import context
from nova import db
from nova.openstack.common import jsonutils
from nova import servicegroup
from nova import test
from nova.tests.api.openstack import fakes
def fake_service_get_all(context, disabled=None):
def __fake_service(binary, availability_zone,
created_at, updated_at, host, disabled):
return {'binary': binary,
'availability_zone': availability_zone,
'available_zones': availability_zone,
'created_at': created_at,
'updated_at': updated_at,
'host': host,
'disabled': disabled}
if disabled:
return [__fake_service("nova-compute", "zone-2",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-scheduler", "internal",
datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", True),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", True)]
else:
return [__fake_service("nova-compute", "zone-1",
datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-sched", "internal",
                               datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
"fake_host-1", False),
__fake_service("nova-network", "internal",
datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
"fake_host-2", False)]
def fake_service_is_up(self, service):
return service['binary'] != u"nova-network"
def fake_set_availability_zones(context, services):
return services
class AvailabilityZoneApiTest(test.TestCase):
def setUp(self):
super(AvailabilityZoneApiTest, self).setUp()
self.stubs.Set(db, 'service_get_all', fake_service_get_all)
self.stubs.Set(availability_zones, 'set_availability_zones',
fake_set_availability_zones)
self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
def test_availability_zone_index(self):
req = webob.Request.blank('/v2/fake/os-availability-zone')
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 200)
resp_dict = jsonutils.loads(resp.body)
self.assertTrue('availabilityZoneInfo' in resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 2)
self.assertEqual(zones[0]['zoneName'], u'zone-1')
self.assertTrue(zones[0]['zoneState']['available'])
self.assertIsNone(zones[0]['hosts'])
self.assertEqual(zones[1]['zoneName'], u'zone-2')
self.assertFalse(zones[1]['zoneState']['available'])
self.assertIsNone(zones[1]['hosts'])
def test_availability_zone_detail(self):
def _formatZone(zone_dict):
result = []
# Zone tree view item
result.append({'zoneName': zone_dict['zoneName'],
'zoneState': u'available'
if zone_dict['zoneState']['available'] else
u'not available'})
if zone_dict['hosts'] is not None:
for (host, services) in zone_dict['hosts'].items():
# Host tree view item
result.append({'zoneName': u'|- %s' % host,
'zoneState': u''})
for (svc, state) in services.items():
# Service tree view item
result.append({'zoneName': u'| |- %s' % svc,
'zoneState': u'%s %s %s' % (
'enabled' if state['active'] else
'disabled',
':-)' if state['available'] else
'XXX',
jsonutils.to_primitive(
state['updated_at']))})
return result
def _assertZone(zone, name, status):
self.assertEqual(zone['zoneName'], name)
self.assertEqual(zone['zoneState'], status)
availabilityZone = availability_zone.AvailabilityZoneController()
req = webob.Request.blank('/v2/fake/os-availability-zone/detail')
req.method = 'GET'
req.environ['nova.context'] = context.get_admin_context()
resp_dict = availabilityZone.detail(req)
self.assertTrue('availabilityZoneInfo' in resp_dict)
zones = resp_dict['availabilityZoneInfo']
self.assertEqual(len(zones), 3)
''' availabilityZoneInfo field content in response body:
[{'zoneName': 'zone-1',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-compute': {'active': True, 'available': True,
'updated_at': datetime(2012, 12, 26, 14, 45, 25)}}}},
{'zoneName': 'internal',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-sched': {'active': True, 'available': True,
'updated_at': datetime(2012, 12, 26, 14, 45, 25)}},
'fake_host-2': {
'nova-network': {'active': True, 'available': False,
'updated_at': datetime(2012, 12, 26, 14, 45, 24)}}}},
{'zoneName': 'zone-2',
'zoneState': {'available': False},
'hosts': None}]
'''
l0 = [u'zone-1', u'available']
l1 = [u'|- fake_host-1', u'']
l2 = [u'| |- nova-compute', u'enabled :-) 2012-12-26T14:45:25.000000']
l3 = [u'internal', u'available']
l4 = [u'|- fake_host-1', u'']
l5 = [u'| |- nova-sched', u'enabled :-) 2012-12-26T14:45:25.000000']
l6 = [u'|- fake_host-2', u'']
l7 = [u'| |- nova-network', u'enabled XXX 2012-12-26T14:45:24.000000']
l8 = [u'zone-2', u'not available']
z0 = _formatZone(zones[0])
z1 = _formatZone(zones[1])
z2 = _formatZone(zones[2])
self.assertEqual(len(z0), 3)
self.assertEqual(len(z1), 5)
self.assertEqual(len(z2), 1)
_assertZone(z0[0], l0[0], l0[1])
_assertZone(z0[1], l1[0], l1[1])
_assertZone(z0[2], l2[0], l2[1])
_assertZone(z1[0], l3[0], l3[1])
_assertZone(z1[1], l4[0], l4[1])
_assertZone(z1[2], l5[0], l5[1])
_assertZone(z1[3], l6[0], l6[1])
_assertZone(z1[4], l7[0], l7[1])
_assertZone(z2[0], l8[0], l8[1])
class AvailabilityZoneSerializerTest(test.TestCase):
def test_availability_zone_index_detail_serializer(self):
def _verify_zone(zone_dict, tree):
self.assertEqual(tree.tag, 'availabilityZone')
self.assertEqual(zone_dict['zoneName'], tree.get('name'))
self.assertEqual(str(zone_dict['zoneState']['available']),
tree[0].get('available'))
for _idx, host_child in enumerate(tree[1]):
self.assertTrue(host_child.get('name') in zone_dict['hosts'])
svcs = zone_dict['hosts'][host_child.get('name')]
for _idx, svc_child in enumerate(host_child[0]):
self.assertTrue(svc_child.get('name') in svcs)
svc = svcs[svc_child.get('name')]
self.assertEqual(len(svc_child), 1)
self.assertEqual(str(svc['available']),
svc_child[0].get('available'))
self.assertEqual(str(svc['active']),
svc_child[0].get('active'))
self.assertEqual(str(svc['updated_at']),
svc_child[0].get('updated_at'))
serializer = availability_zone.AvailabilityZonesTemplate()
raw_availability_zones = \
[{'zoneName': 'zone-1',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-compute': {'active': True, 'available': True,
'updated_at':
datetime.datetime(
2012, 12, 26, 14, 45, 25)}}}},
{'zoneName': 'internal',
'zoneState': {'available': True},
'hosts': {'fake_host-1': {
'nova-sched': {'active': True, 'available': True,
'updated_at':
datetime.datetime(
2012, 12, 26, 14, 45, 25)}},
'fake_host-2': {
'nova-network': {'active': True,
'available': False,
'updated_at':
datetime.datetime(
2012, 12, 26, 14, 45, 24)}}}},
{'zoneName': 'zone-2',
'zoneState': {'available': False},
'hosts': None}]
text = serializer.serialize(
dict(availabilityZoneInfo=raw_availability_zones))
tree = etree.fromstring(text)
self.assertEqual('availabilityZones', tree.tag)
self.assertEqual(len(raw_availability_zones), len(tree))
for idx, child in enumerate(tree):
_verify_zone(raw_availability_zones[idx], child)
|
|
############################################################################
##
## Copyright (C) 2006-2008 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
import os, sys
sys.path.append('../../../vistrails')
from parse_cdat_xml_file import parse_cdat_xml_file
from cdat_domain import CDATModule
#cdat package identifiers
cp_version = '0.2'
cp_identifier = 'edu.utah.sci.vistrails.cdat'
cp_name = 'CDAT'
def write_init(output_file, classes_lines, init_lines):
"""write_init(output_file: str, classes_lines: list, init_lines: list)
-> None
Writes the necessary contents for the package init file"""
# cdat dependencies
init_lines.append("\n\n")
init_lines.append("def package_dependencies():\n")
#init_lines.append(" return ['edu.utah.sci.vistrails.numpyscipy']\n")
init_lines.append(" return []\n")
init_lines.append("\n\n")
init_lines.append("def package_requirements():\n")
init_lines.append(" import core.requirements\n")
init_lines.append(" if not core.requirements.python_module_exists('vcs'):\n")
init_lines.append(" raise core.requirements.MissingRequirements('vcs')\n")
init_lines.append(" if not core.requirements.python_module_exists('cdms2'):\n")
init_lines.append(" raise core.requirements.MissingRequirements('cdms2')\n")
init_lines.append(" if not core.requirements.python_module_exists('cdutil'):\n")
init_lines.append(" raise core.requirements.MissingRequirements('cdutil')\n")
init_lines.append(" import vcs, cdms2, cdutil\n")
init_lines.append("\n\n")
header = open("init_inc.py").readlines()
header.append("\n\n")
header.append('version = "' + cp_version + '"\n')
header.append('identifier = "' + cp_identifier + '"\n')
header.append('name = "' + cp_name + '"\n\n')
outfile = open(output_file, "w")
outfile.writelines(header)
outfile.writelines(classes_lines)
outfile.writelines(init_lines)
outfile.close()
def parse_files(input_files):
modules = []
for f in input_files:
modules.append(parse_cdat_xml_file(f))
return modules
def add_canvas_ports_to_canvas_modules(canvas, lines):
canvas.add_extra_input_port_to_all_modules(lines,
port_name='canvas',
port_type='Canvas',
doc='Canvas object',
optional=False
)
canvas.add_extra_output_port_to_all_modules(lines,
port_name='canvas',
port_type='Canvas',
doc='Canvas object',
optional=False
)
def add_canvas_module(canvas,init_lines,class_lines):
canvas.write_extra_module_definition(class_lines,'Canvas')
canvas.register_extra_vistrails_module(init_lines,'Canvas')
def get_image_compute_method(action, ident=''):
lines = []
lines.append(ident + "def compute(self):\n")
lines.append(ident + " if self.has_input('canvas'):\n")
lines.append(ident + " canvas = self.get_input('canvas')\n")
lines.append(ident + " else:\n")
lines.append(ident + " canvas = vcs.init()\n")
lines.append(ident + " args = []\n")
for inp in action._inputs:
lines.append(ident + " %s = None\n"%inp._name)
for inst in inp._valid_instances:
if inp._valid_instances.index(inst) == 0:
lines.append(ident + " if self.has_input('%s'):\n" % inst)
lines.append(ident + " %s = self.get_input('%s')\n" % (inp._name, inst))
lines.append(ident + " args.append(%s)\n"%inp._name)
else:
lines.append(ident + " elif self.has_input('%s'):\n" % inst)
lines.append(ident + " %s = self.get_input('%s')\n" % (inp._name, inst))
lines.append(ident + " args.append(%s)\n"%inp._name)
if inp._required:
lines.append("\n"+ ident +" # %s is a required port\n" % inp._name)
lines.append(ident + " if %s is None:\n" % inp._name)
lines.append(ident + " raise ModuleError(self, \"'%s' is a mandatory port\")\n" % inp._name)
lines.append(ident + " ofile = core.modules.basic_modules.File()\n")
lines.append(ident + " ofile.name = %s\n"%action._inputs[0]._name)
lines.append(ident + " canvas.%s(*args)\n"%action._name)
lines.append(ident + " self.set_output('file',ofile)\n")
lines.append("\n")
return lines
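# For a hypothetical action 'png' whose single required input is named
# 'filename' and accepts the one valid instance 'String',
# get_image_compute_method() emits roughly the following module code:
#
#     def compute(self):
#         if self.has_input('canvas'):
#             canvas = self.get_input('canvas')
#         else:
#             canvas = vcs.init()
#         args = []
#         filename = None
#         if self.has_input('String'):
#             filename = self.get_input('String')
#             args.append(filename)
#
#         # filename is a required port
#         if filename is None:
#             raise ModuleError(self, "'filename' is a mandatory port")
#         ofile = core.modules.basic_modules.File()
#         ofile.name = filename
#         canvas.png(*args)
#         self.set_output('file',ofile)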
def get_cdms2_compute_method(action, ident=''):
lines = []
lines.append(ident + "def compute(self):\n")
lines.append(ident + " args = []\n")
for inp in action._inputs:
lines.append(ident + " %s = None\n"%inp._name)
for inst in inp._valid_instances:
if inp._valid_instances.index(inst) == 0:
lines.append(ident + " if self.has_input('%s'):\n" % inst)
lines.append(ident + " %s = self.get_input('%s')\n" % (inp._name, inst))
lines.append(ident + " args.append(%s)\n"%inp._name)
else:
lines.append(ident + " elif self.has_input('%s'):\n" % inst)
lines.append(ident + " %s = self.get_input('%s')\n" % (inp._name, inst))
lines.append(ident + " args.append(%s)\n"%inp._name)
if inp._required:
lines.append("\n"+ ident +" # %s is a required port\n" % inp._name)
lines.append(ident + " if %s is None:\n" % inp._name)
lines.append(ident + " raise ModuleError(self, \"'%s' is a mandatory port\")\n" % inp._name)
lines.append(ident + " res = cdms2.%s(*args)\n"%action._name)
lines.append(ident + " self.set_output('%s',res)\n"%action._outputs[0]._name)
lines.append("\n")
return lines
def get_CdmsFile_compute_method(action, ident=''):
lines = []
lines.append(ident + "def compute(self):\n")
lines.append(ident + " self.check_input('cdmsfile')\n")
lines.append(ident + " cdmsfile = self.get_input('cdmsfile')\n")
lines.append(ident + " args = []\n")
for inp in action._inputs:
lines.append(ident + " %s = None\n"%inp._name)
for inst in inp._valid_instances:
if inp._valid_instances.index(inst) == 0:
lines.append(ident + " if self.has_input('%s'):\n" % inst)
lines.append(ident + " %s = self.get_input('%s')\n" % (inp._name, inst))
lines.append(ident + " args.append(%s)\n"%inp._name)
else:
lines.append(ident + " elif self.has_input('%s'):\n" % inst)
lines.append(ident + " %s = self.get_input('%s')\n" % (inp._name, inst))
lines.append(ident + " args.append(%s)\n"%inp._name)
if inp._required:
lines.append("\n"+ ident +" # %s is a required port\n" % inp._name)
lines.append(ident + " if %s is None:\n" % inp._name)
lines.append(ident + " raise ModuleError(self, \"'%s' is a mandatory port\")\n" % inp._name)
lines.append(ident + " res = cdmsfile.%s(*args)\n"%action._name)
lines.append(ident + " self.set_output('%s',res)\n"%action._outputs[0]._name)
lines.append("\n")
return lines
if __name__ == '__main__':
# usage:
args = sys.argv
if len(args) > 2:
root_dir = args[1]
output_file = args[2]
else:
print "Usage: %s root_dir output_file" % args[0]
sys.exit(0)
xmlfiles = []
input_files = os.walk(root_dir)
for (d, tree, files) in input_files:
for f in files:
if os.path.isfile(os.path.join(d,f)) and f.endswith(".xml"):
xmlfiles.append(os.path.join(d,f))
modules = parse_files(xmlfiles)
extra_init_lines = []
init_lines = []
extra_init_lines.append("\ndef initialize(*args, **keywords):\n")
extra_init_lines.append(" reg = core.modules.module_registry.get_module_registry()\n\n")
class_lines = []
extra_class_lines = []
print "%s xml file(s) found."% len(modules)
CDATModule.write_extra_module_definitions_init(extra_class_lines)
for m in modules:
print "codepath: %s has %s Vistrails Modules."%(m._codepath, len(m._actions))
m.build_vistrails_modules_dict()
for m in modules:
m.register_vistrails_modules(init_lines)
if m._codepath == 'vcs.Canvas.Canvas':
for a in m._actions:
if a._name == 'png':
a.write_module_definition(class_lines,
ident='',
compute_method=get_image_compute_method(a,ident=" "))
a.register_extra_output_port('file',
'core.modules.basic_modules.File',
init_lines,
"File output",
False)
else:
a.write_module_definition(class_lines)
add_canvas_ports_to_canvas_modules(m,init_lines)
add_canvas_module(m,extra_init_lines, extra_class_lines)
elif m._codepath == "cdms2.dataset.CdmsFile":
for a in m._actions:
if a._name == "__call__":
a.write_module_definition(class_lines,
ident='',
compute_method=get_CdmsFile_compute_method(a,
ident=" "))
else:
a.write_module_definition(class_lines)
m.add_extra_input_port_to_all_modules(init_lines,
port_name='cdmsfile',
port_type='CdmsFile',
doc='cdmsfile',
optional=False
)
elif m._codepath == 'cdms2':
for a in m._actions:
if a._name == "open":
a.write_module_definition(class_lines,
ident='',
compute_method=get_cdms2_compute_method(a,
ident=" "))
else:
a.write_module_definition(class_lines)
else:
m.write_module_definitions(class_lines)
CDATModule.write_extra_module_definitions(extra_class_lines)
CDATModule.register_extra_vistrails_modules(extra_init_lines)
cdatwindow_init_lines = open("cdatwindow_init_inc.py").readlines()
extra_init_lines.extend(cdatwindow_init_lines)
extra_init_lines.extend(init_lines)
extra_class_lines.extend(class_lines)
write_init(output_file, extra_class_lines, extra_init_lines)
|
|
"""
Classes and functions used to visualize data for Thermo Scientific analyzers
"""
from pandas import Series, DataFrame
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import dates as d
import os
import math
import glob
import matplotlib
import warnings
import sys
__all__ = ['diurnal_plot','diurnal_plot_single', 'ThermoPlot']
def diurnal_plot(data, dates=[], shaded=False, title="Diurnal Profile of Trace Gases", xlabel="Local Time: East St. Louis, MO"):
    '''
    To plot the entire DataFrame, leave `dates` empty; otherwise declare the date
    or dates to plot as a list. `data` should be a pandas DataFrame with a time
    index and each trace gas concentration as a column.
    Returns a single figure with subplots for NOx, SO2, and O3.
    >>> diurnal_plot(data, shaded=True)
    '''
# Check to make sure the data is a valid dataframe
if not isinstance(data, pd.DataFrame):
        sys.exit("data is not a pandas DataFrame, thus this will not end well for you.")
# If length of dates is zero, plot everything
if len(dates) == 0:
# Plot everything, yo!
pass
elif len(dates) == 1:
# Plot just this date
data = data[dates[0]]
elif len(dates) == 2:
# Plot between these dates
data = data[dates[0]:dates[1]]
else:
sys.exit("Dates are not properly configured.")
# Add columns for time to enable simple diurnal trends to be found
data['Time'] = data.index.map(lambda x: x.strftime("%H:%M"))
# Group the data by time and grab the statistics
grouped = data.groupby('Time').describe().unstack()
# set the index to be a str
grouped.index = pd.to_datetime(grouped.index.astype(str))
# Plot
fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(10,9), sharex=True)
# Set plot titles and labels
ax1.set_title(title, fontsize=14)
ax1.set_ylabel(r'$\ [NO_x] (ppb)$', fontsize=14, weight='bold')
ax2.set_ylabel(r'$\ [SO_2] (ppb)$', fontsize=14)
ax3.set_ylabel(r'$\ [O_3] (ppb)$', fontsize=14)
ax3.set_xlabel(xlabel, fontsize=14)
# Make the ticks invisible on the first and second plots
plt.setp( ax1.get_xticklabels(), visible=False)
plt.setp( ax2.get_xticklabels(), visible=False)
# Set y min to zero just in case:
ax1.set_ylim(0,grouped['nox']['mean'].max()*1.05)
ax2.set_ylim(0,grouped['so2']['mean'].max()*1.05)
ax3.set_ylim(0,grouped['o3']['mean'].max()*1.05)
# Plot means
ax1.plot(grouped.index, grouped['nox']['mean'],'g', linewidth=2.0)
ax2.plot(grouped.index, grouped['so2']['mean'], 'r', linewidth=2.0)
ax3.plot(grouped.index, grouped['o3']['mean'], 'b', linewidth=2.0)
# If shaded=true, plot trends
if shaded == True:
ax1.plot(grouped.index, grouped['nox']['75%'],'g')
ax1.plot(grouped.index, grouped['nox']['25%'],'g')
ax1.set_ylim(0,grouped['nox']['75%'].max()*1.05)
ax1.fill_between(grouped.index, grouped['nox']['mean'], grouped['nox']['75%'], alpha=.5, facecolor='green')
ax1.fill_between(grouped.index, grouped['nox']['mean'], grouped['nox']['25%'], alpha=.5, facecolor='green')
ax2.plot(grouped.index, grouped['so2']['75%'],'r')
ax2.plot(grouped.index, grouped['so2']['25%'],'r')
ax2.set_ylim(0,grouped['so2']['75%'].max()*1.05)
ax2.fill_between(grouped.index, grouped['so2']['mean'], grouped['so2']['75%'], alpha=.5, facecolor='red')
ax2.fill_between(grouped.index, grouped['so2']['mean'], grouped['so2']['25%'], alpha=.5, facecolor='red')
ax3.plot(grouped.index, grouped['o3']['75%'],'b')
ax3.plot(grouped.index, grouped['o3']['25%'],'b')
ax3.set_ylim(0,grouped['o3']['75%'].max()*1.05)
ax3.fill_between(grouped.index, grouped['o3']['mean'], grouped['o3']['75%'], alpha=.5, facecolor='blue')
ax3.fill_between(grouped.index, grouped['o3']['mean'], grouped['o3']['25%'], alpha=.5, facecolor='blue')
# Get/Set xticks
ticks = ax1.get_xticks()
ax3.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
ax3.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
ax3.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
# Make the layout tight to get rid of some whitespace
plt.tight_layout()
plt.show()
return (fig, (ax1, ax2, ax3))
def diurnal_plot_single(data, model='', dates=[], shaded=False, color1 = 'blue',
title="Diurnal Profile of Trace Gases", xlabel="Local Time: East St. Louis, MO",
ylabel=r'$\ [NO_x] (ppb)$'):
'''
`data` should be a pandas core DataFrame with time index and each trace gas concentration as a column
returns a single plot for one of the three analyzers.
    >>> diurnal_plot_single(data, model='o3', ylabel='O3', shaded=True, color1='green')
'''
# Check to make sure the data is a valid dataframe
if not isinstance(data, pd.DataFrame):
sys.exit("data is not a pandas DataFrame, thus this will not end well for you.")
# Check to make sure the model is valid
if model.lower() not in ['nox','so2','o3','sox']:
sys.exit("Model is not defined correctly: options are ['nox','so2','sox','o3']")
# Set model to predefined variable
if model.lower() == 'nox':
instr = 'nox'
elif model.lower() == 'so2' or model.lower() == 'sox':
instr = 'sox'
else:
instr = 'o3'
# If not plotting all the data, truncate the dataframe to include only the needed data
if len(dates) == 0:
# plot everything
pass
elif len(dates) == 1:
# plot just this date
data = data[dates[0]]
elif len(dates) == 2:
# plot between these dates
data = data[dates[0]:dates[1]]
else:
sys.exit("You have an error with how you defined your dates")
# Add columns for time to enable simple diurnal trends to be found
data['Time'] = data.index.map(lambda x: x.strftime("%H:%M"))
# Group the data by time and grab the statistics
grouped = data.groupby('Time').describe().unstack()
# set the index to be a str
grouped.index = pd.to_datetime(grouped.index.astype(str))
# Plot
fig, ax = plt.subplots(1, figsize=(8,4))
# Set plot titles and labels
ax.set_title(title, fontsize=14)
ax.set_ylabel(ylabel, fontsize=14, weight='bold')
ax.set_xlabel(xlabel, fontsize=14)
# Set y min to zero just in case:
ax.set_ylim(0,grouped[instr]['mean'].max()*1.05)
# Plot means
ax.plot(grouped.index, grouped[instr]['mean'], color1,linewidth=2.0)
# If shaded=true, plot trends
if shaded == True:
ax.plot(grouped.index, grouped[instr]['75%'],color1)
ax.plot(grouped.index, grouped[instr]['25%'],color1)
ax.set_ylim(0,grouped[instr]['75%'].max()*1.05)
ax.fill_between(grouped.index, grouped[instr]['mean'], grouped[instr]['75%'], alpha=.5, facecolor=color1)
ax.fill_between(grouped.index, grouped[instr]['mean'], grouped[instr]['25%'], alpha=.5, facecolor=color1)
# Get/Set xticks
ticks = ax.get_xticks()
ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
# Make the layout tight to get rid of some whitespace
plt.tight_layout()
plt.show()
return (fig, ax)
class ThermoPlot():
'''
Allows for easy plotting of internal instrument data. Currently supports the
following models:
- NO, NO2, NOx (42I)
- O3 (49I)
- SO2 (43I)
'''
def __init__(self, data):
self.data = data
def debug_plot(self, args={}):
'''
Plots thermo scientific instrument data for debugging purposes. The top plot contains internal
instrument data such as flow rates and temperatures. The bottom plot contains trace gas data for the
instrument.
instrument must be set to either nox, so2, sox, or o3
>>> nox = ThermoPlot(data)
>>> f, (a1, a2, a3) = nox.debug_plot()
'''
default_args = {
'xlabel':'Local Time, East St Louis, MO',
'ylabpressure':'Flow (LPM)',
'ylabgas':'Gas Conc. (ppb)',
'ylabtemp':'Temperature (C)',
'title_fontsize':'18',
'labels_fontsize':'14',
'grid':False
}
# Figure out what model we are trying to plot and set instrument specific default args
cols = [i.lower() for i in self.data.columns.values.tolist()]
if 'o3' in cols:
default_args['instrument'] = 'o3'
default_args['title'] = "Debug Plot for " + r'$\ O_{3} $' + ": Model 49I"
default_args['color_o3'] = 'blue'
elif 'sox' in cols or 'so2' in cols:
default_args['instrument'] = 'so2'
default_args['title'] = "Debug Plot for " + r'$\ SO_{2} $' + ": Model 43I"
default_args['color_so2'] = 'green'
elif 'nox' in cols:
default_args['instrument'] = 'nox'
default_args['title'] = "Debug Plot for " + r'$\ NO_{x} $' + ": Model 42I"
default_args['color_no'] = '#FAB923'
default_args['color_nox'] = '#FC5603'
default_args['color_no2'] = '#FAE823'
else:
sys.exit("Could not figure out what isntrument this is for")
# If kwargs are set, replace the default values
        for key in default_args:
            if key in args:
                default_args[key] = args[key]
# Set up Plot and all three axes
fig, (ax1, ax3) = plt.subplots(2, figsize=(10,6), sharex=True)
ax2 = ax1.twinx()
# set up axes labels and titles
ax1.set_title(default_args['title'], fontsize=default_args['title_fontsize'])
ax1.set_ylabel(default_args['ylabpressure'], fontsize=default_args['labels_fontsize'])
ax2.set_ylabel(default_args['ylabtemp'], fontsize=default_args['labels_fontsize'])
ax3.set_ylabel(default_args['ylabgas'], fontsize=default_args['labels_fontsize'])
ax3.set_xlabel(default_args['xlabel'], fontsize=default_args['labels_fontsize'])
# Make the ticks invisible on the first and second plots
plt.setp( ax1.get_xticklabels(), visible=False )
# Plot the debug data on the top graph
if default_args['instrument'] == 'o3':
self.data['bncht'].plot(ax=ax2, label=r'$\ T_{bench}$')
self.data['lmpt'].plot(ax=ax2, label=r'$\ T_{lamp}$')
self.data['flowa'].plot(ax=ax1, label=r'$\ Q_{A}$', style='--')
self.data['flowb'].plot(ax=ax1, label=r'$\ Q_{B}$', style='--')
self.data['o3'].plot(ax=ax3, color=default_args['color_o3'], label=r'$\ O_{3}$')
elif default_args['instrument'] == 'so2':
self.data['intt'].plot(ax=ax2, label=r'$\ T_{internal}$')
self.data['rctt'].plot(ax=ax2, label=r'$\ T_{reactor}$')
self.data['smplfl'].plot(ax=ax1, label=r'$\ Q_{sample}$', style='--')
self.data['so2'].plot(ax=ax3, label=r'$\ SO_2 $', color=default_args['color_so2'], ylim=[0,self.data['so2'].max()*1.05])
else:
m = max(self.data['convt'].max(),self.data['intt'].max(),self.data['pmtt'].max())
self.data['convt'].plot(ax=ax2, label=r'$\ T_{converter}$')
self.data['intt'].plot(ax=ax2, label=r'$\ T_{internal}$')
self.data['rctt'].plot(ax=ax2, label=r'$\ T_{reactor}$')
self.data['pmtt'].plot(ax=ax2, label=r'$\ T_{PMT}$')
self.data['smplf'].plot(ax=ax1, label=r'$\ Q_{sample}$', style='--')
self.data['ozonf'].plot(ax=ax1, label=r'$\ Q_{ozone}$', style='--')
self.data['no'].plot(ax=ax3, label=r'$\ NO $', color=default_args['color_no'])
self.data['no2'].plot(ax=ax3, label=r'$\ NO_{2}$', color=default_args['color_no2'])
self.data['nox'].plot(ax=ax3, label=r'$\ NO_{x}$', color=default_args['color_nox'], ylim=(0,math.ceil(self.data.nox.max()*1.05)))
# Legends
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
plt.legend(lines+lines2, labels+labels2, bbox_to_anchor=(1.10, 1), loc=2, borderaxespad=0.)
ax3.legend(bbox_to_anchor=(1.10, 1.), loc=2, borderaxespad=0.)
# Hide grids?
ax1.grid(default_args['grid'])
ax2.grid(default_args['grid'])
ax3.grid(default_args['grid'])
# More of the things..
plt.tight_layout()
plt.show()
return fig, (ax1, ax2, ax3)
|
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports if possible
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
CInv,
P2PInterface,
mininode_lock,
msg_block,
msg_getdata,
network_thread_join,
network_thread_start,
)
from test_framework.test_framework import VergeTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
wait_until,
)
# P2PInterface is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.
class BaseNode(P2PInterface):
def __init__(self):
"""Initialize the P2PInterface
Used to initialize custom properties for the Node that aren't
included by default in the base class. Be aware that the P2PInterface
base class already stores a counter for each P2P message type and the
last received message of each type, which should be sufficient for the
needs of most tests.
Call super().__init__() first for standard initialization and then
initialize custom properties."""
super().__init__()
# Stores a dictionary of all blocks received
self.block_receive_map = defaultdict(int)
def on_block(self, message):
"""Override the standard on_block callback
Store the hash of a received block in the dictionary."""
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
def on_inv(self, message):
"""Override the standard on_inv callback"""
pass
def custom_function():
"""Do some custom behaviour
If this function is more generally useful for other tests, consider
moving it to a module in test_framework."""
# self.log.info("running custom_function") # Oops! Can't run self.log outside the VergeTestFramework
pass
class ExampleTest(VergeTestFramework):
# Each functional test is a subclass of the VergeTestFramework class.
# Override the set_test_params(), add_options(), setup_chain(), setup_network()
# and setup_nodes() methods to customize the test setup as required.
def set_test_params(self):
"""Override test parameters for your individual test.
        This method must be overridden and num_nodes must be explicitly set."""
self.setup_clean_chain = True
self.num_nodes = 3
# Use self.extra_args to change command-line arguments for the nodes
self.extra_args = [[], ["-logips"], []]
# self.log.info("I've finished set_test_params") # Oops! Can't run self.log before run_test()
# Use add_options() to add specific command-line options for your test.
# In practice this is not used very much, since the tests are mostly written
# to be run in automated environments without command-line options.
# def add_options()
# pass
# Use setup_chain() to customize the node data directories. In practice
# this is not used very much since the default behaviour is almost always
# fine
# def setup_chain():
# pass
def setup_network(self):
"""Setup the test network topology
Often you won't need to override this, since the standard network topology
(linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.
If you do override this method, remember to start the nodes, assign
them to self.nodes, connect them and then sync."""
self.setup_nodes()
# In this test, we're not connecting node2 to node0 or node1. Calls to
# sync_all() should not include node2, since we're not expecting it to
# sync.
connect_nodes(self.nodes[0], 1)
        self.sync_all([self.nodes[0:2]])
# Use setup_nodes() to customize the node start behaviour (for example if
# you don't want to start all nodes at the start of the test).
# def setup_nodes():
# pass
def custom_method(self):
"""Do some custom behaviour for this test
Define it in a method here because you're going to use it repeatedly.
If you think it's useful in general, consider moving it to the base
VergeTestFramework class so other tests can use it."""
self.log.info("Running custom_method")
def run_test(self):
"""Main test logic"""
# Create P2P connections to two of the nodes
self.nodes[0].add_p2p_connection(BaseNode())
# Start up network handling in another thread. This needs to be called
# after the P2P connections have been created.
network_thread_start()
# wait_for_verack ensures that the P2P connection is fully up.
self.nodes[0].p2p.wait_for_verack()
# Generating a block on one of the nodes will get us out of IBD
blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:2]])
# Notice above how we called an RPC by calling a method with the same
# name on the node object. Notice also how we used a keyword argument
# to specify a named RPC argument. Neither of those are defined on the
# node object. Instead there's some __getattr__() magic going on under
# the covers to dispatch unrecognised attribute calls to the RPC
# interface.
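        # For example, self.nodes[0].getblockcount() would dispatch to the
        # node's "getblockcount" RPC in the same way (shown for illustration
        # only; this test doesn't call it).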
# Logs are nice. Do plenty of them. They can be used in place of comments for
# breaking the test into sub-sections.
self.log.info("Starting test!")
self.log.info("Calling a custom function")
custom_function()
self.log.info("Calling a custom method")
self.custom_method()
self.log.info("Create some blocks")
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
height = 1
for i in range(10):
# Use the mininode and blocktools functionality to manually build a block
# Calling the generate() rpc is easier, but this allows us to exactly
# control the blocks and transactions.
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
block_message = msg_block(block)
# Send message is used to send a P2P message to the node over our P2PInterface
self.nodes[0].p2p.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
self.block_time += 1
height += 1
self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
self.nodes[1].waitforblockheight(11)
self.log.info("Connect node2 and node1")
connect_nodes(self.nodes[1], 2)
self.log.info("Add P2P connection to node2")
# We can't add additional P2P connections once the network thread has started. Disconnect the connection
# to node0, wait for the network thread to terminate, then connect to node2. This is specific to
# the current implementation of the network thread and may be improved in future.
self.nodes[0].disconnect_p2ps()
network_thread_join()
self.nodes[2].add_p2p_connection(BaseNode())
network_thread_start()
self.nodes[2].p2p.wait_for_verack()
self.log.info("Wait for node2 reach current tip. Test that it has propagated all the blocks to us")
getdata_request = msg_getdata()
for block in blocks:
getdata_request.inv.append(CInv(2, block))
self.nodes[2].p2p.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
# P2PInterface objects.
wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)
self.log.info("Check that each block was received only once")
# The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
# messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
with mininode_lock:
for block in self.nodes[2].p2p.block_receive_map.values():
assert_equal(block, 1)
if __name__ == '__main__':
ExampleTest().main()
|
|
#
# flightlines_to_single_CHM_pit_free.py
#
# (c) 2014, martin isenburg - http://rapidlasso.com
# rapidlasso GmbH - fast tools to catch reality
#
# This LAStools pipeline turns a folder full of LAS
# or LAZ files (assumed to be raw flightlines) into a
# single pit-free CHM using the algorithms described
# by A. Khosravipour et al. in Silvilaser 2013. The
# input file is first tiled using lastile with the
# specified tile size. The specified buffer is used
# to avoid edge artifacts. All tiles are then ground
# classified using lasground marking points as ground
# (class 2) and non-ground (class 1). Next the height
# of all points above the ground is computed using
# lasheight and used to height-normalize all the tiles
# in the sense that the height is used to replace the
# z coordinates. Using lasthin the tiles are then both
# thinned and splatted using the laser beam width in an
# attempt to widen the LiDAR returns a little bit. From
# these height-normalized and point-splatted tiles
# the partial CHMs are computed (as detailed in the
# poster, the extended abstract, and the paper) that
# are merged into a single CHM in the final step.
#
# LiDAR input: LAS/LAZ
# raster output: TIF/IMG/BIL/DTM/ASC/FLT/XYZ
#
# for licensing see http://lastools.org/LICENSE.txt
#
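#
# For orientation, a minimal sketch of the command-line pipeline this
# script assembles (the values are illustrative; the actual calls are
# built below from the tool's arguments):
#
#   lastile   -i flightlines\*.laz -tile_size 1000 -buffer 50 -odir temp -o pit_free_temp_tile.laz -olaz
#   lasground -i temp\pit_free_temp_tile*.laz -odix _g -olaz
#   lasheight -i temp\pit_free_temp_tile*_g.laz -replace_z -odix h -olaz
#   lasthin   -i temp\pit_free_temp_tile*_gh.laz -highest -step 0.25 -subcircle 0.1 -odix t -olaz
#   las2dem   -i temp\pit_free_temp_tile*_ght.laz -drop_z_below 2 -kill 1.5 -step 0.5 -use_tile_bb -ocut 4 -odix _chm02 -obil
#   (las2dem is repeated with -drop_z_below 5/10/15/20 for the higher partial CHMs)
#   lasgrid   -i temp\pit_free_temp_tile*.bil -merged -highest -step 0.5 -o chm.tif
#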
import sys, os, arcgisscripting, subprocess
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting flightlines_to_single_CHM_pit_free ...")
### define positions of arguments in argv array
arg_input_folder = 1
arg_tile_size = 2
arg_buffer = 3
arg_terrain_type = 4
arg_beam_width = 5
arg_step = 6
arg_cores = 7
arg_empty_temp_dir = 8
arg_output_file = 9
arg_verbose = 10
arg_count_needed = 11
### get number of arguments
argc = len(sys.argv)
### make sure we have right number of arguments
if argc != arg_count_needed:
gp.AddMessage("Error. Wrong number of arguments. Got " + str(argc) + " expected " + str(arg_count_needed))
sys.exit(1)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get selected arguments
empty_temp_dir = sys.argv[arg_empty_temp_dir]
### get the path to LAStools
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))
### make sure the path does not contain spaces
if lastools_path.count(" ") > 0:
gp.AddMessage("Error. Path to .\\lastools installation contains spaces.")
gp.AddMessage("This does not work: " + lastools_path)
gp.AddMessage("This would work: C:\\software\\lastools")
sys.exit(1)
### make sure the path does not contain open or closing brackets
if (lastools_path.count("(") > 0) or (lastools_path.count(")") > 0):
gp.AddMessage("Error. Path to .\\lastools installation contains brackets.")
gp.AddMessage("This does not work: " + lastools_path)
gp.AddMessage("This would work: C:\\software\\lastools")
sys.exit(1)
### complete the path to where the LAStools executables are
lastools_path = lastools_path + "\\bin"
### check if path exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lastile executable
lastile_path = lastools_path+"\\lastile.exe"
### check if the lastile executable exists
if os.path.exists(lastile_path) == False:
gp.AddMessage("Cannot find lastile.exe at " + lastile_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastile_path + " ...")
### create the full path to the lasground executable
lasground_path = lastools_path+"\\lasground.exe"
### check if the lasground executable exists
if os.path.exists(lasground_path) == False:
gp.AddMessage("Cannot find lasground.exe at " + lasground_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasground_path + " ...")
### create the full path to the lasheight executable
lasheight_path = lastools_path+"\\lasheight.exe"
### check if the lasheight executable exists
if os.path.exists(lasheight_path) == False:
gp.AddMessage("Cannot find lasheight.exe at " + lasheight_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasheight_path + " ...")
### create the full path to the lasthin executable
lasthin_path = lastools_path+"\\lasthin.exe"
### check if the lasthin executable exists
if os.path.exists(lasthin_path) == False:
gp.AddMessage("Cannot find lasthin.exe at " + lasthin_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasthin_path + " ...")
### create the full path to the las2dem executable
las2dem_path = lastools_path+"\\las2dem.exe"
### check if the las2dem executable exists
if os.path.exists(las2dem_path) == False:
gp.AddMessage("Cannot find las2dem.exe at " + las2dem_path)
sys.exit(1)
else:
gp.AddMessage("Found " + las2dem_path + " ...")
### create the full path to the lasgrid executable
lasgrid_path = lastools_path+"\\lasgrid.exe"
### check if the lasgrid executable exists
if os.path.exists(lasgrid_path) == False:
gp.AddMessage("Cannot find lasgrid.exe at " + lasgrid_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasgrid_path + " ...")
### check if the empty temp directory exists
if os.path.exists(empty_temp_dir) == False:
gp.AddMessage("Cannot find empty temp dir " + empty_temp_dir)
sys.exit(1)
else:
gp.AddMessage("Found " + empty_temp_dir + " ...")
### make sure the empty temp directory is empty
if os.listdir(empty_temp_dir) != []:
gp.AddMessage("Empty temp directory '" + empty_temp_dir + "' is not empty")
sys.exit(1)
else:
gp.AddMessage("And it's empty ...")
###################################################
### first step: tile folder of input files
###################################################
### create the command string for lastile.exe
command = ['"'+lastile_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+sys.argv[arg_input_folder]+'\\*.las"')
command.append("-i")
command.append('"'+sys.argv[arg_input_folder]+'\\*.laz"')
### maybe use a user-defined tile size
if sys.argv[arg_tile_size] != "1000":
command.append("-tile_size")
command.append(sys.argv[arg_tile_size].replace(",","."))
### maybe create a buffer around the tiles
if sys.argv[arg_buffer] != "0":
command.append("-buffer")
command.append(sys.argv[arg_buffer].replace(",","."))
### an empty temp directory must have been selected
if empty_temp_dir != "#":
command.append("-odir")
command.append('"'+empty_temp_dir+'"')
else:
gp.AddMessage("Error. no empty temp directory was specified.")
sys.exit(1)
### give tiles a simple name
command.append("-o")
command.append("pit_free_temp_tile.laz")
### store temporary tiles in compressed format
command.append("-olaz")
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lastile
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in lastile step.")
sys.exit(1)
### report success
gp.AddMessage("lastile step done.")
###################################################
### second step: ground classify each tile
###################################################
### create the command string for lasground.exe
command = ['"'+lasground_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*.laz"+'"')
### what type of terrain do we have
if sys.argv[arg_terrain_type] == "wilderness":
command.append("-wilderness")
elif sys.argv[arg_terrain_type] == "city or warehouses":
command.append("-city")
command.append("-extra_fine")
elif sys.argv[arg_terrain_type] == "towns or flats":
command.append("-town")
command.append("-fine")
elif sys.argv[arg_terrain_type] == "metropolis":
command.append("-metro")
command.append("-ultra_fine")
### give ground-classified tiles a meaningful appendix
command.append("-odix")
command.append("_g")
### store ground-classified tiles in compressed format
command.append("-olaz")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasground
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in lasground step.")
sys.exit(1)
### report success
gp.AddMessage("lasground step done.")
###################################################
### third step: height-normalize each tile
###################################################
### create the command string for lasheight.exe
command = ['"'+lasheight_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*_g.laz"+'"')
### height normalize
command.append("-replace_z")
### give height-classified tiles a meaningful appendix
command.append("-odix")
command.append("h")
### store height-classified tiles in compressed format
command.append("-olaz")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasheight
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in lasheight step.")
sys.exit(1)
### report success
gp.AddMessage("lasheight step done.")
###################################################
### fourth step: splat and thin-highest each tile
###################################################
### create the command string for lasthin.exe
command = ['"'+lasthin_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*_gh.laz"+'"')
### keep the highest
command.append("-highest")
### on a grid with cells half the final step size (2 by 2 cells per output cell)
command.append("-step")
command.append(str(0.5*float(sys.argv[arg_step].replace(",","."))))
### maybe splat with half the laser beam width
if sys.argv[arg_beam_width] != "#":
command.append("-subcircle")
command.append(str(0.5*float(sys.argv[arg_beam_width].replace(",","."))))
### give thin-classified tiles a meaningful appendix
command.append("-odix")
command.append("t")
### store thin-classified tiles in compressed format
command.append("-olaz")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasthin
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in lasthin step.")
sys.exit(1)
### report success
gp.AddMessage("lasthin step done.")
###################################################
### fifth step: raster all partial CHMs
###################################################
### at level 00
###################################################
### create the command string for las2dem.exe
command = ['"'+las2dem_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*_ght.laz"+'"')
### raster tile with requested step
command.append("-step")
command.append(sys.argv[arg_step].replace(",","."))
### raster only tile interiors
command.append("-use_tile_bb")
### store rastered tiles in temporary directory
command.append("-odir")
command.append(empty_temp_dir)
### give the tiles a meaningful appendix
command.append("-ocut")
command.append("4")
command.append("-odix")
command.append("_chm00")
### store rastered tiles in BIL format
command.append("-obil")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of las2dem
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in las2dem (CHM00) step.")
sys.exit(1)
### report success
gp.AddMessage("las2dem (CHM00) step done.")
###################################################
### fifth step: raster all partial CHMs
###################################################
### at level 02
###################################################
### create the command string for las2dem.exe
command = ['"'+las2dem_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*_ght.laz"+'"')
### remove all points below 2 meters
command.append("-drop_z_below")
command.append("2")
### raster tile with requested step
command.append("-step")
command.append(sys.argv[arg_step].replace(",","."))
### kill triangles that are three times the requested step (or bigger)
command.append("-kill")
command.append(str(3.0*float(sys.argv[arg_step].replace(",","."))))
### raster only tile interiors
command.append("-use_tile_bb")
### store rastered tiles in temporary directory
command.append("-odir")
command.append(empty_temp_dir)
### give the tiles a meaningful appendix
command.append("-ocut")
command.append("4")
command.append("-odix")
command.append("_chm02")
### store rastered tiles in BIL format
command.append("-obil")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of las2dem
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in las2dem (CHM02) step.")
sys.exit(1)
### report success
gp.AddMessage("las2dem (CHM02) step done.")
###################################################
### fifth step: raster all partial CHMs
###################################################
### at level 05
###################################################
### create the command string for las2dem.exe
command = ['"'+las2dem_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*_ght.laz"+'"')
### remove all points below 5 meters
command.append("-drop_z_below")
command.append("5")
### raster tile with requested step
command.append("-step")
command.append(sys.argv[arg_step].replace(",","."))
### kill triangles that are three times the requested step (or bigger)
command.append("-kill")
command.append(str(3.0*float(sys.argv[arg_step].replace(",","."))))
### raster only tile interiors
command.append("-use_tile_bb")
### store rastered tiles in temporary directory
command.append("-odir")
command.append(empty_temp_dir)
### give the tiles a meaningful appendix
command.append("-ocut")
command.append("4")
command.append("-odix")
command.append("_chm05")
### store rastered tiles in BIL format
command.append("-obil")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of las2dem
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in las2dem (CHM05) step.")
sys.exit(1)
### report success
gp.AddMessage("las2dem (CHM05) step done.")
###################################################
### fifth step: raster all partial CHMs
###################################################
### at level 10
###################################################
### create the command string for las2dem.exe
command = ['"'+las2dem_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*_ght.laz"+'"')
### remove all points below 10 meters
command.append("-drop_z_below")
command.append("10")
### raster tile with requested step
command.append("-step")
command.append(sys.argv[arg_step].replace(",","."))
### kill triangles that are three times the requested step (or bigger)
command.append("-kill")
command.append(str(3.0*float(sys.argv[arg_step].replace(",","."))))
### raster only tile interiors
command.append("-use_tile_bb")
### store rastered tiles in temporary directory
command.append("-odir")
command.append(empty_temp_dir)
### give the tiles a meaningful appendix
command.append("-ocut")
command.append("4")
command.append("-odix")
command.append("_chm10")
### store rastered tiles in BIL format
command.append("-obil")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of las2dem
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in las2dem (CHM10) step.")
sys.exit(1)
### report success
gp.AddMessage("las2dem (CHM10) step done.")
###################################################
### fifth step: raster all partial CHMs
###################################################
### at level 15
###################################################
### create the command string for las2dem.exe
command = ['"'+las2dem_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*_ght.laz"+'"')
### remove all points below 15 meters
command.append("-drop_z_below")
command.append("15")
### raster tile with requested step
command.append("-step")
command.append(sys.argv[arg_step].replace(",","."))
### kill triangles that are three times the requested step (or bigger)
command.append("-kill")
command.append(str(3.0*float(sys.argv[arg_step].replace(",","."))))
### raster only tile interiors
command.append("-use_tile_bb")
### store rastered tiles in temporary directory
command.append("-odir")
command.append(empty_temp_dir)
### give the tiles a meaningful appendix
command.append("-ocut")
command.append("4")
command.append("-odix")
command.append("_chm15")
### store rastered tiles in BIL format
command.append("-obil")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of las2dem
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in las2dem (CHM15) step.")
sys.exit(1)
### report success
gp.AddMessage("las2dem (CHM15) step done.")
###################################################
### fifth step: raster all partial CHMs
###################################################
### at level 20
###################################################
### create the command string for las2dem.exe
command = ['"'+las2dem_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*_ght.laz"+'"')
### remove all points below 20 meters
command.append("-drop_z_below")
command.append("20")
### raster tile with requested step
command.append("-step")
command.append(sys.argv[arg_step].replace(",","."))
### kill triangles that are three times the requested step (or bigger)
command.append("-kill")
command.append(str(3.0*float(sys.argv[arg_step].replace(",","."))))
### raster only tile interiors
command.append("-use_tile_bb")
### store rastered tiles in temporary directory
command.append("-odir")
command.append(empty_temp_dir)
### give the tiles a meaningful appendix
command.append("-ocut")
command.append("4")
command.append("-odix")
command.append("_chm20")
### store rastered tiles in BIL format
command.append("-obil")
### maybe we should run on multiple cores
if sys.argv[arg_cores] != "1":
command.append("-cores")
command.append(sys.argv[arg_cores])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of las2dem
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in las2dem (CHM20) step.")
sys.exit(1)
### report success
gp.AddMessage("las2dem (CHM20) step done.")
###################################################
### sixth step: merge highest rasters to final CHM
###################################################
### create the command string for lasgrid.exe
command = ['"'+lasgrid_path+'"']
### maybe use '-verbose' option
if sys.argv[arg_verbose] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*.bil"+'"')
### merge all files
command.append("-merged")
### keep the highest
command.append("-highest")
### raster tile with requested step
command.append("-step")
command.append(sys.argv[arg_step].replace(",","."))
### store rastered tiles in output file
command.append("-o")
command.append(sys.argv[arg_output_file])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasgrid
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in lasgrid step.")
sys.exit(1)
### report success
gp.AddMessage("lasgrid step done.")
###################################################
### final step: clean-up all temporary files
###################################################
### create the command string for clean-up
command = ["del"]
### add temporary files wildcard
command.append('"'+empty_temp_dir+"\\pit_free_temp_tile*.*"+'"')
### report command string
gp.AddMessage("clean-up command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of clean-up
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. flightlines_to_single_CHM_pit_free failed in clean-up step.")
sys.exit(1)
### report success
gp.AddMessage("clean-up step done.")
### report happy end
gp.AddMessage("Success. flightlines_to_single_CHM_pit_free done.")
|
|
# Accelerator for pip, the Python package manager.
#
# Authors:
# - Adam Feuer <adam@adamfeuer.com>
# - Peter Odding <peter.odding@paylogic.com>
# Last Change: March 4, 2016
# URL: https://github.com/paylogic/pip-accel
#
# A word of warning: Do *not* use the cached_property decorator here, because
# it interacts badly with the metaclass magic performed by the base class. I
# wasted about an hour trying to get it to work but it became more and more
# apparent that it was never going to work the way I wanted it to :-)
"""
Amazon S3 cache backend.
This module implements a cache backend that stores distribution archives in a
user defined `Amazon S3 <http://aws.amazon.com/s3/>`_ bucket. To enable this
backend you need to define the configuration option
:attr:`~.Config.s3_cache_bucket` and configure your Amazon S3 API
credentials (see the readme for details).
Using S3 compatible storage services
------------------------------------
The Amazon S3 API has been implemented in several open source projects and
dozens of online services. To use pip-accel with an S3 compatible storage
service you can override the :attr:`~.Config.s3_cache_url` option. The
pip-accel test suite actually uses this option to test the S3 cache backend by
running FakeS3_ in the background and pointing pip-accel at the FakeS3 server.
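For example (a sketch; ``$PIP_ACCEL_S3_URL`` is assumed here to be the
environment variable behind :attr:`~.Config.s3_cache_url`, by analogy with
the ``$PIP_ACCEL_S3_BUCKET`` variable used elsewhere in this module)::

 $ export PIP_ACCEL_S3_BUCKET=pip-accel-cache
 $ export PIP_ACCEL_S3_URL=http://localhost:12345/
 $ pip-accel install requests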
Below are some usage notes that may be relevant for people evaluating this
option.
**Secure connections**
Boto_ has to be told whether to make a "secure" connection to the S3 API and
pip-accel assumes the ``https://`` URL scheme implies a secure connection
while the ``http://`` URL scheme implies a non-secure connection.
**Calling formats**
Boto_ has the concept of "calling formats" for the S3 API and to connect to
the official Amazon S3 API pip-accel needs to specify the "sub-domain calling
format" or the API calls will fail. When you specify a nonstandard S3 API URL
pip-accel tells Boto to use the "ordinary calling format" instead. This
differentiation will undoubtedly not be correct in all cases. If this is
bothering you then feel free to open an issue on GitHub to make pip-accel more
flexible in this regard.
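As a concrete illustration: with the default Amazon endpoint a bucket is
addressed in sub-domain style (``bucket-name.s3.amazonaws.com``) while with a
nonstandard URL like ``http://localhost:12345/`` the bucket name goes into the
path (``localhost:12345/bucket-name``).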
**Credentials**
If you don't specify S3 API credentials and the connection attempt to S3 fails
with "NoAuthHandlerFound: No handler was ready to authenticate" pip-accel will
fall back to an anonymous connection attempt. If that fails as well the S3
cache backend is disabled. It may be useful to note here that the pip-accel
test suite uses FakeS3_ and the anonymous connection fall back works fine.
A note about robustness
-----------------------
The Amazon S3 cache backend implemented in :mod:`pip_accel.caches.s3` is
specifically written to gracefully disable itself when it encounters known
errors such as:
- The configuration option :attr:`~.Config.s3_cache_bucket` is not set (i.e.
the user hasn't configured the backend yet).
- The :mod:`boto` package is not installed (i.e. the user ran ``pip install
pip-accel`` instead of ``pip install 'pip-accel[s3]'``).
- The connection to the S3 API can't be established (e.g. because API
credentials haven't been correctly configured).
- The connection to the configured S3 bucket can't be established (e.g. because
the bucket doesn't exist or the configured credentials don't provide access to
the bucket).
Additionally :class:`~pip_accel.caches.CacheManager` automatically disables
cache backends that raise exceptions on
:class:`~pip_accel.caches.AbstractCacheBackend.get()` and
:class:`~pip_accel.caches.AbstractCacheBackend.put()` operations. The end
result is that when the S3 backend fails you will just revert to using the
cache on the local file system.
Optionally, if you are using read-only credentials, you can disable
:func:`~S3CacheBackend.put()` operations by setting the configuration
option :attr:`~.Config.s3_cache_readonly`.
----
.. _FakeS3: https://github.com/jubos/fake-s3
.. _Boto: https://github.com/boto/boto
"""
# Standard library modules.
import logging
import os
# External dependencies.
from humanfriendly import coerce_boolean, Timer
# Modules included in our package.
from pip_accel import PatchedAttribute
from pip_accel.caches import AbstractCacheBackend
from pip_accel.compat import PY3, urlparse
from pip_accel.exceptions import CacheBackendDisabledError, CacheBackendError
from pip_accel.utils import AtomicReplace, makedirs
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
# The name of the boto.config configuration section that controls general
# settings like the number of retries and the HTTP socket timeout.
BOTO_CONFIG_SECTION = 'Boto'
# The name of the boto.config option that controls the number of retries.
BOTO_CONFIG_NUM_RETRIES_OPTION = 'num_retries'
# The name of the boto.config option that controls the HTTP socket timeout.
BOTO_CONFIG_SOCKET_TIMEOUT_OPTION = 'http_socket_timeout'
# The `coloredlogs' package installs a logging handler on the root logger which
# means all loggers automatically write their log messages to the standard
# error stream. In the case of Boto this is a bit confusing because Boto logs
# messages with the ERROR severity even when nothing is wrong, because it
# tries to connect to the Amazon EC2 metadata service which is (obviously) not
# available outside of Amazon EC2:
#
# boto[6851] DEBUG Retrieving credentials from metadata server.
# boto[6851] ERROR Caught exception reading instance data
#
# To avoid confusing users of pip-accel (i.e. this is not an error because it's
# properly handled) we silence the Boto logger. To avoid annoying people who
# actually want to debug Boto we'll also provide an escape hatch in the form of
# an environment variable.
if coerce_boolean(os.environ.get('PIP_ACCEL_SILENCE_BOTO', 'true')):
logging.getLogger('boto').setLevel(logging.FATAL)
class S3CacheBackend(AbstractCacheBackend):
"""The S3 cache backend stores distribution archives in a user defined Amazon S3 bucket."""
PRIORITY = 20
def get(self, filename):
"""
Download a distribution archive from the configured Amazon S3 bucket.
:param filename: The filename of the distribution archive (a string).
:returns: The pathname of a distribution archive on the local file
system or :data:`None`.
:raises: :exc:`.CacheBackendError` when any underlying method fails.
"""
timer = Timer()
self.check_prerequisites()
with PatchedBotoConfig():
# Check if the distribution archive is available.
raw_key = self.get_cache_key(filename)
logger.info("Checking if distribution archive is available in S3 bucket: %s", raw_key)
key = self.s3_bucket.get_key(raw_key)
if key is None:
logger.debug("Distribution archive is not available in S3 bucket.")
else:
# Download the distribution archive to the local binary index.
# TODO Shouldn't this use LocalCacheBackend.put() instead of
# implementing the same steps manually?!
logger.info("Downloading distribution archive from S3 bucket ..")
file_in_cache = os.path.join(self.config.binary_cache, filename)
makedirs(os.path.dirname(file_in_cache))
with AtomicReplace(file_in_cache) as temporary_file:
key.get_contents_to_filename(temporary_file)
logger.debug("Finished downloading distribution archive from S3 bucket in %s.", timer)
return file_in_cache
def put(self, filename, handle):
"""
Upload a distribution archive to the configured Amazon S3 bucket.
If the :attr:`~.Config.s3_cache_readonly` configuration option is
enabled this method does nothing.
:param filename: The filename of the distribution archive (a string).
:param handle: A file-like object that provides access to the
distribution archive.
:raises: :exc:`.CacheBackendError` when any underlying method fails.
"""
if self.config.s3_cache_readonly:
logger.info('Skipping upload to S3 bucket (using S3 in read only mode).')
else:
timer = Timer()
self.check_prerequisites()
with PatchedBotoConfig():
from boto.s3.key import Key
raw_key = self.get_cache_key(filename)
logger.info("Uploading distribution archive to S3 bucket: %s", raw_key)
key = Key(self.s3_bucket)
key.key = raw_key
try:
key.set_contents_from_file(handle)
except Exception as e:
logger.info("Encountered error writing to S3 bucket, "
"falling back to read only mode (exception: %s)", e)
self.config.s3_cache_readonly = True
else:
logger.info("Finished uploading distribution archive to S3 bucket in %s.", timer)
@property
def s3_bucket(self):
"""
Connect to the user defined Amazon S3 bucket.
Called on demand by :func:`get()` and :func:`put()`. Caches its
return value so that only a single connection is created.
:returns: A :class:`boto.s3.bucket.Bucket` object.
:raises: :exc:`.CacheBackendDisabledError` when the user hasn't
defined :attr:`.Config.s3_cache_bucket`.
:raises: :exc:`.CacheBackendError` when the connection to the Amazon
S3 bucket fails.
"""
if not hasattr(self, 'cached_bucket'):
self.check_prerequisites()
with PatchedBotoConfig():
from boto.exception import BotoClientError, BotoServerError, S3ResponseError
# The following try/except block translates unexpected exceptions
# raised by Boto into a CacheBackendError exception.
try:
# The following try/except block handles the expected exception
# raised by Boto when an Amazon S3 bucket does not exist.
try:
logger.debug("Connecting to Amazon S3 bucket: %s", self.config.s3_cache_bucket)
self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
except S3ResponseError as e:
if e.status == 404 and self.config.s3_cache_create_bucket:
logger.info("Amazon S3 bucket doesn't exist yet, creating it now: %s",
self.config.s3_cache_bucket)
self.s3_connection.create_bucket(self.config.s3_cache_bucket)
self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
else:
# Don't swallow exceptions we can't handle.
raise
except (BotoClientError, BotoServerError):
raise CacheBackendError("""
Failed to connect to the configured Amazon S3 bucket
{bucket}! Are you sure the bucket exists and is accessible
using the provided credentials? The Amazon S3 cache backend
will be disabled for now.
""", bucket=repr(self.config.s3_cache_bucket))
return self.cached_bucket
@property
def s3_connection(self):
"""
Connect to the Amazon S3 API.
If the connection attempt fails because Boto can't find credentials the
attempt is retried once with an anonymous connection.
Called on demand by :attr:`s3_bucket`.
:returns: A :class:`boto.s3.connection.S3Connection` object.
:raises: :exc:`.CacheBackendError` when the connection to the Amazon
S3 API fails.
"""
if not hasattr(self, 'cached_connection'):
self.check_prerequisites()
with PatchedBotoConfig():
import boto
from boto.exception import BotoClientError, BotoServerError, NoAuthHandlerFound
from boto.s3.connection import S3Connection, SubdomainCallingFormat, OrdinaryCallingFormat
try:
# Configure the number of retries and the socket timeout used
# by Boto. Based on the snippet given in the following email:
# https://groups.google.com/d/msg/boto-users/0osmP0cUl5Y/X4NdlMGWKiEJ
if not boto.config.has_section(BOTO_CONFIG_SECTION):
boto.config.add_section(BOTO_CONFIG_SECTION)
boto.config.set(BOTO_CONFIG_SECTION,
BOTO_CONFIG_NUM_RETRIES_OPTION,
str(self.config.s3_cache_retries))
boto.config.set(BOTO_CONFIG_SECTION,
BOTO_CONFIG_SOCKET_TIMEOUT_OPTION,
str(self.config.s3_cache_timeout))
logger.debug("Connecting to Amazon S3 API ..")
endpoint = urlparse(self.config.s3_cache_url)
host, _, port = endpoint.netloc.partition(':')
kw = dict(
host=host,
port=int(port) if port else None,
is_secure=(endpoint.scheme == 'https'),
calling_format=(SubdomainCallingFormat() if host == S3Connection.DefaultHost
else OrdinaryCallingFormat()),
)
try:
self.cached_connection = S3Connection(**kw)
except NoAuthHandlerFound:
logger.debug("Amazon S3 API credentials missing, retrying with anonymous connection ..")
self.cached_connection = S3Connection(anon=True, **kw)
except (BotoClientError, BotoServerError):
raise CacheBackendError("""
Failed to connect to the Amazon S3 API! Most likely your
credentials are not correctly configured. The Amazon S3
cache backend will be disabled for now.
""")
return self.cached_connection
def get_cache_key(self, filename):
"""
Compose an S3 cache key based on :attr:`.Config.s3_cache_prefix` and the given filename.
:param filename: The filename of the distribution archive (a string).
:returns: The cache key for the given filename (a string).
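For example, a hypothetical prefix ``cache`` and the filename
``requests-2.0.tar.gz`` compose to the key ``cache/requests-2.0.tar.gz``;
without a prefix the filename is used as-is.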
"""
return '/'.join(filter(None, [self.config.s3_cache_prefix, filename]))
def check_prerequisites(self):
"""
Validate the prerequisites required to use the Amazon S3 cache backend.
Makes sure the Amazon S3 cache backend is configured
(:attr:`.Config.s3_cache_bucket` is defined by the user) and
:mod:`boto` is available for use.
:raises: :exc:`.CacheBackendDisabledError` when a prerequisite fails.
"""
if not self.config.s3_cache_bucket:
raise CacheBackendDisabledError("""
To use Amazon S3 as a cache you have to set the environment
variable $PIP_ACCEL_S3_BUCKET and configure your Amazon S3 API
credentials (see the documentation for details).
""")
try:
__import__('boto')
except ImportError:
raise CacheBackendDisabledError("""
Boto is required to use Amazon S3 as a cache but it looks like
Boto is not installed! You can resolve this issue by installing
pip-accel using the command `pip install pip-accel[s3]'. The
Amazon S3 cache backend will be disabled for now.
""")
class PatchedBotoConfig(PatchedAttribute):
"""
Monkey patch for Boto's configuration handling.
Boto's configuration handling is kind of broken on Python 3 `as documented
here <https://github.com/boto/boto/issues/2617>`_. The :class:`PatchedBotoConfig`
class implements a context manager that temporarily patches Boto to work
around the bug.
Without this monkey patch it is impossible to configure the number of
retries on Python 3 which makes the pip-accel test suite horribly slow.
"""
def __init__(self):
"""Initialize a :class:`PatchedBotoConfig` object."""
from boto import config
from boto.pyami.config import Config, ConfigParser
self.instance = config
self.unbound_method = ConfigParser.get
super(PatchedBotoConfig, self).__init__(
object=Config,
attribute='get',
value=self.get,
enabled=PY3,
)
def get(self, section, name, default=None, **kw):
"""Replacement for :func:`boto.pyami.config.Config.get()`."""
try:
return self.unbound_method(self.instance, section, name, **kw)
except Exception:
return default
|
|
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from corepy.spre.spe import Register, InstructionOperand#, Variable, Label
class CALField(InstructionOperand):
def __init__(self, name, default = None):
InstructionOperand.__init__(self, name, default)
return
def render(self, value):
return value
# def __eq__(self, other):
# return type(self) == type(other)
class CALFlagField(CALField):
def __init__(self, name, ilstr, default = None):
self.ilstr = ilstr
InstructionOperand.__init__(self, name, default)
return
def check(self, value):
return value in (True, False)
def render(self, value):
if value:
return self.ilstr
return ''
# def __eq__(self, other):
# return type(self) == type(other) and self.name == other.name
class RegisterField(CALField):
def check(self, value):
return True
def render(self, value):
if isinstance(value, Register):
return value.render()
else:
return str(value)
class RELOPField(CALField):
def check(self, value):
return value in ('eq', 'ge', 'gt', 'le', 'lt', 'ne')
def render(self, value):
return '_relop(' + value + ')'
class ZEROOPField(CALField):
def check(self, value):
return value in ('zero', 'fltmax', 'inf_else_max', 'infinity')
def render(self, value):
return '_zeroop(' + value + ')'
class LOGICOPField(CALField):
def check(self, value):
return value in ('eq', 'ne')
def render(self, value):
return '_logicop(' + value + ')'
class USAGEField(CALField):
def check(self, value):
#if value == None:
# return True
return value in ('backcolor', 'color', 'fog', 'generic', 'pointsize', 'pos', 'wincoord', None)
def render(self, value):
if value == None:
return ''
else:
return '_usage(' + value + ')'
class INTERPField(CALField):
def check(self, value):
return value in ('constant', 'linear', 'centroid', 'noperspective', 'noper_centroid', 'noper_sample', 'sample', 'notused', 'linear_centroid', 'linear_noperspective', 'linear_noper_centroid', 'linear_noper_sample', 'linear_sample')
def render(self, value):
if value != 'notused':
return '_interp(' + value + ')'
else:
return ''
class SHARINGMODEField(CALField):
def check(self, value):
return value in ('rel', 'abs', None)
def render(self, value):
if value == None:
return ''
else:
return '_sharingMode(' + value + ')'
class TYPEField(CALField):
def check(self, value):
return value in (1, 2, 3, '1d', '2d', '2dms_array', '2dmsaa', '3d', 'cubemap', 'cubemaparray', 'unkown', 'buffer')
def render(self, value):
if type(value)==str:
return value
else:
return str(value) + 'd'
# this one is different because of the UNNORM option - '_type(' and ')' are handled by the instruction
# Furthermore, some instructions handle this differently, such as dclpt, which does not
# have the unnorm flag
class RESOURCEField(CALField):
def check(self, value):
return isinstance(value, (int, long)) and value >= 0 and value < 256
def render(self, value):
return '_resource(' + str(value) + ')'
class RESOURCEIDField(CALField):
def check(self, value):
return isinstance(value, (int, long)) and value >= 0 and value < 256
def render(self, value):
return '_id(' + str(value) + ')'
class SAMPLERField(CALField):
def check(self, value):
return isinstance(value, (int, long)) and value >= 0 and value < 16
def render(self, value):
return '_sampler(' + str(value) + ')'
#class IEEEField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_ieee'
# else:
# return ''
#class UINTField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_uint'
# else:
# return ''
#class REPEATField(CALFlagField):
# def render(self, value):
# if value == True:
# return ' repeat'
# else:
# return ''
#class NEIGHBOREXCHField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_neighborExch'
# else:
# return ''
#class UNNORMField(CALFlagField):
# def render(self, value):
# if value == True:
# return ',unnorm'
# else:
# return ''
#class THREADSField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_threads'
# else:
# return ''
#class LDSField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_lds'
# else:
# return ''
#class MEMORYField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_memory'
# else:
# return ''
#class SRField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_sr'
# else:
# return ''
#class CENTERField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_center'
# else:
# return ''
#class BIASField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_bias'
# else:
# return ''
#class INVERTField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_invert'
# else:
# return ''
#class CENTEREDField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_centered'
# else:
# return ''
class STAGEField(CALField):
def check(self, value):
if value == None:
return True
if isinstance(value, (int, long)) and value >= 0 and value < 256:
return True
return False
def render(self, value):
if value == None:
return ''
else:
return '_stage(' + str(value) + ')'
class LOFFSETField(CALField):
def check(self, value):
if value == None:
return True
if value in (0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60):
return True
return False
def render(self, value):
if value == None:
return ''
else:
return '_lOffset(' + str(value) + ')'
#class SAMPLEField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_sample'
# else:
# return ''
class STREAMField(CALField):
def check(self, value):
if isinstance(value, (int, long)) and value >= 0 and value < 16:
return True
return False
def render(self, value):
return '_exportStream(' + str(value) + ')'
class OFFSETField(CALField):
def check(self, value):
if isinstance(value, (int, long)) and value >= 0 and value < 512:
return True
return False
def render(self, value):
return '_elemOffset(' + str(value) + ')'
class ELEMField(CALField):
def check(self, value):
if isinstance(value, (int, long)) and value >= 0 and value < 16:
return True
return False
def render(self, value):
return '_elem(' + str(value) + ')'
class VELEMField(CALField):
def check(self, value):
if isinstance(value, (int, long)) and value >= 0 and value < 64:
return True
return False
def render(self, value):
return '_elem(' + str(value) + ')'
class AOFFIMMIField(CALField):
def check(self, value):
if value == None or value == ():
return True
if len(value) != 3:
return False
p = 2 ** 16
return False not in [isinstance(v, (int, long)) and v >= 0 and v < p for v in value]
#try:
# valid = True
# if len(value) == 3:
# if not isinstance(value[0], (int, long)) or value[0] < 0 or value[0] > pow(2, 16):
# valid = False
# if not isinstance(value[1], (int, long)) or value[1] < 0 or value[1] > pow(2, 16):
# valid = False
# if not isinstance(value[2], (int, long)) or value[2] < 0 or value[2] > pow(2, 16):
# valid = False
# return valid
# return False
#except:
# return False
def render(self, value):
if value == None or value == ():
return ''
else:
return '_aoffimmi(' + str(value[0]) + ',' + str(value[1]) + ',' + str(value[2]) + ')'
class FMTField(CALField):
def check(self, value):
vals = ('float', 'mixed', 'sint', 'snorm', 'srgb', 'uint', 'unkown', 'unorm')
if value in vals:
return True
if len(value) != 4:
return False
return False not in [v in vals for v in value]
#else:
# valid = True
# if len(value) == 4:
# if not value[0] in vals:
# valid = False
# if not value[1] in vals:
# valid = False
# if not value[2] in vals:
# valid = False
# if not value[3] in vals:
# valid = False
# return valid
# return False
def render(self, value):
if value == None:
return ''
else:
if type(value) == str:
return '_fmtx(' + value + ')_fmty(' + value + ')_fmtz(' + value + ')_fmtw(' + value + ')'
else:
return '_fmtx(' + value[0] + ')_fmty(' + value[1] + ')_fmtz(' + value[2] + ')_fmtw(' + value[3] + ')'
class XYZWDefaultField(CALField):
def check(self, value):
if len(value) != 4:
return False
return False not in [v in (0.0, 1.0, None) for v in value]
#valid = True
#if not(value[0] == 0.0 or value[0] == 1.0 or value[0] == None):
# valid = False
#if not(value[1] == 0.0 or value[1] == 1.0 or value[1] == None):
# valid = False
#if not(value[2] == 0.0 or value[2] == 1.0 or value[2] == None):
# valid = False
#if not(value[3] == 0.0 or value[3] == 1.0 or value[3] == None):
# valid = False
#return valid
def render(self, value):
strvalue = ['' for i in range(4)]
for i, val in enumerate(value):
if val == None:
strvalue[i] = '*'
else:
strvalue[i] = str(val)
return '_x(' + strvalue[0] + ')_y(' + strvalue[1] + ')_z(' + strvalue[2] + ')_w(' + strvalue[3] + ')'
class XYZWImportField(CALField):
def check(self, value):
valid = True
if len(value) != 4:
return False
#return False not in [v not in ('0', '1', '*', '-', None) for v in value]
if not(value[0] == '0' or value[0] == '1' or value[0] == '*' or value[0] == '-' or value[0] == None):
valid = False
if not(value[1] == '0' or value[1] == '1' or value[1] == '*' or value[1] == '-' or value[1] == None):
valid = False
if not(value[2] == '0' or value[2] == '1' or value[2] == '*' or value[2] == '-' or value[2] == None):
valid = False
if not(value[3] == '0' or value[3] == '1' or value[3] == '*' or value[3] == '-' or value[3] == None):
valid = False
return valid
def render(self, value):
strvalue = ['' for i in range(4)]
for i, val in enumerate(value):
if val == None:
strvalue[i] = '*'
else:
strvalue[i] = str(val)
return '_x(' + strvalue[0] + ')_y(' + strvalue[1] + ')_z(' + strvalue[2] + ')_w(' + strvalue[3] + ')'
class TOPOLOGYField(CALField):
def check(self, value):
return value in ('line', 'line_adj', 'point', 'triangle', 'triangle_adj')
def render(self, value):
return str(value)
class OUTPUTTOPOLOGYField(CALField):
def check(self, value):
return value in ('linestrip', 'pointlist', 'trianglestrip')
def render(self, value):
return str(value)
class LiteralField(CALField):
def check(self, value):
return isinstance(value, (int, long, float))
def render(self, value):
return str(value)
class IntegerLiteralField(CALField):
def check(self, value):
return isinstance(value, (int, long))
def render(self, value):
return str(value)
class IntegerLabelField(IntegerLiteralField):
def check(self, value):
return isinstance(value, (int, long)) and value >= 0
def render(self, value):
return str(value)
class SHIFTField(CALField):
def check(self, value):
return value in ('', 'x2', 'x4', 'x8', 'd2', 'd4', 'd8')
def render(self, value):
if value != '':
return '_' + value
else:
return ''
#class SATField(CALFlagField):
# def render(self, value):
# if value == True:
# return '_sat'
# else:
# return ''
class MATRIXField(CALField):
def check(self, value):
return value in ('3x2', '3x3', '3x4', '4x3', '4x4')
def render(self, value):
return '_matrix(' + value + ')'
class USAGEINDEXField(CALField):
def check(self, value):
return isinstance(value, (int, long)) and value >= 0 and value < 256
def render(self, value):
return '_usageIndex(' + str(value) + ')'
class PARAMField(CALField):
def check(self, value):
return isinstance(value, (int, long)) and value >= 0 and value < 256
def render(self, value):
return '_param(' + str(value) + ')'
class COORDMODEField(CALField):
def check(self, value):
return value in ('normalized', 'unkown', 'unnormalized')
def render(self, value):
return '_coordmode(' + value + ')'
class BOOLField(CALField):
def check(self, value):
return isinstance(value, (int, long)) and value >= 0 and value < 2
def render(self, value):
return str(value)
class NOISETYPEField(CALField):
def check(self, value):
return value in ('perlin1D', 'perlin2D', 'perlin3D', 'perlin4D')
def render(self, value):
return '_type(' + value + ')'
class CMPVALField(CALField):
def check(self, value):
return value in (0.0, 0.5, 1.0, -0.5, -1.0)
def render(self, value):
return '_cmpval(' + str(value) + ')'
OPCD0 = CALField("OPCD0")
TRGT = RegisterField("TRGT")
SRC0 = RegisterField("SRC0")
SRC1 = RegisterField("SRC1")
SRC2 = RegisterField("SRC2")
SRC3 = RegisterField("SRC3")
RELOP = RELOPField("RELOP")
ZEROOP = ZEROOPField("ZEROOP", 'inf_else_max')
LOGICOP = LOGICOPField("LOGICOP")
INTERP = INTERPField("INTERP", 'notused')
RESOURCE = RESOURCEField("RESOURCE")
RESOURCEID = RESOURCEIDField("RESOURCEID") # this would be binary identical to RESOURCE, but the text name is different for some instructions
SAMPLER = SAMPLERField("SAMPLER")
LBL = IntegerLabelField("LBL", 0)
IEEE = CALFlagField("IEEE", "_ieee", False)
UINT = CALFlagField("UINT", "_uint", False)
REPEAT = CALFlagField("REPEAT", " repeat", False)
STAGE = STAGEField("STAGE", 0)
SAMPLE = CALFlagField("SAMPLE", "_sample", False)
AOFFIMMI = AOFFIMMIField("AOFFIMMI", ())
USAGE = USAGEField("USAGE", 'interp')
TYPE = TYPEField("TYPE")
UNNORM = CALFlagField("UNNORM", ",unnorm", False)
FMT = FMTField("FMT")
STREAM = STREAMField("STREAM")
OFFSET = OFFSETField("OFFSET")
ELEM = ELEMField("ELEM")
VELEM = VELEMField("VELEM")
NEIGHBOREXCH = CALFlagField("NEIGHBOREXCH", "_neighborExch", False)
SHARINGMODE = SHARINGMODEField("SHARINGMODE", None)
LOFFSET = LOFFSETField("LOFFSET", 0)
THREADS = CALFlagField("THREADS", "_threads", False)
LDS = CALFlagField("LDS", "_lds", False)
MEMORY = CALFlagField("MEMORY", "_memory", False)
SR = CALFlagField("SR", "_sr", False)
XYZWDefault = XYZWDefaultField("XYZWDefault")
XYZWImport = XYZWImportField("XYZWImport")
TOPOLOGY = TOPOLOGYField("TOPOLOGY")
OUTPUTTOPOLOGY = OUTPUTTOPOLOGYField("OUTPUTTOPOLOGY")
L0 = LiteralField("L0")
L1 = LiteralField("L1")
L2 = LiteralField("L2")
L3 = LiteralField("L3")
IL0 = IntegerLiteralField("IL0")
SHIFT = SHIFTField("SHIFT", '')
SAT = CALFlagField("SAT", "_sat", False)
MATRIX = MATRIXField("MATRIX")
CENTER = CALFlagField("CENTER", "_center", True)
BIAS = CALFlagField("BIAS", "_bias", False)
INVERT = CALFlagField("INVERT", "_invert", False)
CENTERED = CALFlagField("CENTERED", "_centered", False)
USAGEINDEX = USAGEINDEXField("USAGEINDEX")
CENTROID = CALFlagField("CENTROID", "_centroid", False)
ORIGIN = CALFlagField("ORIGIN", "_origin", False) # used in dclpin - maybe...
PARAM = PARAMField("PARAM")
BOOL = BOOLField("BOOL")
ROUND = CALFlagField("ROUND", "_round", False)
NOISETYPE = NOISETYPEField("NOISETYPE")
COORDMODE = COORDMODEField("COORDMODE")
NORMALIZE = CALFlagField("NORMALIZE", "_normalize", False)
CMPVAL = CMPVALField("CMPVAL")
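# Editor's sketch (not part of the original corepy module): a few sanity
# checks showing how the field instances above render into CAL IL text
# fragments. The values are arbitrary illustrations.
if __name__ == '__main__':
    assert RELOP.render('eq') == '_relop(eq)'
    assert SAMPLER.render(3) == '_sampler(3)'
    assert IEEE.render(True) == '_ieee'
    assert IEEE.render(False) == ''
    assert SHIFT.render('x2') == '_x2'
    assert XYZWDefault.render((0.0, 1.0, None, 1.0)) == '_x(0.0)_y(1.0)_z(*)_w(1.0)'
    print 'all field render checks passed'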
|
|
"""Tests for sgf_grammar.py."""
import unittest
from betago.gosgf import sgf_grammar
class SgfGrammarTestCase(unittest.TestCase):
def test_is_valid_property_identifier(tc):
ivpi = sgf_grammar.is_valid_property_identifier
tc.assertIs(ivpi(b"B"), True)
tc.assertIs(ivpi(b"PB"), True)
tc.assertIs(ivpi(b"ABCDEFGH"), True)
tc.assertIs(ivpi(b"ABCDEFGHI"), False)
tc.assertIs(ivpi(b""), False)
tc.assertIs(ivpi(b"b"), False)
tc.assertIs(ivpi(b"Player"), False)
tc.assertIs(ivpi(b"P2"), False)
tc.assertIs(ivpi(b" PB"), False)
tc.assertIs(ivpi(b"PB "), False)
tc.assertIs(ivpi(b"P B"), False)
tc.assertIs(ivpi(b"PB\x00"), False)
def test_is_valid_property_value(tc):
ivpv = sgf_grammar.is_valid_property_value
tc.assertIs(ivpv(b""), True)
tc.assertIs(ivpv(b"hello world"), True)
tc.assertIs(ivpv(b"hello\nworld"), True)
tc.assertIs(ivpv(b"hello \x00 world"), True)
tc.assertIs(ivpv(b"hello \xa3 world"), True)
tc.assertIs(ivpv(b"hello \xc2\xa3 world"), True)
tc.assertIs(ivpv(b"hello \\-) world"), True)
tc.assertIs(ivpv(b"hello (;[) world"), True)
tc.assertIs(ivpv(b"[hello world]"), False)
tc.assertIs(ivpv(b"hello ] world"), False)
tc.assertIs(ivpv(b"hello \\] world"), True)
tc.assertIs(ivpv(b"hello world \\"), False)
tc.assertIs(ivpv(b"hello world \\\\"), True)
tc.assertIs(ivpv(b"x" * 70000), True)
def test_tokeniser(tc):
tokenise = sgf_grammar.tokenise
tc.assertEqual(tokenise(b"(;B[ah][]C[a\xa3b])")[0],
[('D', b'('),
('D', b';'),
('I', b'B'),
('V', b'ah'),
('V', b''),
('I', b'C'),
('V', b'a\xa3b'),
('D', b')')])
def check_complete(s, *args):
tokens, tail_index = tokenise(s, *args)
tc.assertEqual(tail_index, len(s))
return len(tokens)
def check_incomplete(s, *args):
tokens, tail_index = tokenise(s, *args)
return len(tokens), tail_index
# check surrounding junk
tc.assertEqual(check_complete(b""), 0)
tc.assertEqual(check_complete(b"junk (;B[ah])"), 5)
tc.assertEqual(check_incomplete(b"junk"), (0, 0))
tc.assertEqual(check_incomplete(b"junk (B[ah])"), (0, 0))
tc.assertEqual(check_incomplete(b"(;B[ah]) junk"), (5, 8))
# check paren-balance count
tc.assertEqual(check_incomplete(b"(; ))(([ag]B C[ah])"), (3, 4))
tc.assertEqual(check_incomplete(b"(;( )) (;)"), (5, 6))
tc.assertEqual(check_incomplete(b"(;(()())) (;)"), (9, 9))
# check start_position
tc.assertEqual(check_complete(b"(; ))(;B[ah])", 4), 5)
tc.assertEqual(check_complete(b"(; ))junk (;B[ah])", 4), 5)
tc.assertEqual(check_complete(b"(;XX[abc][def]KO[];B[bc])"), 11)
tc.assertEqual(check_complete(b"( ;XX[abc][def]KO[];B[bc])"), 11)
tc.assertEqual(check_complete(b"(; XX[abc][def]KO[];B[bc])"), 11)
tc.assertEqual(check_complete(b"(;XX [abc][def]KO[];B[bc])"), 11)
tc.assertEqual(check_complete(b"(;XX[abc] [def]KO[];B[bc])"), 11)
tc.assertEqual(check_complete(b"(;XX[abc][def] KO[];B[bc])"), 11)
tc.assertEqual(check_complete(b"(;XX[abc][def]KO [];B[bc])"), 11)
tc.assertEqual(check_complete(b"(;XX[abc][def]KO[] ;B[bc])"), 11)
tc.assertEqual(check_complete(b"(;XX[abc][def]KO[]; B[bc])"), 11)
tc.assertEqual(check_complete(b"(;XX[abc][def]KO[];B [bc])"), 11)
tc.assertEqual(check_complete(b"(;XX[abc][def]KO[];B[bc] )"), 11)
tc.assertEqual(check_complete(b"( ;\nB\t[ah]\f[ef]\v)"), 6)
tc.assertEqual(check_complete(b"(;[Ran\xc2\xa3dom :\nstu@ff][ef]"), 4)
tc.assertEqual(check_complete(b"(;[ah)])"), 4)
tc.assertEqual(check_incomplete(b"(;B[ag"), (3, 3))
tc.assertEqual(check_incomplete(b"(;B[ag)"), (3, 3))
tc.assertEqual(check_incomplete(b"(;AddBlack[ag])"), (3, 3))
tc.assertEqual(check_incomplete(b"(;+B[ag])"), (2, 2))
tc.assertEqual(check_incomplete(b"(;B+[ag])"), (3, 3))
tc.assertEqual(check_incomplete(b"(;B[ag]+)"), (4, 7))
tc.assertEqual(check_complete(r"(;[ab \] cd][ef]".encode('ascii')), 4)
tc.assertEqual(check_complete(r"(;[ab \] cd\\][ef]".encode('ascii')), 4)
tc.assertEqual(check_complete(r"(;[ab \] cd\\\\][ef]".encode('ascii')), 4)
tc.assertEqual(check_complete(r"(;[ab \] \\\] cd][ef]".encode('ascii')), 4)
tc.assertEqual(check_incomplete(r"(;B[ag\])".encode('ascii')), (3, 3))
tc.assertEqual(check_incomplete(r"(;B[ag\\\])".encode('ascii')), (3, 3))
def test_parser_structure(tc):
parse_sgf_game = sgf_grammar.parse_sgf_game
def shape(s):
coarse_game = parse_sgf_game(s)
return len(coarse_game.sequence), len(coarse_game.children)
tc.assertEqual(shape(b"(;C[abc]KO[];B[bc])"), (2, 0))
tc.assertEqual(shape(b"initial junk (;C[abc]KO[];B[bc])"), (2, 0))
tc.assertEqual(shape(b"(;C[abc]KO[];B[bc]) final junk"), (2, 0))
tc.assertEqual(shape(b"(;C[abc]KO[];B[bc]) (;B[ag])"), (2, 0))
tc.assertRaisesRegexp(ValueError, "no SGF data found",
parse_sgf_game, b"")
tc.assertRaisesRegexp(ValueError, "no SGF data found",
parse_sgf_game, b"junk")
tc.assertRaisesRegexp(ValueError, "no SGF data found",
parse_sgf_game, b"()")
tc.assertRaisesRegexp(ValueError, "no SGF data found",
parse_sgf_game, b"(B[ag])")
tc.assertRaisesRegexp(ValueError, "no SGF data found",
parse_sgf_game, b"B[ag]")
tc.assertRaisesRegexp(ValueError, "no SGF data found",
parse_sgf_game, b"[ag]")
tc.assertEqual(shape(b"(;C[abc]AB[ab][bc];B[bc])"), (2, 0))
tc.assertEqual(shape(b"(;C[abc] AB[ab]\n[bc]\t;B[bc])"), (2, 0))
tc.assertEqual(shape(b"(;C[abc]KO[];;B[bc])"), (3, 0))
tc.assertEqual(shape(b"(;)"), (1, 0))
tc.assertRaisesRegexp(ValueError, "property with no values",
parse_sgf_game, b"(;B)")
tc.assertRaisesRegexp(ValueError, "unexpected value",
parse_sgf_game, b"(;[ag])")
tc.assertRaisesRegexp(ValueError, "unexpected value",
parse_sgf_game, b"(;[ag][ah])")
tc.assertRaisesRegexp(ValueError, "unexpected value",
parse_sgf_game, b"(;[B][ag])")
tc.assertRaisesRegexp(ValueError, "unexpected end of SGF data",
parse_sgf_game, b"(;B[ag]")
tc.assertRaisesRegexp(ValueError, "unexpected end of SGF data",
parse_sgf_game, b"(;B[ag][)]")
tc.assertRaisesRegexp(ValueError, "property with no values",
parse_sgf_game, b"(;B;W[ah])")
tc.assertRaisesRegexp(ValueError, "unexpected value",
parse_sgf_game, b"(;B[ag](;[ah]))")
tc.assertRaisesRegexp(ValueError, "property with no values",
parse_sgf_game, b"(;B W[ag])")
def test_parser_tree_structure(tc):
parse_sgf_game = sgf_grammar.parse_sgf_game
def shape(s):
coarse_game = parse_sgf_game(s)
return len(coarse_game.sequence), len(coarse_game.children)
tc.assertEqual(shape(b"(;C[abc]AB[ab](;B[bc]))"), (1, 1))
tc.assertEqual(shape(b"(;C[abc]AB[ab](;B[bc])))"), (1, 1))
tc.assertEqual(shape(b"(;C[abc]AB[ab](;B[bc])(;B[bd]))"), (1, 2))
def shapetree(s):
def _shapetree(coarse_game):
return (
len(coarse_game.sequence),
[_shapetree(pg) for pg in coarse_game.children])
return _shapetree(parse_sgf_game(s))
tc.assertEqual(shapetree(b"(;C[abc]AB[ab](;B[bc])))"),
(1, [(1, [])])
)
tc.assertEqual(shapetree(b"(;C[abc]AB[ab](;B[bc]))))"),
(1, [(1, [])])
)
tc.assertEqual(shapetree(b"(;C[abc]AB[ab](;B[bc])(;B[bd])))"),
(1, [(1, []), (1, [])])
)
tc.assertEqual(shapetree(b"""
(;C[abc]AB[ab];C[];C[]
(;B[bc])
(;B[bd];W[ca] (;B[da])(;B[db];W[ea]) )
)"""),
(3, [
(1, []),
(2, [(1, []), (2, [])])
])
)
tc.assertRaisesRegexp(ValueError, "unexpected end of SGF data",
parse_sgf_game, b"(;B[ag];W[ah](;B[ai])")
tc.assertRaisesRegexp(ValueError, "empty sequence",
parse_sgf_game, b"(;B[ag];())")
tc.assertRaisesRegexp(ValueError, "empty sequence",
parse_sgf_game, b"(;B[ag]())")
tc.assertRaisesRegexp(ValueError, "empty sequence",
parse_sgf_game, b"(;B[ag]((;W[ah])(;W[ai]))")
tc.assertRaisesRegexp(ValueError, "unexpected node",
parse_sgf_game, b"(;B[ag];W[ah](;B[ai]);W[bd])")
tc.assertRaisesRegexp(ValueError, "property value outside a node",
parse_sgf_game, b"(;B[ag];(W[ah];B[ai]))")
tc.assertRaisesRegexp(ValueError, "property value outside a node",
parse_sgf_game, b"(;B[ag](;W[ah];)B[ai])")
tc.assertRaisesRegexp(ValueError, "property value outside a node",
parse_sgf_game, b"(;B[ag](;W[ah])(B[ai]))")
def test_parser_properties(tc):
parse_sgf_game = sgf_grammar.parse_sgf_game
def props(s):
coarse_game = parse_sgf_game(s)
return coarse_game.sequence
tc.assertEqual(props(b"(;C[abc]KO[]AB[ai][bh][ee];B[ bc])"),
[{b'C': [b'abc'], b'KO': [b''], b'AB': [b'ai', b'bh', b'ee']},
{b'B': [b' bc']}])
tc.assertEqual(props(r"(;C[ab \] \) cd\\])".encode('ascii')),
[{b'C': [r"ab \] \) cd\\".encode('ascii')]}])
tc.assertEqual(props(b"(;XX[1]YY[2]XX[3]YY[4])"),
[{b'XX': [b'1', b'3'], b'YY' : [b'2', b'4']}])
def test_parse_sgf_collection(tc):
parse_sgf_collection = sgf_grammar.parse_sgf_collection
tc.assertRaisesRegexp(ValueError, "no SGF data found",
parse_sgf_collection, b"")
tc.assertRaisesRegexp(ValueError, "no SGF data found",
parse_sgf_collection, b"()")
games = parse_sgf_collection(b"(;C[abc]AB[ab];X[];X[](;B[bc]))")
tc.assertEqual(len(games), 1)
tc.assertEqual(len(games[0].sequence), 3)
games = parse_sgf_collection(b"(;X[1];X[2];X[3](;B[bc])) (;Y[1];Y[2])")
tc.assertEqual(len(games), 2)
tc.assertEqual(len(games[0].sequence), 3)
tc.assertEqual(len(games[1].sequence), 2)
games = parse_sgf_collection(
b"dummy (;X[1];X[2];X[3](;B[bc])) junk (;Y[1];Y[2]) Nonsense")
tc.assertEqual(len(games), 2)
tc.assertEqual(len(games[0].sequence), 3)
tc.assertEqual(len(games[1].sequence), 2)
games = parse_sgf_collection(
b"(( (;X[1];X[2];X[3](;B[bc])) ();) (;Y[1];Y[2]) )(Nonsense")
tc.assertEqual(len(games), 2)
tc.assertEqual(len(games[0].sequence), 3)
tc.assertEqual(len(games[1].sequence), 2)
with tc.assertRaises(ValueError) as ar:
parse_sgf_collection(
b"(( (;X[1];X[2];X[3](;B[bc])) ();) (;Y[1];Y[2]")
tc.assertEqual(str(ar.exception),
"error parsing game 1: unexpected end of SGF data")
def test_parse_compose(tc):
pc = sgf_grammar.parse_compose
tc.assertEqual(pc(b"word"), (b"word", None))
tc.assertEqual(pc(b"word:"), (b"word", b""))
tc.assertEqual(pc(b"word:?"), (b"word", b"?"))
tc.assertEqual(pc(b"word:123"), (b"word", b"123"))
tc.assertEqual(pc(b"word:123:456"), (b"word", b"123:456"))
tc.assertEqual(pc(b":123"), (b"", b"123"))
tc.assertEqual(pc(r"word\:more".encode('ascii')), (r"word\:more".encode('ascii'), None))
tc.assertEqual(pc(r"word\:more:?".encode('ascii')), (r"word\:more".encode('ascii'), b"?"))
tc.assertEqual(pc(r"word\\:more:?".encode('ascii')), (b"word\\\\", b"more:?"))
tc.assertEqual(pc(r"word\\\:more:?".encode('ascii')),
(r"word\\\:more".encode('ascii'), b"?"))
tc.assertEqual(pc(b"word\\\nmore:123"), (b"word\\\nmore", b"123"))
def test_text_value(tc):
text_value = sgf_grammar.text_value
tc.assertEqual(text_value(b"abc "), b"abc ")
tc.assertEqual(text_value(b"ab c"), b"ab c")
tc.assertEqual(text_value(b"ab\tc"), b"ab c")
tc.assertEqual(text_value(b"ab \tc"), b"ab c")
tc.assertEqual(text_value(b"ab\nc"), b"ab\nc")
tc.assertEqual(text_value(b"ab\\\nc"), b"abc")
tc.assertEqual(text_value(b"ab\\\\\nc"), b"ab\\\nc")
tc.assertEqual(text_value(b"ab\xa0c"), b"ab\xa0c")
tc.assertEqual(text_value(b"ab\rc"), b"ab\nc")
tc.assertEqual(text_value(b"ab\r\nc"), b"ab\nc")
tc.assertEqual(text_value(b"ab\n\rc"), b"ab\nc")
tc.assertEqual(text_value(b"ab\r\n\r\nc"), b"ab\n\nc")
tc.assertEqual(text_value(b"ab\r\n\r\n\rc"), b"ab\n\n\nc")
tc.assertEqual(text_value(b"ab\\\r\nc"), b"abc")
tc.assertEqual(text_value(b"ab\\\n\nc"), b"ab\nc")
tc.assertEqual(text_value(b"ab\\\tc"), b"ab c")
# These can't actually appear as SGF PropValues; anything sane will do
tc.assertEqual(text_value(b"abc\\"), b"abc")
tc.assertEqual(text_value(b"abc]"), b"abc]")
def test_simpletext_value(tc):
simpletext_value = sgf_grammar.simpletext_value
tc.assertEqual(simpletext_value(b"abc "), b"abc ")
tc.assertEqual(simpletext_value(b"ab c"), b"ab c")
tc.assertEqual(simpletext_value(b"ab\tc"), b"ab c")
tc.assertEqual(simpletext_value(b"ab \tc"), b"ab c")
tc.assertEqual(simpletext_value(b"ab\nc"), b"ab c")
tc.assertEqual(simpletext_value(b"ab\\\nc"), b"abc")
tc.assertEqual(simpletext_value(b"ab\\\\\nc"), b"ab\\ c")
tc.assertEqual(simpletext_value(b"ab\xa0c"), b"ab\xa0c")
tc.assertEqual(simpletext_value(b"ab\rc"), b"ab c")
tc.assertEqual(simpletext_value(b"ab\r\nc"), b"ab c")
tc.assertEqual(simpletext_value(b"ab\n\rc"), b"ab c")
tc.assertEqual(simpletext_value(b"ab\r\n\r\nc"), b"ab c")
tc.assertEqual(simpletext_value(b"ab\r\n\r\n\rc"), b"ab c")
tc.assertEqual(simpletext_value(b"ab\\\r\nc"), b"abc")
tc.assertEqual(simpletext_value(b"ab\\\n\nc"), b"ab c")
tc.assertEqual(simpletext_value(b"ab\\\tc"), b"ab c")
# These can't actually appear as SGF PropValues; anything sane will do
tc.assertEqual(simpletext_value(b"abc\\"), b"abc")
tc.assertEqual(simpletext_value(b"abc]"), b"abc]")
def test_escape_text(tc):
tc.assertEqual(sgf_grammar.escape_text(b"abc"), b"abc")
tc.assertEqual(sgf_grammar.escape_text(r"a\bc".encode('ascii')), r"a\\bc".encode('ascii'))
tc.assertEqual(sgf_grammar.escape_text(r"ab[c]".encode('ascii')),
r"ab[c\]".encode('ascii'))
tc.assertEqual(sgf_grammar.escape_text(r"a\]bc".encode('ascii')),
r"a\\\]bc".encode('ascii'))
def test_text_roundtrip(tc):
def roundtrip(s):
return sgf_grammar.text_value(sgf_grammar.escape_text(s))
tc.assertEqual(roundtrip(b"abc"), b"abc")
tc.assertEqual(roundtrip(r"a\bc".encode('ascii')), r"a\bc".encode('ascii'))
tc.assertEqual(roundtrip(b"abc\\"), b"abc\\")
tc.assertEqual(roundtrip(b"ab]c"), b"ab]c")
tc.assertEqual(roundtrip(b"abc]"), b"abc]")
tc.assertEqual(roundtrip(r"abc\]".encode('ascii')), r"abc\]".encode('ascii'))
tc.assertEqual(roundtrip(b"ab\nc"), b"ab\nc")
tc.assertEqual(roundtrip(b"ab\n c"), b"ab\n c")
tc.assertEqual(roundtrip(b"ab\tc"), b"ab c")
tc.assertEqual(roundtrip(b"ab\r\nc\n"), b"ab\nc\n")
def test_serialise_game_tree(tc):
serialised = (b"(;AB[aa][ab][ac]C[comment \xa3];W[ab];C[];C[]"
b"(;B[bc])(;B[bd];W[ca](;B[da])(;B[db];\n"
b"W[ea])))\n")
coarse_game = sgf_grammar.parse_sgf_game(serialised)
tc.assertEqual(sgf_grammar.serialise_game_tree(coarse_game), serialised)
tc.assertEqual(sgf_grammar.serialise_game_tree(coarse_game, wrap=None),
serialised.replace(b"\n", b"")+b"\n")
|
|
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
'''
Script to list all NBAR datasets in DB which are not loaded into THREDDS
GA only - deprecated
Created on 25/09/2013
@author: u76345
'''
import xml.dom.minidom
import argparse
from datetime import datetime
import logging, os, re, copy
from agdc import DataCube
from EOtools.utils import log_multiline
#===============================================================================
# # Set top level standard output
# console_handler = logging.StreamHandler(sys.stdout)
# console_handler.setLevel(logging.INFO)
# console_formatter = logging.Formatter('%(message)s')
# console_handler.setFormatter(console_formatter)
#===============================================================================
logger = logging.getLogger('datacube.' + __name__)
class ThreddsChecker(DataCube):
'''
classdocs
'''
def parse_args(self):
"""Parse the command line arguments.
Returns:
argparse namespace object
"""
logger.debug(' Calling parse_args()')
_arg_parser = argparse.ArgumentParser('stacker')
# N.B: modtran_root is a direct override of config entries
# and its variable name must be prefixed with "_" to allow lookup in conf file
_arg_parser.add_argument('-C', '--config', dest='config_file',
default=os.path.join(self.agdc_root, 'agdc_default.conf'),
help='Stacker configuration file')
_arg_parser.add_argument('-d', '--debug', dest='debug',
default=False, action='store_const', const=True,
help='Debug mode flag')
_arg_parser.add_argument('-s', '--start_date', dest='start_date',
required=False, default=None,
help='Start Date in dd/mm/yyyy format')
_arg_parser.add_argument('-e', '--end_date', dest='end_date',
required=False, default=None,
help='End Date in dd/mm/yyyy format')
_arg_parser.add_argument('-a', '--satellite', dest='satellite',
required=False, default=None,
help='Short Satellite name (e.g. LS5, LS7)')
_arg_parser.add_argument('-n', '--sensor', dest='sensor',
required=False, default=None,
help='Sensor Name (e.g. TM, ETM+)')
return _arg_parser.parse_args()
def __init__(self, source_datacube=None, default_tile_type_id=1):
"""Constructor
Arguments:
source_datacube: Optional DataCube object whose connection and data will be shared
tile_type_id: Optional tile_type_id value (defaults to 1)
"""
if source_datacube:
# Copy values from source_datacube and then override them with command line args
self.__dict__ = copy.copy(source_datacube.__dict__)
args = self.parse_args()
# Set instance attributes for every value in command line arguments file
for attribute_name in args.__dict__.keys():
attribute_value = args.__dict__[attribute_name]
self.__setattr__(attribute_name, attribute_value)
else:
DataCube.__init__(self) # Call inherited constructor
# Attempt to parse dates from command line arguments or config file,
# trying each supported date format in turn
def parse_date(date_string):
    for date_format in ('%Y%m%d', '%d/%m/%Y', '%Y-%m-%d'):
        try:
            return datetime.strptime(date_string, date_format).date()
        except (TypeError, ValueError):
            pass
    return None
self.start_date = parse_date(getattr(self, 'start_date', None))
self.end_date = parse_date(getattr(self, 'end_date', None))
# Other variables set from config file only - not used
def parse_int(value):
    try:
        return int(value)
    except (TypeError, ValueError):
        return None
self.min_path = parse_int(getattr(self, 'min_path', None))
self.max_path = parse_int(getattr(self, 'max_path', None))
self.min_row = parse_int(getattr(self, 'min_row', None))
self.max_row = parse_int(getattr(self, 'max_row', None))
self.thredds_root = '/g/data1/v27/projects/EOS_delivery/LANDSAT/'
def check(self, kml_filename=None, wrs_shapefile='WRS-2_bound_world.kml'):
'''
List NBAR datasets recorded in the DB that are missing under the THREDDS
root directory (the KML-related arguments are unused leftovers)
'''
self.db_cursor = self.db_connection.cursor()
sql = """-- Find all NBAR acquisitions
select satellite_name as satellite, sensor_name as sensor,
x_ref as path, y_ref as row,
start_datetime, end_datetime,
dataset_path,
ll_lon, ll_lat,
lr_lon, lr_lat,
ul_lon, ul_lat,
ur_lon, ur_lat,
cloud_cover::integer, gcp_count::integer
from
(
select *
from dataset
where level_id = 2 -- NBAR
) dataset
inner join acquisition a using(acquisition_id)
inner join satellite using(satellite_id)
inner join sensor using(satellite_id, sensor_id)
where (%(start_date)s is null or end_datetime::date >= %(start_date)s)
and (%(end_date)s is null or end_datetime::date <= %(end_date)s)
and (%(satellite)s is null or satellite_tag = %(satellite)s)
and (%(sensor)s is null or sensor_name = %(sensor)s)
order by end_datetime
;
"""
params = {
'start_date': self.start_date,
'end_date': self.end_date,
'satellite': self.satellite,
'sensor': self.sensor
}
log_multiline(logger.debug, self.db_cursor.mogrify(sql, params), 'SQL', '\t')
self.db_cursor.execute(sql, params)
field_list = ['satellite',
'sensor',
'path',
'row',
'start_datetime',
'end_datetime',
'dataset_path',
'll_lon',
'll_lat',
'lr_lon',
'lr_lat',
'ul_lon',
'ul_lat',
'ur_lon',
'ur_lat',
'cloud_cover',
'gcp_count'
]
for record in self.db_cursor:
acquisition_info = {}
for field_index in range(len(field_list)):
acquisition_info[field_list[field_index]] = record[field_index]
acquisition_info['year'] = acquisition_info['end_datetime'].year
acquisition_info['month'] = acquisition_info['end_datetime'].month
acquisition_info['dataset_name'] = re.search('[^/]+$', acquisition_info['dataset_path']).group(0)
log_multiline(logger.debug, acquisition_info, 'acquisition_info', '\t')
thredds_dataset = '%s/%04d/%02d/%s_BX.nc' % (self.thredds_root, acquisition_info['year'], acquisition_info['month'], acquisition_info['dataset_name'])
#===================================================================
# if os.path.exists(thredds_dataset):
# print '%s exists' % (acquisition_info['dataset_name'])
# else:
# print '%s does not exist' % (acquisition_info['dataset_name'])
#===================================================================
if not os.path.exists(thredds_dataset):
print acquisition_info['dataset_path']
def main():
tc = ThreddsChecker()
tc.check()
if __name__ == '__main__':
main()
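# Usage sketch (editor's illustration; the script filename is hypothetical,
# the options are those defined in parse_args() above):
#   python thredds_checker.py -C agdc_default.conf -s 01/01/2009 -e 31/12/2009 -a LS5 -n TM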
|
|
from collections import defaultdict
from unittest.mock import Mock, patch
from ctf_gameserver.lib.database import transaction_cursor
from ctf_gameserver.lib.test_util import DatabaseTestCase
from ctf_gameserver.controller import controller, database
@patch('ctf_gameserver.controller.database.update_scoring')
class MainLoopTest(DatabaseTestCase):
fixtures = ['tests/controller/fixtures/main_loop.json']
metrics = defaultdict(Mock)
@patch('time.sleep')
@patch('logging.warning')
def test_null(self, warning_mock, sleep_mock, _):
controller.main_loop_step(self.connection, self.metrics, False)
warning_mock.assert_called_with('Competition start and end time must be configured in the database')
sleep_mock.assert_called_once_with(60)
@patch('time.sleep')
def test_before_game(self, sleep_mock, _):
with transaction_cursor(self.connection) as cursor:
cursor.execute('UPDATE scoring_gamecontrol SET start = datetime("now", "+1 hour"), '
' end = datetime("now", "+1 day")')
controller.main_loop_step(self.connection, self.metrics, False)
sleep_mock.assert_called_once_with(60)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT current_tick FROM scoring_gamecontrol')
new_tick = cursor.fetchone()[0]
self.assertEqual(new_tick, -1)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag')
total_flag_count = cursor.fetchone()[0]
self.assertEqual(total_flag_count, 0)
@patch('time.sleep')
def test_first_tick(self, sleep_mock, _):
with transaction_cursor(self.connection) as cursor:
cursor.execute('UPDATE scoring_gamecontrol SET start = datetime("now"), '
' end = datetime("now", "+1 day")')
controller.main_loop_step(self.connection, self.metrics, False)
sleep_mock.assert_called_once_with(0)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT current_tick FROM scoring_gamecontrol')
new_tick = cursor.fetchone()[0]
self.assertEqual(new_tick, 0)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag')
total_flag_count = cursor.fetchone()[0]
self.assertEqual(total_flag_count, 6)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE service_id=1')
service_flag_count = cursor.fetchone()[0]
self.assertEqual(service_flag_count, 3)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE protecting_team_id=4')
team_flag_count = cursor.fetchone()[0]
self.assertEqual(team_flag_count, 2)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE tick=0')
tick_flag_count = cursor.fetchone()[0]
self.assertEqual(tick_flag_count, 6)
@patch('time.sleep')
def test_next_tick_undue(self, sleep_mock, _):
with transaction_cursor(self.connection) as cursor:
cursor.execute('UPDATE scoring_gamecontrol SET start = datetime("now", "-1030 seconds"), '
' end = datetime("now", "+85370 seconds"), '
' current_tick=5')
controller.main_loop_step(self.connection, self.metrics, False)
sleep_mock.assert_called_once()
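        # The fixture appears to use 180-second ticks (480 ticks spanning the
        # 86400-second game): tick 6 falls due 1080 seconds after start, so
        # with 1030 seconds elapsed roughly 50 seconds should remain.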
sleep_arg = sleep_mock.call_args[0][0]
self.assertGreater(sleep_arg, 40)
self.assertLessEqual(sleep_arg, 50)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT current_tick FROM scoring_gamecontrol')
new_tick = cursor.fetchone()[0]
self.assertEqual(new_tick, 5)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag')
tick_flag_count = cursor.fetchone()[0]
self.assertEqual(tick_flag_count, 0)
@patch('time.sleep')
def test_next_tick_overdue(self, sleep_mock, _):
with transaction_cursor(self.connection) as cursor:
cursor.execute('UPDATE scoring_gamecontrol SET start = datetime("now", "-19 minutes"), '
' end = datetime("now", "+1421 minutes"), '
' current_tick=5')
controller.main_loop_step(self.connection, self.metrics, False)
sleep_mock.assert_called_once_with(0)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT current_tick FROM scoring_gamecontrol')
new_tick = cursor.fetchone()[0]
self.assertEqual(new_tick, 6)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE tick=6')
tick_flag_count = cursor.fetchone()[0]
self.assertEqual(tick_flag_count, 6)
@patch('time.sleep')
def test_last_tick(self, sleep_mock, _):
with transaction_cursor(self.connection) as cursor:
cursor.execute('UPDATE scoring_gamecontrol SET start = datetime("now", "-1 day"), '
' end = datetime("now", "+3 minutes"), '
' current_tick=479')
controller.main_loop_step(self.connection, self.metrics, False)
sleep_mock.assert_called_once_with(0)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT current_tick FROM scoring_gamecontrol')
new_tick = cursor.fetchone()[0]
self.assertEqual(new_tick, 480)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE tick=480')
tick_flag_count = cursor.fetchone()[0]
self.assertEqual(tick_flag_count, 6)
@patch('time.sleep')
def test_shortly_after_game(self, sleep_mock, _):
with transaction_cursor(self.connection) as cursor:
cursor.execute('UPDATE scoring_gamecontrol SET start = datetime("now", "-1441 minutes"), '
' end = datetime("now"), '
' current_tick=479')
controller.main_loop_step(self.connection, self.metrics, False)
self.assertEqual(sleep_mock.call_count, 2)
self.assertEqual(sleep_mock.call_args_list[0][0][0], 0)
self.assertEqual(sleep_mock.call_args_list[1][0][0], 60)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT current_tick FROM scoring_gamecontrol')
new_tick = cursor.fetchone()[0]
self.assertEqual(new_tick, 479)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag')
total_flag_count = cursor.fetchone()[0]
self.assertEqual(total_flag_count, 0)
@patch('time.sleep')
def test_long_after_game(self, sleep_mock, _):
with transaction_cursor(self.connection) as cursor:
cursor.execute('UPDATE scoring_gamecontrol SET start = datetime("now", "-1465 minutes"), '
' end = datetime("now", "-25 minutes"), '
' current_tick=479')
controller.main_loop_step(self.connection, self.metrics, False)
self.assertEqual(sleep_mock.call_count, 2)
self.assertEqual(sleep_mock.call_args_list[0][0][0], 0)
self.assertEqual(sleep_mock.call_args_list[1][0][0], 60)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT current_tick FROM scoring_gamecontrol')
new_tick = cursor.fetchone()[0]
self.assertEqual(new_tick, 479)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag')
total_flag_count = cursor.fetchone()[0]
self.assertEqual(total_flag_count, 0)
@patch('time.sleep')
def test_after_game_nonstop(self, sleep_mock, _):
with transaction_cursor(self.connection) as cursor:
cursor.execute('UPDATE scoring_gamecontrol SET start = datetime("now", "-1 day"), '
' end = datetime("now"), '
' current_tick=479')
controller.main_loop_step(self.connection, self.metrics, True)
sleep_mock.assert_called_once_with(0)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT current_tick FROM scoring_gamecontrol')
new_tick = cursor.fetchone()[0]
self.assertEqual(new_tick, 480)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT COUNT(*) FROM scoring_flag WHERE tick=480')
tick_flag_count = cursor.fetchone()[0]
self.assertEqual(tick_flag_count, 6)
@patch('ctf_gameserver.controller.database.update_scoring')
class DatabaseTest(DatabaseTestCase):
"""
    Tests for the `ctf_gameserver.controller.database` module. Only tests special cases; the
    general functionality is covered by MainLoopTest.
"""
fixtures = ['tests/controller/fixtures/main_loop.json']
def test_prohibit_changes(self, _):
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT * FROM scoring_gamecontrol ORDER BY id')
old_gamecontrol = cursor.fetchall()
cursor.execute('SELECT * FROM scoring_flag ORDER BY id')
old_flag = cursor.fetchall()
database.get_control_info(self.connection, prohibit_changes=True)
database.increase_tick(self.connection, prohibit_changes=True)
with transaction_cursor(self.connection) as cursor:
cursor.execute('SELECT * FROM scoring_gamecontrol ORDER BY id')
new_gamecontrol = cursor.fetchall()
cursor.execute('SELECT * FROM scoring_flag ORDER BY id')
new_flag = cursor.fetchall()
self.assertEqual(old_gamecontrol, new_gamecontrol)
self.assertEqual(old_flag, new_flag)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for slice op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SliceTest(test.TestCase):
def testEmpty(self):
inp = np.random.rand(4, 4).astype("f")
for k in xrange(4):
with self.cached_session(use_gpu=True):
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[2, k:k])
def testInt32(self):
inp = np.random.rand(4, 4).astype("i")
for k in xrange(4):
with self.cached_session(use_gpu=True):
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[2, k:k])
def testSlicingWithInt64Index(self):
with self.cached_session(force_gpu=test.is_gpu_available()):
a = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
# Slice using int64 Tensor.
i = constant_op.constant(1, dtype=dtypes.int64)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i+1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
# Slice using int64 integer.
i = np.asarray(1).astype(np.int64)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i+1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
a_int32 = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
slice_t = array_ops.slice(a_int32,
np.asarray([1]).astype(np.int64),
np.asarray([2]).astype(np.int64))
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
a_float32 = constant_op.constant([0, 1, 2], dtype=dtypes.float32)
slice_t = array_ops.slice(a_float32,
np.asarray([1]).astype(np.int64),
np.asarray([2]).astype(np.int64))
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
def testSlicingInt64Tensor(self):
with self.cached_session(force_gpu=test.is_gpu_available()):
a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)
# Slice using int32 Tensor.
i = constant_op.constant(1, dtype=dtypes.int32)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i + 1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
# Slice using int32 integer.
i = np.asarray(1).astype(np.int32)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i + 1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
slice_t = array_ops.slice(a, [1], [2])
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
def testSelectAll(self):
for _ in range(10):
with self.cached_session(use_gpu=True):
inp = np.random.rand(4, 4, 4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)
slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
slice_implicit_t = a[:, :, :, :]
self.assertAllEqual(inp, self.evaluate(slice_explicit_t))
self.assertAllEqual(inp, self.evaluate(slice_implicit_t))
self.assertEqual(inp.shape, slice_explicit_t.get_shape())
self.assertEqual(inp.shape, slice_implicit_t.get_shape())
def testSingleDimension(self):
for _ in range(10):
with self.cached_session(use_gpu=True):
inp = np.random.rand(10).astype("f")
a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)
hi = np.random.randint(0, 9)
scalar_t = a[hi]
scalar_val = self.evaluate(scalar_t)
self.assertAllEqual(scalar_val, inp[hi])
if hi > 0:
lo = np.random.randint(0, hi)
else:
lo = 0
slice_t = a[lo:hi]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[lo:hi])
def testScalarInput(self):
input_val = 0
with self.cached_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
constant_op.constant(input_val)[:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
input_t = array_ops.placeholder(dtypes.int32)
slice_t = input_t[:]
with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
def testInvalidIndex(self):
input_val = [1, 2]
with self.cached_session() as sess:
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
constant_op.constant(input_val)[1:, 1:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
input_t = array_ops.placeholder(dtypes.int32)
slice_t = input_t[1:, 1:]
with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
"out of range"):
sess.run([slice_t], feed_dict={input_t: input_val})
def _testSliceMatrixDim0(self, x, begin, size):
with self.cached_session(use_gpu=True):
tf_ans = array_ops.slice(x, [begin, 0], [size, x.shape[1]]).eval()
np_ans = x[begin:begin + size, :]
self.assertAllEqual(tf_ans, np_ans)
def testSliceMatrixDim0(self):
x = np.random.rand(8, 4).astype("f")
self._testSliceMatrixDim0(x, 1, 2)
self._testSliceMatrixDim0(x, 3, 3)
y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned
self._testSliceMatrixDim0(y, 1, 2)
self._testSliceMatrixDim0(y, 3, 3)
def testSingleElementAll(self):
for _ in range(10):
with self.cached_session(use_gpu=True):
inp = np.random.rand(4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
x, y = np.random.randint(0, 3, size=2).tolist()
slice_t = a[x, 0:y]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[x, 0:y])
def testSimple(self):
with self.session(use_gpu=True) as sess:
inp = np.random.rand(4, 4).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 4],
dtype=dtypes.float32)
slice_t = array_ops.slice(a, [0, 0], [2, 2])
slice2_t = a[:2, :2]
slice_val, slice2_val = sess.run([slice_t, slice2_t])
self.assertAllEqual(slice_val, inp[:2, :2])
self.assertAllEqual(slice2_val, inp[:2, :2])
self.assertEqual(slice_val.shape, slice_t.get_shape())
self.assertEqual(slice2_val.shape, slice2_t.get_shape())
def testComplex(self):
with self.session(use_gpu=True):
inp = np.random.rand(4, 10, 10, 4).astype("f")
a = constant_op.constant(inp, dtype=dtypes.float32)
x = np.random.randint(0, 9)
z = np.random.randint(0, 9)
if z > 0:
y = np.random.randint(0, z)
else:
y = 0
slice_t = a[:, x, y:z, :]
self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :])
def testRandom(self):
# Random dims of rank 6
input_shape = np.random.randint(0, 20, size=6)
inp = np.random.rand(*input_shape).astype("f")
with self.session(use_gpu=True) as sess:
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
sizes = [
np.random.randint(0, input_shape[i] - indices[i] + 1)
for i in range(6)
]
slice_t = array_ops.slice(a, indices, sizes)
      slice2_t = a[indices[0]:indices[0] + sizes[0],
                   indices[1]:indices[1] + sizes[1],
                   indices[2]:indices[2] + sizes[2],
                   indices[3]:indices[3] + sizes[3],
                   indices[4]:indices[4] + sizes[4],
                   indices[5]:indices[5] + sizes[5]]
      slice_val, slice2_val = sess.run([slice_t, slice2_t])
      expected_val = inp[indices[0]:indices[0] + sizes[0],
                         indices[1]:indices[1] + sizes[1],
                         indices[2]:indices[2] + sizes[2],
                         indices[3]:indices[3] + sizes[3],
                         indices[4]:indices[4] + sizes[4],
                         indices[5]:indices[5] + sizes[5]]
self.assertAllEqual(slice_val, expected_val)
self.assertAllEqual(slice2_val, expected_val)
self.assertEqual(expected_val.shape, slice_t.get_shape())
self.assertEqual(expected_val.shape, slice2_t.get_shape())
def testPartialShapeInference(self):
z = array_ops.zeros((1, 2, 3))
self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])
m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])
self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])
m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])
self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])
def _testGradientSlice(self, input_shape, slice_begin, slice_size):
with self.cached_session(use_gpu=True):
num_inputs = np.prod(input_shape)
num_grads = np.prod(slice_size)
inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
slice_t = array_ops.slice(a, slice_begin, slice_size)
grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
grad_tensor = constant_op.constant(grads)
grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
result = self.evaluate(grad)
      # Create a zero tensor of the input shape and place
# the grads into the right location to compare against TensorFlow.
np_ans = np.zeros(input_shape)
slices = []
for i in xrange(len(input_shape)):
slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
      np_ans[tuple(slices)] = grads
self.assertAllClose(np_ans, result)
def _testGradientVariableSize(self):
with self.cached_session(use_gpu=True):
inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
out = array_ops.slice(inp, [1], [-1])
grad_actual = gradients_impl.gradients(out, inp)[0].eval()
self.assertAllClose([0., 1., 1.], grad_actual)
def _testGradientVariableSize2D(self):
# Regression test for bug in slice. A low-level bug in Eigen was causing
# incorrect results for negative indices in multi-dimensional tensors.
# See b/114318298.
with self.cached_session(use_gpu=True) as sess:
x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
g1 = gradients_impl.gradients(loss1, x)[0]
g2 = gradients_impl.gradients(loss2, x)[0]
g1_val, g2_val = sess.run([g1, g2])
self.assertAllEqual(g1_val, g2_val)
def testGradientsAll(self):
# Slice the middle square out of a 4x4 input
self._testGradientSlice([4, 4], [1, 1], [2, 2])
# Slice the upper left square out of a 4x4 input
self._testGradientSlice([4, 4], [0, 0], [2, 2])
# Slice a non-square input starting from (2,1)
self._testGradientSlice([4, 4], [2, 1], [1, 2])
# Slice a 3D tensor
self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])
# Use -1 as a slice dimension.
self._testGradientVariableSize()
# Use -1 as a slice dimension on a 2D tensor.
self._testGradientVariableSize2D()
def testNotIterable(self):
# NOTE(mrry): If we register __getitem__ as an overloaded
# operator, Python will valiantly attempt to iterate over the
# Tensor from 0 to infinity. This test ensures that this
# unintended behavior is prevented.
c = constant_op.constant(5.0)
with self.assertRaisesWithPredicateMatch(
TypeError, lambda e: "Tensor objects are only iterable" in str(e)):
for _ in c:
pass
def testComputedShape(self):
# NOTE(mrry): We cannot currently handle partially-known values,
# because `tf.slice()` uses -1 to specify a wildcard size, and
# this can't be handled using the
# `tensor_util.constant_value_as_shape()` trick.
a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
begin = constant_op.constant(0)
size = constant_op.constant(1)
b = array_ops.slice(a, [begin, 0], [size, 2])
self.assertEqual([1, 2], b.get_shape())
begin = array_ops.placeholder(dtypes.int32, shape=())
c = array_ops.slice(a, [begin, 0], [-1, 2])
self.assertEqual([None, 2], c.get_shape().as_list())
def testSliceOfSlice(self):
with self.session(use_gpu=True):
a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
b = a[1:, :]
c = b[:-1, :]
d = c[1, :]
res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]
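      # Every term above selects the same row [7, 8, 9] of `a`, so the
      # expression reduces to (2 - 1 + 1 - 2) * [7, 8, 9] = [0, 0, 0].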
self.assertAllEqual([0, 0, 0], self.evaluate(res))
if __name__ == "__main__":
test.main()
|
|
"""Default variable filters."""
import re
from decimal import Decimal, InvalidOperation, ROUND_HALF_UP
import random as random_module
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.template.base import Variable, Library
from django.conf import settings
from django.utils import formats
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe, SafeData
from django.utils.translation import ugettext, ungettext
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_unicode(args[0])
if isinstance(args[0], SafeData) and getattr(func, 'is_safe', False):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser).
_dec._decorated_function = getattr(func, '_decorated_function', func)
for attr in ('is_safe', 'needs_autoescape'):
if hasattr(func, attr):
setattr(_dec, attr, getattr(func, attr))
return wraps(func)(_dec)
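# A minimal usage sketch (the `shout` filter is hypothetical, not part of
# this module), following the module's convention of applying the decorator
# by call rather than with @-syntax:
#
#     def shout(value):
#         return value + u'!'
#     shout = stringfilter(shout)
#
# shout(42) then receives u'42', because the first positional argument is
# coerced with force_unicode before the wrapped function runs.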
###################
# STRINGS #
###################
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
addslashes.is_safe = True
addslashes = stringfilter(addslashes)
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
capfirst.is_safe = True
capfirst = stringfilter(capfirst)
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
from django.utils.html import escapejs
return escapejs(value)
escapejs = stringfilter(escapejs)
def fix_ampersands(value):
"""Replaces ampersands with ``&`` entities."""
from django.utils.html import fix_ampersands
return fix_ampersands(value)
fix_ampersands.is_safe = True
fix_ampersands = stringfilter(fix_ampersands)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) / (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_unicode(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return u''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_unicode(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return u''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
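    # m == 0 exactly when the value is an integer; together with a negative
    # precision this short-circuits below to rendering no decimal places.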
if not m and p < 0:
return mark_safe(formats.number_format(u'%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal(u'1.0') / (Decimal(10) ** abs(p))
try:
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP).as_tuple()
digits = [unicode(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append(u'0')
digits.insert(-exponent, u'.')
if sign:
digits.append(u'-')
number = u''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
floatformat.is_safe = True
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_unicode(iri_to_uri(value))
iriencode.is_safe = True
iriencode = stringfilter(iriencode)
def linenumbers(value, autoescape=None):
"""Displays text with line numbers."""
from django.utils.html import escape
lines = value.split(u'\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = unicode(len(unicode(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, escape(line))
return mark_safe(u'\n'.join(lines))
linenumbers.is_safe = True
linenumbers.needs_autoescape = True
linenumbers = stringfilter(linenumbers)
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
lower.is_safe = True
lower = stringfilter(lower)
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
make_list.is_safe = False
make_list = stringfilter(make_list)
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return mark_safe(re.sub('[-\s]+', '-', value))
slugify.is_safe = True
slugify = stringfilter(slugify)
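# For illustration: slugify(u'Hello,  World!') returns u'hello-world'.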
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
    This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return (u"%" + unicode(arg)) % value
except (ValueError, TypeError):
return u""
stringformat.is_safe = True
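# For illustration: stringformat(13, u'03d') returns u'013', while an
# incompatible specifier such as stringformat(u'x', u'd') returns u''.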
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
title.is_safe = True
title = stringfilter(title)
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
from django.utils.text import truncate_words
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return truncate_words(value, length)
truncatewords.is_safe = True
truncatewords = stringfilter(truncatewords)
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
from django.utils.text import truncate_html_words
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return truncate_html_words(value, length)
truncatewords_html.is_safe = True
truncatewords_html = stringfilter(truncatewords_html)
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
upper.is_safe = False
upper = stringfilter(upper)
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
from django.utils.http import urlquote
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
urlencode.is_safe = False
urlencode = stringfilter(urlencode)
def urlize(value, autoescape=None):
"""Converts URLs in plain text into clickable links."""
from django.utils.html import urlize
return mark_safe(urlize(value, nofollow=True, autoescape=autoescape))
urlize.is_safe = True
urlize.needs_autoescape = True
urlize = stringfilter(urlize)
def urlizetrunc(value, limit, autoescape=None):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
from django.utils.html import urlize
return mark_safe(urlize(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
urlizetrunc.is_safe = True
urlizetrunc.needs_autoescape = True
urlizetrunc = stringfilter(urlizetrunc)
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
wordcount.is_safe = False
wordcount = stringfilter(wordcount)
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
from django.utils.text import wrap
return wrap(value, int(arg))
wordwrap.is_safe = True
wordwrap = stringfilter(wordwrap)
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
ljust.is_safe = True
ljust = stringfilter(ljust)
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
rjust.is_safe = True
rjust = stringfilter(rjust)
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
center.is_safe = True
center = stringfilter(center)
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, u'')
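    # Removing ';' can split apart HTML entities (e.g. u'&amp;' becomes
    # u'&amp'), so the result is only re-marked safe for other arguments.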
if safe and arg != ';':
return mark_safe(value)
return value
cut = stringfilter(cut)
###################
# HTML STRINGS #
###################
def escape(value):
"""
    Marks the value as a string that should be auto-escaped.
"""
from django.utils.safestring import mark_for_escaping
return mark_for_escaping(value)
escape.is_safe = True
escape = stringfilter(escape)
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
from django.utils.html import escape
return mark_safe(escape(value))
force_escape = stringfilter(force_escape)
force_escape.is_safe = True
def linebreaks(value, autoescape=None):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
from django.utils.html import linebreaks
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
linebreaks.is_safe = True
linebreaks.needs_autoescape = True
linebreaks = stringfilter(linebreaks)
def linebreaksbr(value, autoescape=None):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
if autoescape and not isinstance(value, SafeData):
from django.utils.html import escape
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
linebreaksbr.is_safe = True
linebreaksbr.needs_autoescape = True
linebreaksbr = stringfilter(linebreaksbr)
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
safe.is_safe = True
safe = stringfilter(safe)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_unicode(obj)) for obj in value]
safeseq.is_safe = True
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = u'(%s)' % u'|'.join(tags)
starttag_re = re.compile(ur'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile(u'</%s>' % tags_re)
value = starttag_re.sub(u'', value)
value = endtag_re.sub(u'', value)
return value
removetags.is_safe = True
removetags = stringfilter(removetags)
def striptags(value):
"""Strips all [X]HTML tags."""
from django.utils.html import strip_tags
return strip_tags(value)
striptags.is_safe = True
striptags = stringfilter(striptags)
###################
# LISTS #
###################
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
return sorted(value, key=Variable(arg).resolve)
dictsort.is_safe = False
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
return sorted(value, key=Variable(arg).resolve, reverse=True)
dictsortreversed.is_safe = False
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return u''
first.is_safe = False
def join(value, arg, autoescape=None):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_unicode, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
join.is_safe = True
join.needs_autoescape = True
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return u''
last.is_safe = True
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return ''
length.is_safe = True
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
length_is.is_safe = False
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
random.is_safe = True
def slice_(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice
for an introduction.
"""
try:
bits = []
for x in arg.split(u':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
slice_.is_safe = True
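# For illustration: slice_([1, 2, 3, 4], u':2') returns [1, 2], mirroring
# Python's own slicing semantics.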
def unordered_list(value, autoescape=None):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
from django.utils.html import conditional_escape
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
        ['Item 1', [['Item 1.1', []], ['Item 1.2', []]]]
And it is converted to:
        ['Item 1', ['Item 1.1', 'Item 1.2']]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
it = iter(second_item) # see if second item is iterable
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def _helper(list_, tabs=1):
indent = u'\t' * tabs
output = []
list_length = len(list_)
i = 0
while i < list_length:
title = list_[i]
sublist = ''
sublist_item = None
if isinstance(title, (list, tuple)):
sublist_item = title
title = ''
elif i < list_length - 1:
next_item = list_[i+1]
if next_item and isinstance(next_item, (list, tuple)):
# The next item is a sub-list.
sublist_item = next_item
# We've processed the next item now too.
i += 1
if sublist_item:
sublist = _helper(sublist_item, tabs+1)
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist,
indent, indent)
output.append('%s<li>%s%s</li>' % (indent,
escaper(force_unicode(title)), sublist))
i += 1
return '\n'.join(output)
value, converted = convert_old_style_list(value)
return mark_safe(_helper(value))
unordered_list.is_safe = True
unordered_list.needs_autoescape = True
###################
# INTEGERS #
###################
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except:
return value
add.is_safe = False
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
get_digit.is_safe = False
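# For illustration: get_digit(8936, 2) returns 3, and get_digit(8936, 5)
# returns 0 because the requested digit lies past the left end of the number.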
###################
# DATES #
###################
def date(value, arg=None):
"""Formats a date according to the given format."""
from django.utils.dateformat import format
if not value:
return u''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
date.is_safe = False
def time(value, arg=None):
"""Formats a time according to the given format."""
from django.utils import dateformat
if value in (None, u''):
return u''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return dateformat.time_format(value, arg)
except AttributeError:
return ''
time.is_safe = False
def timesince(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
from django.utils.timesince import timesince
if not value:
return u''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return u''
timesince.is_safe = False
def timeuntil(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
from django.utils.timesince import timeuntil
if not value:
return u''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return u''
timeuntil.is_safe = False
###################
# LOGIC #
###################
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
default.is_safe = False
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
default_if_none.is_safe = False
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
divisibleby.is_safe = False
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
    returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
                                       if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(u',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
yesno.is_safe = False
###################
# MISC #
###################
def filesizeformat(bytes):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc).
"""
try:
bytes = float(bytes)
except (TypeError,ValueError,UnicodeDecodeError):
return ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
if bytes < 1024:
return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
if bytes < 1024 * 1024:
return ugettext("%s KB") % filesize_number_format(bytes / 1024)
if bytes < 1024 * 1024 * 1024:
return ugettext("%s MB") % filesize_number_format(bytes / (1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024:
return ugettext("%s GB") % filesize_number_format(bytes / (1024 * 1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024 * 1024:
return ugettext("%s TB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024))
return ugettext("%s PB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024))
filesizeformat.is_safe = True
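# For illustration: filesizeformat(1023) returns u'1023 bytes' and
# filesizeformat(1536) returns u'1.5 KB' (modulo locale number formatting).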
def pluralize(value, arg=u's'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
    if u',' not in arg:
arg = u',' + arg
bits = arg.split(u',')
if len(bits) > 2:
return u''
singular_suffix, plural_suffix = bits[:2]
try:
if int(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
pluralize.is_safe = False
def phone2numeric(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
from django.utils.text import phone2numeric
return phone2numeric(value)
phone2numeric.is_safe = True
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
from pprint import pformat
try:
return pformat(value)
except Exception, e:
return u"Error in formatting: %s" % force_unicode(e, errors="replace")
pprint.is_safe = True
# Syntax: register.filter(name of filter, callback)
register.filter(add)
register.filter(addslashes)
register.filter(capfirst)
register.filter(center)
register.filter(cut)
register.filter(date)
register.filter(default)
register.filter(default_if_none)
register.filter(dictsort)
register.filter(dictsortreversed)
register.filter(divisibleby)
register.filter(escape)
register.filter(escapejs)
register.filter(filesizeformat)
register.filter(first)
register.filter(fix_ampersands)
register.filter(floatformat)
register.filter(force_escape)
register.filter(get_digit)
register.filter(iriencode)
register.filter(join)
register.filter(last)
register.filter(length)
register.filter(length_is)
register.filter(linebreaks)
register.filter(linebreaksbr)
register.filter(linenumbers)
register.filter(ljust)
register.filter(lower)
register.filter(make_list)
register.filter(phone2numeric)
register.filter(pluralize)
register.filter(pprint)
register.filter(removetags)
register.filter(random)
register.filter(rjust)
register.filter(safe)
register.filter(safeseq)
register.filter('slice', slice_)
register.filter(slugify)
register.filter(stringformat)
register.filter(striptags)
register.filter(time)
register.filter(timesince)
register.filter(timeuntil)
register.filter(title)
register.filter(truncatewords)
register.filter(truncatewords_html)
register.filter(unordered_list)
register.filter(upper)
register.filter(urlencode)
register.filter(urlize)
register.filter(urlizetrunc)
register.filter(wordcount)
register.filter(wordwrap)
register.filter(yesno)
|
|
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
## don't touch: must be first import!
import choosereactor
import os, json, sys, pkg_resources
from twisted.internet import reactor
from twisted.python import log, usage
from twisted.internet.defer import Deferred
## for versions
import autobahn
import autobahntestsuite
from autobahn.websocket.utf8validator import Utf8Validator
from autobahn.websocket.xormasker import XorMaskerNull
## WebSocket testing modes
import testee
import fuzzing
## WAMP testing modes
import wamptestee
import wampfuzzing
## Misc testing modes
import echo
import broadcast
import massconnect
import wsperfcontrol
import wsperfmaster
import serializer
from spectemplate import SPEC_FUZZINGSERVER, \
SPEC_FUZZINGCLIENT, \
SPEC_FUZZINGWAMPSERVER, \
SPEC_FUZZINGWAMPCLIENT, \
SPEC_WSPERFCONTROL, \
SPEC_MASSCONNECT
class WsTestOptions(usage.Options):
"""
Reads options from the command-line and checks them for plausibility.
"""
# Available modes, specified with the --mode (or short: -m) flag.
MODES = ['echoserver',
'echoclient',
'broadcastclient',
'broadcastserver',
'fuzzingserver',
'fuzzingclient',
#'fuzzingwampserver',
#'fuzzingwampclient',
'testeeserver',
'testeeclient',
#'wsperfcontrol',
#'wsperfmaster',
#'wampserver',
#'wamptesteeserver',
#'wampclient',
'massconnect',
#'web',
#'import',
#'export',
'serializer'
]
# Modes that need a specification file
MODES_NEEDING_SPEC = ['fuzzingclient',
'fuzzingserver',
'fuzzingwampserver',
'fuzzingwampclient',
'wsperfcontrol',
'massconnect',
'import']
# Modes that need a Websocket URI
MODES_NEEDING_WSURI = ['echoclient',
'echoserver',
'broadcastclient',
'broadcastserver',
'testeeclient',
'testeeserver',
'wsperfcontrol',
'wampserver',
'wampclient',
'wamptesteeserver']
# Default content of specification files for various modes
DEFAULT_SPECIFICATIONS = {'fuzzingclient': SPEC_FUZZINGCLIENT,
'fuzzingserver': SPEC_FUZZINGSERVER,
'wsperfcontrol': SPEC_WSPERFCONTROL,
'massconnect': SPEC_MASSCONNECT,
'fuzzingwampclient': SPEC_FUZZINGWAMPCLIENT,
'fuzzingwampserver': SPEC_FUZZINGWAMPSERVER}
optParameters = [
['mode', 'm', None, 'Test mode, one of: %s [required]' % ', '.join(MODES)],
['testset', 't', None, 'Run a test set from an import test spec.'],
['spec', 's', None, 'Test specification file [required in some modes].'],
['outfile', 'o', None, 'Output filename for modes that generate testdata.'],
['wsuri', 'w', None, 'WebSocket URI [required in some modes].'],
['ident', 'i', None, ('Testee client identifier [optional for client testees].')],
['key', 'k', None, ('Server private key file for secure WebSocket (WSS) [required in server modes for WSS].')],
['cert', 'c', None, ('Server certificate file for secure WebSocket (WSS) [required in server modes for WSS].')]
]
optFlags = [
['debug', 'd', 'Debug output [default: off].'],
['autobahnversion', 'a', 'Print version information for Autobahn and AutobahnTestSuite.']
]
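   # For illustration, a typical invocation (assuming the suite's `wstest`
   # console script):
   #
   #    wstest -m echoserver -w ws://127.0.0.1:9000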
def postOptions(self):
"""
Process the given options. Perform plausibility checks, etc...
"""
if self['autobahnversion']:
print "Autobahn %s" % autobahn.version
print "AutobahnTestSuite %s" % autobahntestsuite.version
sys.exit(0)
if not self['mode']:
raise usage.UsageError, "a mode must be specified to run!"
if self['mode'] not in WsTestOptions.MODES:
raise usage.UsageError, (
"Mode '%s' is invalid.\nAvailable modes:\n\t- %s" % (
self['mode'], "\n\t- ".join(sorted(WsTestOptions.MODES))))
if (self['mode'] in WsTestOptions.MODES_NEEDING_WSURI and not self['wsuri']):
raise usage.UsageError, "mode needs a WebSocket URI!"
class WsTestRunner(object):
"""
Testsuite driver.
"""
def __init__(self, options, spec = None):
self.options = options
self.spec = spec
self.debug = self.options.get('debug', False)
if self.debug:
log.startLogging(sys.stdout)
self.mode = str(self.options['mode'])
def startService(self):
"""
Start mode specific services.
"""
print
print "Using Twisted reactor class %s" % str(reactor.__class__)
print "Using UTF8 Validator class %s" % str(Utf8Validator)
print "Using XOR Masker classes %s" % str(XorMaskerNull)
#print "Using JSON processor module '%s'" % str(autobahn.wamp.json_lib.__name__)
print
if self.mode == "import":
return self.startImportSpec(self.options['spec'])
elif self.mode == "export":
return self.startExportSpec(self.options['testset'], self.options.get('spec', None))
elif self.mode == "fuzzingwampclient":
return self.startFuzzingWampClient(self.options['testset'])
elif self.mode == "web":
return self.startWeb(debug = self.debug)
elif self.mode == "testeeclient":
return testee.startClient(self.options['wsuri'], ident = self.options['ident'], debug = self.debug)
elif self.mode == "testeeserver":
return testee.startServer(self.options['wsuri'], debug = self.debug)
elif self.mode == "broadcastclient":
return broadcast.startClient(self.options['wsuri'], debug = self.debug)
elif self.mode == "broadcastserver":
return broadcast.startServer(self.options['wsuri'], debug = self.debug)
elif self.mode == "echoclient":
return echo.startClient(self.options['wsuri'], debug = self.debug)
elif self.mode == "echoserver":
return echo.startServer(self.options['wsuri'], debug = self.debug)
elif self.mode == "fuzzingclient":
return fuzzing.startClient(self.spec, debug = self.debug)
elif self.mode == "fuzzingserver":
return fuzzing.startServer(self.spec, debug = self.debug)
elif self.mode == "wsperfcontrol":
return wsperfcontrol.startClient(self.options['wsuri'], self.spec, debug = self.debug)
elif self.mode == "wsperfmaster":
return wsperfmaster.startServer(debug = self.debug)
elif self.mode == "massconnect":
return massconnect.startClient(self.spec, debug = self.debug)
elif self.mode == "serializer":
return serializer.start(outfilename = self.options['outfile'], debug = self.debug)
else:
raise Exception("no mode '%s'" % self.mode)
def start(options, spec = None):
"""
Actually startup a wstest run.
:param options: Global options controlling wstest.
:type options: dict
:param spec: Test specification needed for certain modes. If none is given, but
a spec is needed, a default spec is used.
:type spec: dict
"""
if options['mode'] in WsTestOptions.MODES_NEEDING_SPEC and spec is None:
spec = json.loads(WsTestOptions.DEFAULT_SPECIFICATIONS[options['mode']])
wstest = WsTestRunner(options, spec)
res = wstest.startService()
## only start reactor for modes needing it
##
if res:
## if mode wants to shutdown reactor after done (e.g. clients),
## hook up machinery to do so
##
if isinstance(res, Deferred):
def shutdown(_):
reactor.stop()
res.addBoth(shutdown)
reactor.run()
def run():
"""
Run wstest from command line. This parses command line args etc.
"""
## parse wstest command lines options
##
cmdOpts = WsTestOptions()
try:
cmdOpts.parseOptions()
except usage.UsageError, errortext:
print '%s %s\n' % (sys.argv[0], errortext)
print 'Try %s --help for usage details\n' % sys.argv[0]
sys.exit(1)
else:
options = cmdOpts.opts
## check if mode needs a spec ..
##
if options['mode'] in WsTestOptions.MODES_NEEDING_SPEC:
## .. if none was given ..
##
if not options['spec']:
## .. assume canonical specfile name ..
##
filename = "%s.json" % options['mode']
options['spec'] = filename
if not os.path.isfile(filename):
## .. if file does not exist, autocreate a spec file
##
content = WsTestOptions.DEFAULT_SPECIFICATIONS[options['mode']]
print "Auto-generating spec file '%s'" % filename
f = open(filename, 'w')
f.write(content)
f.close()
else:
## .. use existing one
##
print "Using implicit spec file '%s'" % filename
else:
## use explicitly given specfile
##
print "Using explicit spec file '%s'" % options['spec']
## now load the spec ..
##
spec_filename = os.path.abspath(options['spec'])
print "Loading spec from %s" % spec_filename
spec = json.loads(open(spec_filename).read())
else:
## mode does not rely on spec
##
spec = None
## now start a wstest run ..
##
start(options, spec)
if __name__ == '__main__':
run()
|
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import site
import sys
import os
core_suffix = 'so'
if os.name == 'nt':
core_suffix = 'pyd'
has_avx_core = False
has_noavx_core = False
current_path = os.path.abspath(os.path.dirname(__file__))
if os.path.exists(current_path + os.sep + 'core_avx.' + core_suffix):
has_avx_core = True
if os.path.exists(current_path + os.sep + 'core_noavx.' + core_suffix):
has_noavx_core = True
try:
if os.name == 'nt':
third_lib_path = current_path + os.sep + '..' + os.sep + 'libs'
os.environ['path'] = third_lib_path + ';' + os.environ['path']
sys.path.insert(0, third_lib_path)
except ImportError as e:
from .. import compat as cpt
if os.name == 'nt':
executable_path = os.path.abspath(os.path.dirname(sys.executable))
raise ImportError(
"""NOTE: You may need to run \"set PATH=%s;%%PATH%%\"
        if you encounter \"DLL load failed\" errors. If you have Python
        installed in another directory, replace \"%s\" with your own
        directory. The original error is: \n %s""" %
(executable_path, executable_path, cpt.get_exception_message(e)))
else:
raise ImportError(
"""NOTE: You may need to run \"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\"
        if you encounter \"libmkldnn.so not found\" errors. If you have Python
        installed in another directory, replace \"/usr/local/lib\" with your own
directory. The original error is: \n""" + cpt.get_exception_message(e))
except Exception as e:
raise e
def avx_supported():
"""
    Whether the current system (Linux, MacOS, Windows) supports AVX.
"""
import platform
from .. import compat as cpt
sysstr = platform.system().lower()
has_avx = False
if sysstr == 'linux':
try:
has_avx = os.popen('cat /proc/cpuinfo | grep -i avx').read() != ''
except Exception as e:
sys.stderr.write('Can not get the AVX flag from /proc/cpuinfo.\n'
'The original error is: %s\n' %
cpt.get_exception_message(e))
return has_avx
elif sysstr == 'darwin':
try:
has_avx = os.popen(
'sysctl machdep.cpu.features | grep -i avx').read() != ''
except Exception as e:
sys.stderr.write(
'Can not get the AVX flag from machdep.cpu.features.\n'
'The original error is: %s\n' % cpt.get_exception_message(e))
if not has_avx:
try:
has_avx = os.popen(
'sysctl machdep.cpu.leaf7_features | grep -i avx').read(
) != ''
except Exception as e:
sys.stderr.write(
'Can not get the AVX flag from machdep.cpu.leaf7_features.\n'
'The original error is: %s\n' %
cpt.get_exception_message(e))
return has_avx
elif sysstr == 'windows':
import ctypes
ONE_PAGE = ctypes.c_size_t(0x1000)
def asm_func(code_str, restype=ctypes.c_uint32, argtypes=()):
# Call the code_str as a function
# Alloc 1 page to ensure the protection
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
address = pfnVirtualAlloc(None, ONE_PAGE, MEM_COMMIT,
PAGE_READWRITE)
if not address:
raise Exception("Failed to VirtualAlloc")
# Copy the code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(address, code_str, len(code_str)) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(
ctypes.c_void_p(address), ONE_PAGE, PAGE_EXECUTE,
ctypes.byref(ctypes.c_ulong(0)))
if not res:
raise Exception("Failed VirtualProtect")
# Flush instruction cache
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
res = ctypes.windll.kernel32.FlushInstructionCache(
prochandle, ctypes.c_void_p(address), ONE_PAGE)
if not res:
raise Exception("Failed FlushInstructionCache")
# Cast the memory to function
functype = ctypes.CFUNCTYPE(restype, *argtypes)
func = functype(address)
return func, address
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
        # mov eax,0x1; cpuid; mov eax,ecx; ret
code_str = b"\xB8\x01\x00\x00\x00\x0f\xa2\x89\xC8\xC3"
avx_bit = 28
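        # CPUID leaf 1 reports AVX support in ECX bit 28, hence the mask below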
retval = 0
try:
# Convert the code_str into a function that returns uint
func, address = asm_func(code_str)
retval = func()
ctypes.windll.kernel32.VirtualFree(
ctypes.c_void_p(address), ctypes.c_size_t(0), ONE_PAGE)
except Exception as e:
sys.stderr.write('Failed getting the AVX flag on Windows.\n'
'The original error is: %s\n' %
cpt.get_exception_message(e))
return (retval & (1 << avx_bit)) > 0
else:
        sys.stderr.write('Cannot determine the AVX flag on %s\n' % sysstr)
return False
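# Core selection, in brief: when the CPU reports AVX, try core_avx first and
# fall back to core_noavx (with a warning) if only the noavx binary was built;
# otherwise load core_noavx directly.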
load_noavx = False
if avx_supported():
try:
from .core_avx import *
from .core_avx import __doc__, __file__, __name__, __package__
from .core_avx import __unittest_throw_exception__
from .core_avx import _append_python_callable_object_and_return_id
from .core_avx import _cleanup, _Scope
from .core_avx import _get_use_default_grad_op_desc_maker_ops
from .core_avx import _is_program_version_supported
from .core_avx import _set_eager_deletion_mode
from .core_avx import _set_fuse_parameter_group_size
from .core_avx import _set_fuse_parameter_memory_size
from .core_avx import _is_dygraph_debug_enabled
from .core_avx import _dygraph_debug_level
from .core_avx import _set_paddle_lib_path
from .core_avx import _save_static_dict
from .core_avx import _load_static_dict
from .core_avx import _save_dygraph_dict
from .core_avx import _load_dygraph_dict
from .core_avx import _create_loaded_parameter
except Exception as e:
if has_avx_core:
raise e
else:
from .. import compat as cpt
sys.stderr.write(
                'WARNING: AVX core missing. Paddle may not have been built '
                'with AVX, even though AVX is supported on the local machine.\n'
                ' You could build paddle WITH_AVX=ON to get better performance.\n'
'The original error is: %s\n' % cpt.get_exception_message(e))
load_noavx = True
else:
load_noavx = True
if load_noavx:
try:
from .core_noavx import *
from .core_noavx import __doc__, __file__, __name__, __package__
from .core_noavx import __unittest_throw_exception__
from .core_noavx import _append_python_callable_object_and_return_id
from .core_noavx import _cleanup, _Scope
from .core_noavx import _get_use_default_grad_op_desc_maker_ops
from .core_noavx import _is_program_version_supported
from .core_noavx import _set_eager_deletion_mode
from .core_noavx import _set_fuse_parameter_group_size
from .core_noavx import _set_fuse_parameter_memory_size
from .core_noavx import _is_dygraph_debug_enabled
from .core_noavx import _dygraph_debug_level
from .core_noavx import _set_paddle_lib_path
from .core_noavx import _save_static_dict
from .core_noavx import _load_static_dict
from .core_noavx import _save_dygraph_dict
from .core_noavx import _load_dygraph_dict
from .core_noavx import _create_loaded_parameter
except Exception as e:
if has_noavx_core:
sys.stderr.write(
                'Error: Cannot import the noavx core even though this file exists: ' +
current_path + os.sep + 'core_noavx.' + core_suffix + '\n')
raise e
# set paddle lib path
def set_paddle_lib_path():
site_dirs = site.getsitepackages() if hasattr(
site,
'getsitepackages') else [x for x in sys.path if 'site-packages' in x]
for site_dir in site_dirs:
lib_dir = os.path.sep.join([site_dir, 'paddle', 'libs'])
if os.path.exists(lib_dir):
_set_paddle_lib_path(lib_dir)
return
if hasattr(site, 'USER_SITE'):
lib_dir = os.path.sep.join([site.USER_SITE, 'paddle', 'libs'])
if os.path.exists(lib_dir):
_set_paddle_lib_path(lib_dir)
set_paddle_lib_path()
|
|
# Copyright 2017-2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch the game and set up communication."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import os
import platform
import shutil
import subprocess
import tempfile
import time
from absl import flags
from future.builtins import range # pylint: disable=redefined-builtin
import portpicker
from pysc2.lib import remote_controller
from pysc2.lib import stopwatch
flags.DEFINE_bool(
"sc2_verbose", False, "Enable SC2 verbose logging.", allow_hide_cpp=True)
flags.DEFINE_bool(
"sc2_verbose_mp", False, "Enable SC2 verbose multiplayer logging.")
flags.DEFINE_bool("sc2_gdb", False, "Run SC2 in gdb.")
flags.DEFINE_bool("sc2_strace", False, "Run SC2 in strace.")
flags.DEFINE_integer("sc2_port", None,
"If set, connect to the instance on "
"localhost:sc2_port instead of launching one.")
FLAGS = flags.FLAGS
sw = stopwatch.sw
class SC2LaunchError(Exception):
pass
class StarcraftProcess(object):
"""Launch a starcraft server, initialize a controller, and later, clean up.
This is best used from run_configs, which decides which version to run, and
where to find it.
It is important to call `close` or use it as a context manager, otherwise
you'll likely leak temp files and SC2 processes.
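
  A minimal usage sketch (`run_config`, `exec_path` and `version` are assumed
  to come from run_configs, as noted above):

    with StarcraftProcess(run_config, exec_path, version) as controller:
      controller.ping()

  __enter__ returns the RemoteController and __exit__ calls close(), so the
  SC2 process, the picked port, and the temp dir are cleaned up on exit.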
"""
def __init__(self, run_config, exec_path, version, full_screen=False,
extra_args=None, verbose=False, host=None, port=None,
connect=True, timeout_seconds=None, window_size=(640, 480),
window_loc=(50, 50), **kwargs):
"""Launch the SC2 process.
Args:
run_config: `run_configs.lib.RunConfig` object.
exec_path: Path to the binary to run.
version: `run_configs.lib.Version` object.
full_screen: Whether to launch the game window full_screen on win/mac.
extra_args: List of additional args for the SC2 process.
verbose: Whether to have the SC2 process do verbose logging.
host: IP for the game to listen on for its websocket. This is
usually "127.0.0.1", or "::1", but could be others as well.
port: Port SC2 should listen on for the websocket.
connect: Whether to create a RemoteController to connect.
timeout_seconds: Timeout for the remote controller.
window_size: Screen size if not full screen.
window_loc: Screen location if not full screen.
**kwargs: Extra arguments for _launch (useful for subclasses).
"""
self._proc = None
self._controller = None
self._check_exists(exec_path)
self._tmp_dir = tempfile.mkdtemp(prefix="sc-", dir=run_config.tmp_dir)
self._host = host or "127.0.0.1"
self._port = FLAGS.sc2_port or port or portpicker.pick_unused_port()
self._version = version
args = [
exec_path,
"-listen", self._host,
"-port", str(self._port),
"-dataDir", os.path.join(run_config.data_dir, ""),
"-tempDir", os.path.join(self._tmp_dir, ""),
]
if ":" in self._host:
args += ["-ipv6"]
if platform.system() != "Linux":
if full_screen:
args += ["-displayMode", "1"]
else:
args += [
"-displayMode", "0",
"-windowwidth", str(window_size[0]),
"-windowheight", str(window_size[1]),
"-windowx", str(window_loc[0]),
"-windowy", str(window_loc[1]),
]
if verbose or FLAGS.sc2_verbose:
args += ["-verbose"]
if FLAGS.sc2_verbose_mp:
args += ["-verboseMP"]
if self._version and self._version.data_version:
args += ["-dataVersion", self._version.data_version.upper()]
if extra_args:
args += extra_args
if FLAGS.sc2_gdb:
print("Launching: gdb", args[0])
print("GDB run command:")
print(" run %s" % " ".join(args[1:]))
print("\n")
args = ["gdb", args[0]]
timeout_seconds = 3600 * 6
elif FLAGS.sc2_strace:
strace_out = "/tmp/sc2-strace.txt"
print("Launching in strace. Redirecting output to", strace_out)
args = ["strace", "-f", "-o", strace_out] + args
else:
logging.info("Launching SC2: %s", " ".join(args))
try:
with sw("startup"):
if not FLAGS.sc2_port:
self._proc = self._launch(run_config, args, **kwargs)
if connect:
self._controller = remote_controller.RemoteController(
self._host, self._port, self, timeout_seconds=timeout_seconds)
except:
self.close()
raise
@sw.decorate
def close(self):
"""Shut down the game and clean up."""
if hasattr(self, "_controller") and self._controller:
self._controller.quit()
self._controller.close()
self._controller = None
self._shutdown()
if hasattr(self, "_port") and self._port:
if not FLAGS.sc2_port:
portpicker.return_port(self._port)
self._port = None
if hasattr(self, "_tmp_dir") and os.path.exists(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
@property
def controller(self):
return self._controller
@property
def host(self):
return self._host
@property
def port(self):
return self._port
@property
def version(self):
return self._version
def __enter__(self):
return self.controller
def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
self.close()
def __del__(self):
# Prefer using a context manager, but this cleans most other cases.
self.close()
def _check_exists(self, exec_path):
if not os.path.isfile(exec_path):
raise RuntimeError("Trying to run '%s', but it doesn't exist" % exec_path)
if not os.access(exec_path, os.X_OK):
raise RuntimeError(
"Trying to run '%s', but it isn't executable." % exec_path)
def _launch(self, run_config, args, **kwargs):
"""Launch the process and return the process object."""
del kwargs
try:
with sw("popen"):
return subprocess.Popen(args, cwd=run_config.cwd, env=run_config.env)
except OSError:
logging.exception("Failed to launch")
raise SC2LaunchError("Failed to launch: %s" % args)
def _shutdown(self):
"""Terminate the sub-process."""
if self._proc:
ret = _shutdown_proc(self._proc, 3)
logging.info("Shutdown with return code: %s", ret)
self._proc = None
@property
def running(self):
if FLAGS.sc2_port:
return True
# poll returns None if it's running, otherwise the exit code.
return self._proc and (self._proc.poll() is None)
@property
def pid(self):
return self._proc.pid if self.running else None
def _shutdown_proc(p, timeout):
"""Wait for a proc to shut down, then terminate or kill it after `timeout`."""
freq = 10 # how often to check per second
for _ in range(1 + timeout * freq):
p.terminate()
ret = p.poll()
if ret is not None:
logging.info("Shutdown gracefully.")
return ret
time.sleep(1 / freq)
logging.warning("Killing the process.")
p.kill()
return p.wait()
|
|
from django.shortcuts import render, render_to_response, redirect, get_object_or_404
from django.contrib.auth import authenticate, login , logout
from .models import Course, Department, User, Student, ExamPaper, Material, Announcement, CourseAllotment, Bookmark, Feedback, Contributor, Stat
from .forms import RegisterForm , LoginForm , AnnouncementForm , MaterialForm , ExamPaperForm, FeedbackForm, AvatorForm, ForgetPasswordForm
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.http import JsonResponse,HttpResponse
from django.urls import reverse
import datetime
import json
rewardvalue=5
# Create your views here.
def home(request):
if request.user.is_anonymous():
return render(request,"feed.html",context={})
bookmarks =Bookmark.objects.filter(user=request.user)
bookmarkcourses = Bookmark.objects.select_related('course').filter(user=request.user)
courselist=[]
for bookmark in bookmarks:
courselist.append(bookmark.course)
start_from = int(request.GET.get('start_from',0))
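    # paginate the feed six announcements per page; ?start_from=N yields items N*6..N*6+5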
announcements = Announcement.objects.select_related('author').filter(course__in=courselist).order_by('-updated_on')[start_from*6:start_from*6+6]
return render(request,"feed.html",context={"feed":announcements,"next":start_from+1,"bookmark":bookmarkcourses})
def about(request):
stats = Stat.objects.get(tag='initial')
return render(request,"about.html",context={'stats':stats})
def _logout(request):
logout(request)
return redirect('home')
def forgetpassword(request):
if request.method =='GET':
form = ForgetPasswordForm()
return render(request,"form.html",context={"form":form})
elif request.method == 'POST':
return render(request,"form.html",context={"message":"Check your email for password :)"})
def _login(request):
if request.method =='GET':
form = LoginForm()
return render(request,"login.html",context={"form":form})
elif request.method == 'POST':
email = request.POST.get('email')
password = request.POST.get('password')
user = authenticate(email=email, password=password)
if user is not None:
if user.is_active:
login(request, user)
print(user.email)
return redirect('home')
form = LoginForm()
return render(request,"login.html",context={"form":form,"message":"forget password"})
def _register(request,register_as=None):
if request.method =='GET':
form = RegisterForm()
return render(request,"register.html",context={"form":form})
elif request.method == 'POST':
form = RegisterForm(request.POST)
if form.is_valid():
user = form.save()
if(register_as=='student'):
user.user_role = 'student'
elif(register_as=='instructor'):
user.user_role = 'instructor'
user.save()
stat = Stat.objects.get(tag='initial')
stat.user_count +=1
stat.save()
return redirect(reverse('login'))
return redirect(reverse('register',kwargs={"register_as":register_as}))
@login_required
def profile(request):
if request.method == 'GET':
        email = request.GET.get('email', request.user.email)
form = AvatorForm()
owner = User.objects.get(email=email)
contributor,created = Contributor.objects.get_or_create(user=owner)
        is_owner = (owner == request.user)
if owner.user_role == "student":
student,created = Student.objects.get_or_create(user=owner)
return render(request,"profile.html",context={"user":owner,"student":student,'form':form, 'is_owner':is_owner,"contributor":contributor})
return render(request,"profile.html",context={"user":owner,'form':form, 'is_owner':is_owner,"contributor":contributor})
elif request.method =='POST':
if 'name' in request.POST.keys():
if request.POST['name'] in ['semester','registration_no','branch']:
student = Student.objects.get(user=request.user)
if request.POST['name'] == 'semester':
print('yeah')
student.semester = request.POST['value']
elif request.POST['name'] == 'registration_no':
student.registration_no = request.POST['value']
elif request.POST['name'] == 'branch':
student.branch = request.POST['value']
student.save()
elif request.POST['name'] in ['first_name','last_name']:
user = User.objects.get(email = request.user.email)
if request.POST['name'] == 'first_name':
user.first_name = request.POST['value']
elif request.POST['name'] == 'last_name':
user.last_name = request.POST['value']
user.save()
else:
user = User.objects.get(email=request.user.email)
form = AvatorForm(request.POST,request.FILES)
if form.is_valid():
user.avatar = form.cleaned_data["avator"]
user.save()
print(user.avatar)
print(form.errors)
return redirect(reverse('profile'))
# Profile edit to be implemented.
def getDepartments(request):
if request.method =='GET':
dept = serializers.serialize("json",Department.objects.all(),use_natural_foreign_keys=True)
data = json.loads(dept)
result = {"result":data}
return HttpResponse(json.dumps(result),content_type='application/json')
def getCourses(request,department=None):
if request.method =='GET':
dept = get_object_or_404(Department,acronym=department)
course = serializers.serialize("json",Course.objects.filter(dept=dept),use_natural_foreign_keys=True)
data = json.loads(course)
result = {"result":data}
return HttpResponse(json.dumps(result),content_type='application/json')
def Announcements(request,department=None,coursecode=None):
if request.method =='GET':
form= AnnouncementForm()
return render(request,"form.html",context={"form":form})
elif request.method =="POST":
if not request.user.is_authenticated:
return redirect(reverse('login'))
form = AnnouncementForm(request.POST,request.FILES)
if(form.is_valid()):
obj=Announcement()
obj.files = form.cleaned_data["files"]
obj.title = form.cleaned_data["title"]
obj.description = form.cleaned_data["description"]
obj.author=request.user
obj.course= Course.objects.get(code=coursecode)
obj.save()
            contributor,created = Contributor.objects.get_or_create(user=request.user)
            contributor.announcement +=1
            contributor.points += rewardvalue
            contributor.save()
            if created:
                stat = Stat.objects.get(tag='initial')
                stat.contributor_count +=1
                stat.save()
stat = Stat.objects.get(tag='initial')
stat.announcement_count +=1
stat.save()
return redirect(reverse("course", kwargs={'department':department,'coursecode':coursecode}))
print(form.errors)
return redirect(reverse("course", kwargs={'department':department,'coursecode':coursecode}))
def Materials(request,department=None,coursecode=None):
if request.method =='GET':
form= MaterialForm()
return render(request,"form.html",context={"form":form})
elif request.method =="POST":
if not request.user.is_authenticated:
return redirect(reverse('login'))
form = MaterialForm(request.POST,request.FILES)
if(form.is_valid()):
obj = Material()
obj.files = form.cleaned_data["files"]
obj.title = form.cleaned_data["title"]
obj.author=request.user
obj.course= Course.objects.get(code=coursecode)
obj.save()
contributor,created = Contributor.objects.get_or_create(user=request.user)
contributor.material +=1
contributor.points += rewardvalue
contributor.save()
if created:
stat = Stat.objects.get(tag='initial')
stat.contributor_count +=1
stat.save()
stat = Stat.objects.get(tag='initial')
stat.material_count +=1
stat.save()
return redirect(reverse("course", kwargs={'department':department,'coursecode':coursecode}))
print(form.errors)
return redirect(reverse("course", kwargs={'department':department,'coursecode':coursecode}))
@login_required
def FeedbackView(request):
if request.method =='GET':
form= FeedbackForm()
return render(request,"form.html",context={"form":form})
elif request.method =="POST":
form = FeedbackForm(request.POST,request.FILES)
if(form.is_valid()):
obj = Feedback()
obj.files = form.cleaned_data["files"]
obj.title = form.cleaned_data["title"]
obj.feedback = form.cleaned_data["feedback"]
obj.author=request.user
obj.save()
contributor,created = Contributor.objects.get_or_create(user=request.user)
contributor.feedback +=1
contributor.points += 2*rewardvalue
contributor.save()
if created:
stat = Stat.objects.get(tag='initial')
stat.contributor_count +=1
stat.save()
return render(request,"form.html",context={'feedback':True,'message':"Thanks for your valuable feedback. We will be working on your query."})
print(form.errors)
return redirect(reverse("course"))
@login_required
def ExamPaperView(request,department=None,coursecode=None):
if request.method =='GET':
form= ExamPaperForm()
return render(request,"form.html",context={"form":form})
elif request.method =="POST":
form = ExamPaperForm(request.POST,request.FILES)
if(form.is_valid()):
obj = ExamPaper()
obj.files = form.cleaned_data["files"]
obj.term = form.cleaned_data["term"]
obj.author=request.user
obj.course= Course.objects.get(code=coursecode)
obj.save()
contributor,created = Contributor.objects.get_or_create(user=request.user)
contributor.paper +=1
contributor.points += rewardvalue
contributor.save()
if created:
stat = Stat.objects.get(tag='initial')
stat.contributor_count +=1
stat.save()
stat = Stat.objects.get(tag='initial')
stat.paper_count +=1
stat.save()
return redirect(reverse("course", kwargs={'department':department,'coursecode':coursecode}))
print(form.errors)
return redirect(reverse("course", kwargs={'department':department,'coursecode':coursecode}))
def DepartmentView(request,department=None,year=0,semester=0):
if request.method =='GET':
dept = get_object_or_404(Department,acronym=department)
year = int(year)
semester = int(semester)
if year < 1:
# course = CourseAllotment.objects.select_related('course').filter(course__dept=dept).order_by('semester')
return render(request,"years.html",context={'department':department})
else:
if semester == 0 :
course = CourseAllotment.objects.select_related('course').filter(course__dept=dept).filter(semester__in=[(2*year -1),(2*year)])
print(course)
else:
course = CourseAllotment.objects.select_related('course').filter(course__dept=dept).filter(semester=semester)
return render(request,"department.html",context={"department":dept,"courses":course})
def CourseView(request,department=None,coursecode=None):
if request.method =='GET':
dept = get_object_or_404(Department,acronym=department)
course = get_object_or_404(Course,code=coursecode)
announcements = Announcement.objects.filter(course=course)
materials = Material.objects.filter(course=course)
papers = ExamPaper.objects.filter(course=course)
try:
bookmark = Bookmark.objects.get(course=course,user=request.user)
        except Exception:
            # anonymous users also land here: the lookup raises for them
            bookmark = None
        is_bookmarked = bookmark is not None
return render(request,"course.html",context={"department":dept,"course":course,"announcements":announcements,"materials":materials,"papers":papers,"is_bookmarked":is_bookmarked})
@login_required
def FeedView(request):
if request.method=='GET':
bookmarks =Bookmark.objects.filter(user=request.user)
bookmarkcourses = Bookmark.objects.select_related('course').filter(user=request.user)
courselist=[]
for bookmark in bookmarks:
courselist.append(bookmark.course)
start_from = int(request.GET.get('start_from',0))
announcements = Announcement.objects.select_related('author').filter(course__in =courselist ).order_by('-updated_on')[start_from*6:start_from*6+6]
return render(request,"feed.html",context={"feed":announcements,"next":start_from+1})
@login_required
def BookmarkView(request):
if request.method =='POST':
course = request.POST.get('course')
user = request.POST.get('user')
course_obj = get_object_or_404(Course,id=course)
try:
bookmark = Bookmark.objects.get(course=course,user=request.user)
        except Bookmark.DoesNotExist:
            bookmark = None
if bookmark is not None:
bookmark.delete()
else:
obj = Bookmark()
obj.course = course_obj
obj.user = request.user
obj.save()
return HttpResponse(json.dumps({"success":True}),content_type='application/json')
|
|
import os
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
import itertools
import pandas as pd
import numpy as np
from datetime import datetime
# This project
import stroke_cleaning
colors = itertools.cycle(["r", "b", "g", "k", "c", "m", "y"])
#def plot_features_from_audio(audio, featuresXY=('zrc', 'centroid')):
def plot_features_from_audio(audio, featuresXY=('cm0', 'sm0')):
"""Plot the features from the audio sample object"""
feature_dic = audio.get_features()
features = feature_dic['feature_table']
xfeat, yfeat = featuresXY
# Index of features
xidx = feature_dic['feature_names'].index(xfeat)
yidx = feature_dic['feature_names'].index(yfeat)
# Plot
plt.scatter(features[:, xidx], features[:, yidx],
color=next(colors), s=70, alpha=0.5)
#plt.xlabel(feature_dic['feature_names'][0])
#plt.ylabel(feature_dic['feature_names'][1])
plt.xlabel(xfeat)
plt.ylabel(yfeat)
plt.grid()
def plot_features_from_list(audio_list, label_list=None, good_range_list=None, **kwargs):
"""Plot features for audio signal in the given list."""
if good_range_list is None:
good_range_list = [None]*len(audio_list)
if label_list is None:
label_list = [os.path.basename(x) for x in audio_list]
fake_stroke_onset = kwargs.get("fake_stroke_onset", False)
featuresXY = kwargs.get('featuresXY', ('zrc', 'centroid'))
xfeat, yfeat = featuresXY
print('Fake stroke is {}'.format(fake_stroke_onset))
for iaudiofile, ilabel, igood_range in zip(
audio_list, label_list, good_range_list):
iaudio = stroke_cleaning.audio_sample(iaudiofile, igood_range)
# Strokes are either searched or just regular samples
if fake_stroke_onset is not False:
print('Using fake stroke')
iaudio.set_fake_regular_offsets(fake_stroke_onset)
else:
iaudio.isolate_strokes()
# Features
ifeature_dic = iaudio.get_features()
ifeatures = ifeature_dic['feature_table']
print('features shape: {}'.format(ifeatures.shape))
        del iaudio
# Index of features
xidx = ifeature_dic['feature_names'].index(xfeat)
yidx = ifeature_dic['feature_names'].index(yfeat)
# Plot
plt.scatter(ifeatures[:, xidx], ifeatures[:, yidx],
color=next(colors), s=70, label=ilabel, alpha=0.5)
plt.xlabel(xfeat)
plt.ylabel(yfeat)
plt.grid()
plt.legend(loc='best')
def show_features_from_list(audio_list, label_list=None, good_range_list=None, **kwargs):
"""Plot features for audio signal in the given list."""
fig = plt.figure()
plot_features_from_list(audio_list, label_list, good_range_list, **kwargs)
fig.show()
raw_input('press enter when finished...')
def get_features_from_path_list(path_list, goodrange_list, **kwargs):
"""Return feature dict from path list"""
fake_stroke_onset = kwargs.get('fake_stroke_onset', False)
feature_dic = {}
for iaudiofile, igood_range in zip(path_list, goodrange_list):
iaudio = stroke_cleaning.audio_sample(iaudiofile, igood_range)
# Strokes are either searched or just regular samples
if fake_stroke_onset is not False:
print('Using fake stroke')
iaudio.set_fake_regular_offsets(fake_stroke_onset)
else:
iaudio.isolate_strokes()
ifeature_dic = iaudio.get_features()
        if 'feature_names' not in feature_dic:
feature_dic.update(ifeature_dic)
else:
feature_dic['feature_table'] =\
np.vstack((feature_dic['feature_table'],
ifeature_dic['feature_table']))
return feature_dic
def show_grouped_features(group_dict, **kwargs):
"""Plot the features from the group"""
featuresXY = kwargs.get('featuresXY', ('zrc', 'centroid'))
xfeat, yfeat = featuresXY
fig = plt.figure()
for igroup in group_dict.keys():
ifeature_dic =\
get_features_from_path_list(group_dict[igroup]['paths'],
group_dict[igroup]['goodranges'])
ifeatures = ifeature_dic['feature_table']
print('features shape: {}'.format(ifeatures.shape))
# Index of features
xidx = ifeature_dic['feature_names'].index(xfeat)
yidx = ifeature_dic['feature_names'].index(yfeat)
# Plot
plt.scatter(ifeatures[:, xidx], ifeatures[:, yidx],
color=next(colors), s=70, label=igroup, alpha=0.5)
plt.xlabel(xfeat)
plt.ylabel(yfeat)
plt.grid()
plt.legend(loc='best')
fig.show()
raw_input('press enter when finished...')
def show_features_from_dic(audio_dic, **kwargs):
"""Plot features for audio signal in the given dict."""
recording_list = []
good_range_list = []
label_list = []
for ilabel, idic in audio_dic.items():
label_list.append(ilabel)
recording_list.append(idic['audio_file'])
good_range_list.append(idic['good_range'])
show_features_from_list(recording_list, label_list, good_range_list, **kwargs)
def show_players_features(**kwargs):
"""Plot the features grouped by file selection"""
df_data = pd.read_csv('datainfo.csv', sep=' ', na_values='None')
print(df_data.head())
grouping_dict = {}
for iplayer in df_data.player.unique():
ipathlist = df_data[df_data.player==iplayer]['path'].tolist()
igoodrangelist = df_data[df_data.player==iplayer]['goodrange'].tolist()
grouping_dict[iplayer] = {'paths': ipathlist,
'goodranges': igoodrangelist}
show_grouped_features(grouping_dict, **kwargs)
def audio_report(fname):
"""Plot summary of the given audio file."""
audio = stroke_cleaning.audio_sample(fname)
audio.set_fake_regular_offsets(1)
fig_sig = plt.figure()
audio.plot_signal(x_axis_type='sample')
fig_sig.show()
fig_feat = plt.figure()
plot_features_from_audio(audio)
plt.grid()
fig_feat.show()
raw_input('press enter when finished...')
def show_multiaudio(fnames, good_ranges = None, **kwargs):
"""Show summary of all the audio files in the given list."""
featuresXY = kwargs.get('featuresXY',('zrc', 'centroid'))
xfeat, yfeat = featuresXY
if good_ranges is None:
good_ranges = [None]*len(fnames)
fig_feat = plt.figure(1)
fig_raw = plt.figure(2)
for idx, (iaudiofile, igood_range) in enumerate(zip(fnames, good_ranges)):
print(igood_range)
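        # good ranges come from the CSV as 'start-end' strings; non-string
        # values (e.g. NaN for missing entries) raise AttributeError on
        # .split and fall back to None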
try:
igood_range = igood_range.split('-')
igood_range = (int(igood_range[0]), int(igood_range[1]))
except AttributeError:
igood_range = None
print(igood_range)
iaudio = stroke_cleaning.audio_sample(iaudiofile, igood_range)
ilabel = os.path.basename(iaudiofile)
iaudio.set_fake_regular_offsets(0.5)
# Features
ifeature_dic = iaudio.get_features()
# Index of features
xidx = ifeature_dic['feature_names'].index(xfeat)
yidx = ifeature_dic['feature_names'].index(yfeat)
plt.figure(2)
        iax = fig_raw.add_subplot(len(fnames), 1, idx + 1)  # subplot indices are 1-based
iaudio.plot_signal(label=os.path.basename(iaudiofile).split('.')[0])
iax.text(0.05, 0.95, ilabel,
verticalalignment='top', horizontalalignment='left',
transform=iax.transAxes, fontsize=15)
ifeatures = ifeature_dic['feature_table']
plt.figure(1)
plt.scatter(ifeatures[:, xidx], ifeatures[:, yidx],
color=next(colors), s=70, label=ilabel, alpha=0.5)
plt.xlabel(xfeat)
plt.ylabel(yfeat)
plt.legend(loc='best')
fig_feat.show()
fig_raw.show()
raw_input('press enter when finished...')
def show_day(daystr, **kwargs):
"""Show summary of all the audiofile from a given day."""
featuresXY = kwargs.get('featuresXY', ('zrc', 'centroid'))
    # keep dates as plain strings so the `== daystr` comparison below holds
    df_data = pd.read_csv('datainfo.csv', sep=' ', na_values='None',
                          dtype={'date': str})
fnames = df_data[df_data.date == daystr].path.tolist()
good_ranges = df_data[df_data.date == daystr].goodrange.tolist()
show_multiaudio(fnames, good_ranges, featuresXY=featuresXY)
if __name__ == '__main__':
#recording_list = (
# '/Users/jean-francoisrajotte/myaudio/marina.m4a',
# '/Users/jean-francoisrajotte/myaudio/jfraj.m4a',
# )
#show_features_from_list(recording_list)
recording_dic = {}
recording_dic['marina'] = {
'audio_file': '/Users/jean-francoisrajotte/myaudio/marina.m4a',
'good_range': None}
recording_dic['jfraj'] = {
'audio_file': '/Users/jean-francoisrajotte/myaudio/jfraj.m4a',
'good_range': (95000, -400000)}
recording_dic['marina_srON'] = {
'audio_file': '/Users/jean-francoisrajotte/myaudio/astring_shoulder_rest_ON.m4a',
'good_range': None}
recording_dic['marina_srOFF'] = {
'audio_file': '/Users/jean-francoisrajotte/myaudio/astring_shoulder_rest_OFF.m4a',
'good_range': None}
recording_dic['marina_srON'] = {
'audio_file': '/Users/jean-francoisrajotte/myaudio/marina_20150507.m4a',
'good_range': None}
recording_dic['marina_srOFF'] = {
'audio_file': '/Users/jean-francoisrajotte/myaudio/marina_20150507_test.m4a',
'good_range': None}
#show_features_from_dic(recording_dic)
show_players_features(featuresXY=('cm3', 'sm0'))
#audio_dir = "/Users/jean-francoisrajotte/myaudio/alto_recordings/"
#audioname = os.path.join(audio_dir, 'marina_20150513_halfbow_testbow1.m4a')
#audio_report(audioname)
#show_day('20150623', featuresXY=('cm3', 'sm0'))
|
|
"""
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# example synthetic children and summary provider for CFString (and related NSString class)
# the real code is part of the LLDB core
import lldb
import lldb.runtime.objc.objc_runtime
import lldb.formatters.Logger
def CFString_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
provider = CFStringSynthProvider(valobj, dict)
if not provider.invalid:
try:
summary = provider.get_child_at_index(
provider.get_child_index("content"))
if isinstance(summary, lldb.SBValue):
summary = summary.GetSummary()
else:
summary = '"' + summary + '"'
except:
summary = None
if summary is None:
summary = '<variable is not NSString>'
return '@' + summary
return ''
def CFAttributedString_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
offset = valobj.GetTarget().GetProcess().GetAddressByteSize()
pointee = valobj.GetValueAsUnsigned(0)
summary = '<variable is not NSAttributedString>'
if pointee is not None and pointee != 0:
pointee = pointee + offset
child_ptr = valobj.CreateValueFromAddress(
"string_ptr", pointee, valobj.GetType())
child = child_ptr.CreateValueFromAddress(
"string_data",
child_ptr.GetValueAsUnsigned(),
valobj.GetType()).AddressOf()
provider = CFStringSynthProvider(child, dict)
if not provider.invalid:
try:
summary = provider.get_child_at_index(
provider.get_child_index("content")).GetSummary()
except:
summary = '<variable is not NSAttributedString>'
if summary is None:
summary = '<variable is not NSAttributedString>'
return '@' + summary
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F CFString.CFString_SummaryProvider NSString CFStringRef CFMutableStringRef")
debugger.HandleCommand(
"type summary add -F CFString.CFAttributedString_SummaryProvider NSAttributedString")
class CFStringSynthProvider:
def __init__(self, valobj, dict):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.update()
# children other than "content" are for debugging only and must not be
# used in production code
def num_children(self):
logger = lldb.formatters.Logger.Logger()
if self.invalid:
return 0
return 6
def read_unicode(self, pointer, max_len=2048):
logger = lldb.formatters.Logger.Logger()
process = self.valobj.GetTarget().GetProcess()
error = lldb.SBError()
pystr = u''
# cannot do the read at once because the length value has
# a weird encoding. better play it safe here
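        # worked example: 'H' (U+0048) is stored as the byte pair
        # (0x48, 0x00) on a little-endian target, so below
        # value = b1 * 256 + b0 = 0 * 256 + 0x48 = 0x48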
while max_len > 0:
content = process.ReadMemory(pointer, 2, error)
new_bytes = bytearray(content)
b0 = new_bytes[0]
b1 = new_bytes[1]
pointer = pointer + 2
if b0 == 0 and b1 == 0:
break
# rearrange bytes depending on endianness
# (do we really need this or is Cocoa going to
# use Windows-compatible little-endian even
# if the target is big endian?)
if self.is_little:
value = b1 * 256 + b0
else:
value = b0 * 256 + b1
pystr = pystr + unichr(value)
# read max_len unicode values, not max_len bytes
max_len = max_len - 1
return pystr
# handle the special case strings
# only use the custom code for the tested LP64 case
def handle_special(self):
logger = lldb.formatters.Logger.Logger()
if not self.is_64_bit:
# for 32bit targets, use safe ObjC code
return self.handle_unicode_string_safe()
offset = 12
pointer = self.valobj.GetValueAsUnsigned(0) + offset
pystr = self.read_unicode(pointer)
return self.valobj.CreateValueFromExpression(
"content", "(char*)\"" + pystr.encode('utf-8') + "\"")
# last resort call, use ObjC code to read; the final aim is to
# be able to strip this call away entirely and only do the read
# ourselves
def handle_unicode_string_safe(self):
return self.valobj.CreateValueFromExpression(
"content", "(char*)\"" + self.valobj.GetObjectDescription() + "\"")
def handle_unicode_string(self):
logger = lldb.formatters.Logger.Logger()
# step 1: find offset
if self.inline:
pointer = self.valobj.GetValueAsUnsigned(
0) + self.size_of_cfruntime_base()
if not self.explicit:
# untested, use the safe code path
return self.handle_unicode_string_safe()
else:
# a full pointer is skipped here before getting to the live
# data
pointer = pointer + self.pointer_size
else:
pointer = self.valobj.GetValueAsUnsigned(
0) + self.size_of_cfruntime_base()
# read 8 bytes here and make an address out of them
try:
char_type = self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar).GetPointerType()
vopointer = self.valobj.CreateValueFromAddress(
"dummy", pointer, char_type)
pointer = vopointer.GetValueAsUnsigned(0)
except:
return self.valobj.CreateValueFromExpression(
"content", '(char*)"@\"invalid NSString\""')
# step 2: read Unicode data at pointer
pystr = self.read_unicode(pointer)
# step 3: return it
return pystr.encode('utf-8')
def handle_inline_explicit(self):
logger = lldb.formatters.Logger.Logger()
offset = 3 * self.pointer_size
offset = offset + self.valobj.GetValueAsUnsigned(0)
return self.valobj.CreateValueFromExpression(
"content", "(char*)(" + str(offset) + ")")
def handle_mutable_string(self):
logger = lldb.formatters.Logger.Logger()
offset = 2 * self.pointer_size
data = self.valobj.CreateChildAtOffset(
"content", offset, self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar).GetPointerType())
data_value = data.GetValueAsUnsigned(0)
if self.explicit and self.unicode:
return self.read_unicode(data_value).encode('utf-8')
else:
data_value = data_value + 1
return self.valobj.CreateValueFromExpression(
"content", "(char*)(" + str(data_value) + ")")
def handle_UTF8_inline(self):
logger = lldb.formatters.Logger.Logger()
offset = self.valobj.GetValueAsUnsigned(
0) + self.size_of_cfruntime_base()
if not self.explicit:
offset = offset + 1
return self.valobj.CreateValueFromAddress(
"content", offset, self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar)).AddressOf()
def handle_UTF8_not_inline(self):
logger = lldb.formatters.Logger.Logger()
offset = self.size_of_cfruntime_base()
return self.valobj.CreateChildAtOffset(
"content", offset, self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar).GetPointerType())
def get_child_at_index(self, index):
logger = lldb.formatters.Logger.Logger()
logger >> "Querying for child [" + str(index) + "]"
if index == 0:
return self.valobj.CreateValueFromExpression(
"mutable", str(int(self.mutable)))
if index == 1:
return self.valobj.CreateValueFromExpression("inline",
str(int(self.inline)))
if index == 2:
return self.valobj.CreateValueFromExpression(
"explicit", str(int(self.explicit)))
if index == 3:
return self.valobj.CreateValueFromExpression(
"unicode", str(int(self.unicode)))
if index == 4:
return self.valobj.CreateValueFromExpression(
"special", str(int(self.special)))
if index == 5:
# we are handling the several possible combinations of flags.
# for each known combination we have a function that knows how to
# go fetch the data from memory instead of running code. if a string is not
# correctly displayed, one should start by finding a combination of flags that
# makes it different from these known cases, and provide a new reader function
# if this is not possible, a new flag might have to be made up (like the "special" flag
# below, which is not a real flag in CFString), or alternatively one might need to use
# the ObjC runtime helper to detect the new class and deal with it accordingly
# print 'mutable = ' + str(self.mutable)
# print 'inline = ' + str(self.inline)
# print 'explicit = ' + str(self.explicit)
# print 'unicode = ' + str(self.unicode)
# print 'special = ' + str(self.special)
if self.mutable:
return self.handle_mutable_string()
            elif self.inline and self.explicit and \
                    not self.unicode and not self.special and \
                    not self.mutable:
return self.handle_inline_explicit()
elif self.unicode:
return self.handle_unicode_string()
elif self.special:
return self.handle_special()
elif self.inline:
return self.handle_UTF8_inline()
else:
return self.handle_UTF8_not_inline()
def get_child_index(self, name):
logger = lldb.formatters.Logger.Logger()
logger >> "Querying for child ['" + str(name) + "']"
if name == "content":
return self.num_children() - 1
if name == "mutable":
return 0
if name == "inline":
return 1
if name == "explicit":
return 2
if name == "unicode":
return 3
if name == "special":
return 4
# CFRuntimeBase is defined as having an additional
# 4 bytes (padding?) on LP64 architectures
# to get its size we add up sizeof(pointer)+4
# and then add 4 more bytes if we are on a 64bit system
def size_of_cfruntime_base(self):
logger = lldb.formatters.Logger.Logger()
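        # e.g. on LP64 this is 8 + 4 + 4 = 16 bytes; on a 32-bit target, 4 + 4 = 8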
return self.pointer_size + 4 + (4 if self.is_64_bit else 0)
# the info bits are part of the CFRuntimeBase structure
# to get at them we have to skip a uintptr_t and then get
# at the least-significant byte of a 4 byte array. If we are
# on big-endian this means going to byte 3, if we are on
# little endian (OSX & iOS), this means reading byte 0
def offset_of_info_bits(self):
logger = lldb.formatters.Logger.Logger()
offset = self.pointer_size
if not self.is_little:
offset = offset + 3
return offset
def read_info_bits(self):
logger = lldb.formatters.Logger.Logger()
cfinfo = self.valobj.CreateChildAtOffset(
"cfinfo",
self.offset_of_info_bits(),
self.valobj.GetType().GetBasicType(
lldb.eBasicTypeChar))
cfinfo.SetFormat(11)
info = cfinfo.GetValue()
if info is not None:
self.invalid = False
return int(info, 0)
else:
self.invalid = True
return None
# calculating internal flag bits of the CFString object
# this stuff is defined and discussed in CFString.c
def is_mutable(self):
logger = lldb.formatters.Logger.Logger()
return (self.info_bits & 1) == 1
def is_inline(self):
logger = lldb.formatters.Logger.Logger()
return (self.info_bits & 0x60) == 0
# this flag's name is ambiguous, it turns out
# we must skip a length byte to get at the data
# when this flag is False
def has_explicit_length(self):
logger = lldb.formatters.Logger.Logger()
return (self.info_bits & (1 | 4)) != 4
# probably a subclass of NSString. obtained this from [str pathExtension]
# here info_bits = 0 and Unicode data at the start of the padding word
# in the long run using the isa value might be safer as a way to identify this
# instead of reading the info_bits
def is_special_case(self):
logger = lldb.formatters.Logger.Logger()
return self.info_bits == 0
def is_unicode(self):
logger = lldb.formatters.Logger.Logger()
return (self.info_bits & 0x10) == 0x10
# preparing ourselves to read into memory
# by adjusting architecture-specific info
def adjust_for_architecture(self):
logger = lldb.formatters.Logger.Logger()
self.pointer_size = self.valobj.GetTarget().GetProcess().GetAddressByteSize()
self.is_64_bit = self.pointer_size == 8
self.is_little = self.valobj.GetTarget().GetProcess(
).GetByteOrder() == lldb.eByteOrderLittle
# reading info bits out of the CFString and computing
# useful values to get at the real data
def compute_flags(self):
logger = lldb.formatters.Logger.Logger()
self.info_bits = self.read_info_bits()
if self.info_bits is None:
return
self.mutable = self.is_mutable()
self.inline = self.is_inline()
self.explicit = self.has_explicit_length()
self.unicode = self.is_unicode()
self.special = self.is_special_case()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
self.compute_flags()
|
|
from __future__ import unicode_literals
from __future__ import print_function
from .context import dataindex
from .context.tools import to_expression
from .compat import implements_to_string, text_type
from .context.missing import is_missing
from .interface import unproxy
from pyparsing import (
Word,
WordEnd,
nums,
alphas,
Combine,
oneOf,
opAssoc,
operatorPrecedence,
QuotedString,
Literal,
ParserElement,
ParseException,
Forward,
Group,
Suppress,
Optional,
Regex,
)
from sqlalchemy import and_, or_, func, not_
import operator
import re
import threading
def dbobject(obj):
return getattr(obj, "__moyadbobject__", lambda: obj)()
@implements_to_string
class DBExpressionError(Exception):
hide_py_traceback = True
error_type = "Database expression error"
def __init__(self, exp, msg=None, col=None):
self.exp = exp
self.msg = msg or ""
self.col = col
def __str__(self):
return self.msg
def __moyaconsole__(self, console):
indent = ""
console(indent + self.exp, bold=True, fg="magenta").nl()
if self.col:
console(indent)(" " * (self.col - 1) + "^", bold=True, fg="red").nl()
class DBEvalError(Exception):
pass
def pairs(tokenlist):
    """Convert a list into a sequence of paired values."""
    return zip(tokenlist[::2], tokenlist[1::2])
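# e.g. pairs(['+', a, '-', b]) -> [('+', a), ('-', b)] (an iterator under
# Python 3's zip; the eval classes below iterate it exactly once)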
class ExpressionContext(object):
def __init__(self, exp):
self.exp = exp
self._joins = []
super(ExpressionContext, self).__init__()
def __repr__(self):
return "<expressioncontext '{}'>".format(self.exp)
def add_joins(self, joins):
self._joins.append(joins)
def process_qs(self, qs):
# TODO: Is this deprecated now?
for j in self._joins:
if isinstance(j, (tuple, list)):
qs = qs.join(*j)
else:
qs = qs.join(j)
return qs
class ExpressionModifiers(object):
def abs(self, context, v):
return func.abs(v)
def count(self, context, v):
return func.count(v)
def sum(self, context, v):
return func.sum(v)
def min(self, context, v):
return func.min(v)
def max(self, context, v):
return func.max(v)
def lower(self, context, v):
return func.lower(v)
class EvalModifierOp(object):
modifiers = ExpressionModifiers()
def __init__(self, tokens):
filter, value = tokens[0]
self.value = value
self._eval = value.eval
try:
self.filter_func = getattr(self.modifiers, filter[:-1])
except AttributeError:
raise DBEvalError("unknown filter type '%s'" % filter)
def eval(self, archive, context, app, exp_context):
return self.filter_func(context, self._eval(archive, context, app, exp_context))
class EvalMultOp(object):
"Class to evaluate multiplication and division expressions"
ops = {
"*": operator.imul,
"/": operator.itruediv,
"//": operator.ifloordiv,
"%": operator.imod,
}
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
ops = self.ops
self.operator_eval = [(ops[op], val.eval) for op, val in pairs(self.value[1:])]
def eval(self, archive, context, app, exp_context):
        prod = self._eval(archive, context, app, exp_context)
for op_func, _eval in self.operator_eval:
prod = op_func(prod, _eval(archive, context, app, exp_context))
return prod
class EvalAddOp(object):
"Class to evaluate addition and subtraction expressions"
ops = {"+": operator.add, "-": operator.sub}
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
ops = self.ops
self.operator_eval = [(ops[op], val.eval) for op, val in pairs(self.value[1:])]
def eval(self, archive, context, app, exp_context):
sum = self._eval(archive, context, app, exp_context)
for op_func, _eval in self.operator_eval:
sum = op_func(sum, _eval(archive, context, app, exp_context))
return sum
class EvalConstant(object):
"""Evaluates a constant"""
constants = {"None": None, "True": True, "False": False, "yes": True, "no": False}
def __init__(self, tokens):
self.key = tokens[0]
self.value = self.constants[self.key]
def eval(self, archive, context, app, exp_context):
return self.value
class EvalInteger(object):
"Class to evaluate an integer value"
def __init__(self, tokens):
self.value = int(tokens[0])
def eval(self, archive, context, app, exp_context):
return self.value
class EvalReal(object):
"Class to evaluate a real number value"
def __init__(self, tokens):
self.value = float(tokens[0])
def eval(self, archive, context, app, exp_context):
return self.value
class EvalString(object):
"Class to evaluate a string"
def __init__(self, tokens):
self.value = tokens[0]
def eval(self, archive, context, app, exp_context):
return self.value
def qs(value):
if hasattr(value, "__moyadbobject__"):
value = value.__moyadbobject__()
if hasattr(value, "_get_query_set"):
value = value._get_query_set()
if isinstance(value, list):
return [getattr(v, "id", v) for v in value]
return value
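# A sketch of what qs() does: a Moya db wrapper is unwrapped via
# __moyadbobject__ and then _get_query_set, and a list of ORM rows collapses
# to their id attributes (plain values pass through), which is what the
# 'in'/'not in' operators below feed to SQLAlchemy's in_()/notin_().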
class EvalVariable(object):
"Class to evaluate a parsed variable"
def __init__(self, tokens):
key = tokens[0]
self.index = dataindex.parse(key)
def eval(self, archive, context, app, exp_context):
value = context[self.index]
if is_missing(value):
raise DBEvalError(
"Database expression value '{}' is missing from the context".format(
self.index
)
)
return dbobject(unproxy(value))
class EvalModelReference(object):
"""Gets a model reference"""
    _ref_model_ref = re.compile(r"^(.*?#.*?)(?:\.(.*?))?$")
def __init__(self, tokens):
self.index = tokens[0]
def eval(self, archive, context, app, exp_context):
model_ref, index = self._ref_model_ref.match(self.index).groups()
app = app or context.get(".app", None)
if app is None:
raise DBEvalError("unable to get app from '{}'".format(self.index))
if index is None:
app, model_element = app.get_element(model_ref)
try:
table_class = model_element.get_table_class(app)
except Exception as e:
raise DBEvalError(str(e))
return table_class
index = list(dataindex.parse(index))
app, model_element = app.get_element(model_ref)
try:
table_class = model_element.get_table_class(app)
except Exception as e:
raise DBEvalError(str(e))
try:
model_reference_result = table_class._get_index(
archive, context, app, exp_context, index
)
except (KeyError, AttributeError):
raise DBEvalError('no column or object called "{}"'.format(self.index))
else:
return model_reference_result
class EvalComparisonOp(object):
"Class to evaluate comparison expressions"
@classmethod
def match_re(cls, a, b):
return bool(b.match(a))
@classmethod
def escape_like(cls, like, _should_escape="\\%_".__contains__):
"""escape LIKE comparisons"""
if not isinstance(like, text_type):
return like
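        # e.g. escape_like('50%_off') yields the string 50\%\_off, so a LIKE
        # comparison treats '%' and '_' as literal characters, not wildcards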
return "".join("\\" + c if _should_escape(c) else c for c in like)
def in_(context, a, b):
if hasattr(b, "__moyadbsubselect__"):
sub_b = b.__moyadbsubselect__(context)
if sub_b is not None:
b = sub_b
a = qs(a)
try:
return a.in_(qs(b))
except:
raise DBEvalError(
"db expression 'in' operator works on columns only (did you mean .id)?"
)
def notin_(context, a, b):
if hasattr(b, "__moyadbsubselect__"):
sub_b = b.__moyadbsubselect__(context)
if sub_b is not None:
b = sub_b
a = qs(a)
try:
return a.notin_(qs(b))
except:
raise DBEvalError(
"db expression 'not in' operator works on columns only (did you mean .id)?"
)
def contains_(context, a, b):
try:
return qs(a).contains(qs(b))
except:
raise DBEvalError(
"value {} is an invalid operand for the 'contains' operator".format(
to_expression(context, b)
)
)
def icontains_(context, a, b):
if not isinstance(b, text_type):
raise DBEvalError(
"icontains right hand side should be a string, not {}".format(
context.to_expr(b)
)
)
b = "%{}%".format(EvalComparisonOp.escape_like(b))
try:
return qs(a).like(b)
except:
raise DBEvalError(
"{} may not be used with 'icontains' operator".format(
context.to_expr(a)
)
)
def ieq(context, a, b):
if not isinstance(b, text_type):
raise DBEvalError(
"case insensitive equality operator (~=) right hand side should be a string, not {}".format(
context.to_expr(b)
)
)
return qs(a).ilike(EvalComparisonOp.escape_like(b), escape="\\")
opMap = {
"<": lambda c, a, b: qs(a) < qs(b),
"lt": lambda c, a, b: qs(a) < qs(b),
"<=": lambda c, a, b: qs(a) <= qs(b),
"lte": lambda c, a, b: qs(a) <= qs(b),
">": lambda c, a, b: qs(a) > qs(b),
"gt": lambda c, a, b: qs(a) > qs(b),
">=": lambda c, a, b: qs(a) >= qs(b),
"gte": lambda c, a, b: qs(a) >= qs(b),
"!=": lambda c, a, b: qs(a) != qs(b),
"==": lambda c, a, b: qs(a) == qs(b),
"like": lambda c, a, b: qs(a).like(qs(b)),
"ilike": lambda c, a, b: qs(a).ilike(qs(b)),
# "~=": lambda c, a, b: qs(a).ilike(qs(EvalComparisonOp.escape_like(b)), escape='\\'),
"~=": ieq,
"^=": lambda c, a, b: qs(a).startswith(qs(b)),
"$=": lambda c, a, b: qs(a).endswith(qs(b)),
"in": in_,
"not in": notin_,
"contains": contains_,
"icontains": icontains_,
# "icontains": lambda c, a, b: qs(a).like('%' + EvalComparisonOp.escape_like(b) + '%', escape='\\')
}
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.operator_eval = [
(self.opMap[op], val.eval) for op, val in pairs(self.value[1:])
]
def eval(self, archive, context, app, exp_context):
val1 = self._eval(archive, context, app, exp_context)
for op_func, _eval in self.operator_eval:
val2 = _eval(archive, context, app, exp_context)
val1 = op_func(context, val1, val2)
return val1
class EvalLogicOpAND(object):
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.operator_eval = [val.eval for op, val in pairs(self.value[1:])]
def eval(self, archive, context, app, exp_context):
val1 = self._eval(archive, context, app, exp_context)
for _eval in self.operator_eval:
val2 = _eval(archive, context, app, exp_context)
val1 = and_(val1, val2)
return val1
class EvalLogicOpOR(object):
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.operator_eval = [val.eval for op, val in pairs(self.value[1:])]
def eval(self, archive, context, app, exp_context):
val1 = self._eval(archive, context, app, exp_context)
for _eval in self.operator_eval:
val2 = _eval(archive, context, app, exp_context)
val1 = or_(val1, val2)
return val1
class EvalGroupOp(object):
def __init__(self, tokens):
self._evals = [t.eval for t in tokens[0][0::2]]
def eval(self, archive, context, app, exp_context):
val = [eval(archive, context, app, exp_context) for eval in self._evals]
return val
class EvalNotOp(object):
"""Class to evaluate expressions with logical NOT"""
def __init__(self, tokens):
self._eval = tokens[0][1].eval
def eval(self, archive, context, app, exp_context):
return not_(self._eval(archive, context, app, exp_context))
integer = Word(nums)
real = Combine(Word(nums) + "." + Word(nums))
constant = (
Literal("True")
| Literal("False")
| Literal("None")
| Literal("yes")
| Literal("no")
) + WordEnd()
model_reference = Regex(r"([\w\.]*#[\w\.]+)")
variable = Regex(r"([a-zA-Z0-9\._]+)")
string = QuotedString('"', escChar="\\") | QuotedString("'", escChar="\\")
operand = model_reference | real | integer | constant | string | variable
plusop = oneOf("+ -")
multop = oneOf("* / // %")
groupop = Literal(",")
expr = Forward()
notop = Literal("not") + WordEnd()
modifier = Combine(Word(alphas + nums) + ":")
integer.setParseAction(EvalInteger)
real.setParseAction(EvalReal)
string.setParseAction(EvalString)
constant.setParseAction(EvalConstant)
variable.setParseAction(EvalVariable)
model_reference.setParseAction(EvalModelReference)
comparisonop = (
oneOf("< <= > >= != == ~= ^= $=")
| (Literal("not in") + WordEnd())
| (oneOf("in lt lte gt gte matches contains icontains like ilike") + WordEnd())
)
logicopOR = Literal("or") + WordEnd()
logicopAND = Literal("and") + WordEnd()
expr << operatorPrecedence(
operand,
[
(notop, 1, opAssoc.RIGHT, EvalNotOp),
(modifier, 1, opAssoc.RIGHT, EvalModifierOp),
(multop, 2, opAssoc.LEFT, EvalMultOp),
(plusop, 2, opAssoc.LEFT, EvalAddOp),
(comparisonop, 2, opAssoc.LEFT, EvalComparisonOp),
(logicopAND, 2, opAssoc.LEFT, EvalLogicOpAND),
(logicopOR, 2, opAssoc.LEFT, EvalLogicOpOR),
(groupop, 2, opAssoc.LEFT, EvalGroupOp),
],
)
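# A sketch of the resulting precedence (model names here are hypothetical):
#   "#Post.published == yes and #Post.views > 100"
# parses to an EvalLogicOpAND whose operands are two EvalComparisonOp nodes
# built from model references, a constant, and an integer. Modifier prefixes
# such as "count:" map to the ExpressionModifiers methods (func.count etc.).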
@implements_to_string
class DBExpression(object):
exp_cache = {}
_lock = threading.Lock()
def __init__(self, exp):
self.exp = exp
def __repr__(self):
return '<DBExpression "%s">' % self.exp
def __str__(self):
return self.exp
def eval(self, archive, context, app=None):
exp_context = ExpressionContext(self.exp)
try:
eval = self.compile_cache(self.exp)
result = eval(archive, context, app, exp_context)
except DBEvalError as e:
raise DBExpressionError(self.exp, text_type(e))
return result
def eval2(self, archive, context, app=None):
exp_context = ExpressionContext(self.exp)
try:
eval = self.compile_cache(self.exp)
result = eval(archive, context, app, exp_context)
except DBEvalError as e:
raise DBExpressionError(self.exp, text_type(e))
return result, exp_context
def compile(self):
return self.compile_cache(self.exp)
def compile_cache(self, exp):
with self._lock:
try:
return self.exp_cache[exp]
except KeyError:
try:
compiled_exp = expr.parseString(exp, parseAll=True)
except ParseException as e:
raise DBExpressionError(exp, text_type(e), col=e.col)
eval = self.exp_cache[exp] = compiled_exp[0].eval
return eval
if __name__ == "__main__":
"""
<db:filter model="#TagDB">#TagDB.name==name and #TagDB.company.pk==company_pk</db:filter>
"""
exp = DBExpression("moya.auth#User.username=='will'")
print(exp.compile())
exp = DBExpression("auth#User.username=='will'")
print(exp.compile())
exp = DBExpression(
"comments#Comment.namespace == app.name and comments#Comment.object in comment_keys"
)
print(exp.compile())
exp = DBExpression("#CommentObject.count + 1")
print(exp.compile())
|
|
# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.volume.drivers.emc import xtremio
typ2id = {'volumes': 'vol-id',
'snapshots': 'vol-id',
'initiators': 'initiator-id',
'initiator-groups': 'ig-id',
'lun-maps': 'mapping-id',
'consistency-groups': 'cg-id',
'consistency-group-volumes': 'cg-vol-id',
}
xms_data = {'xms': {1: {'version': '4.0.0'}},
'clusters': {1: {'name': 'brick1',
'sys-sw-version': "4.0.0-devel_ba23ee5381eeab73",
'ud-ssd-space': '8146708710',
'ud-ssd-space-in-use': '708710',
'vol-size': '29884416',
'chap-authentication-mode': 'disabled',
'chap-discovery-mode': 'disabled',
"index": 1,
},
},
'target-groups': {'Default': {"index": 1, },
},
'iscsi-portals': {'10.205.68.5/16':
{"port-address":
"iqn.2008-05.com.xtremio:001e67939c34",
"ip-port": 3260,
"ip-addr": "10.205.68.5/16",
"name": "10.205.68.5/16",
"index": 1,
},
},
'targets': {'X1-SC2-fc1': {'index': 1, "name": "X1-SC2-fc1",
"port-address":
"21:00:00:24:ff:57:b2:36",
'port-state': 'up',
},
'X1-SC2-fc2': {'index': 2, "name": "X1-SC2-fc2",
"port-address":
"21:00:00:24:ff:57:b2:55",
'port-state': 'up',
}
},
'volumes': {},
'initiator-groups': {},
'initiators': {},
'lun-maps': {},
'consistency-groups': {},
'consistency-group-volumes': {},
}
def get_xms_obj_by_name(typ, name):
for item in xms_data[typ].values():
if 'name' in item and item['name'] == name:
return item
raise exception.NotFound()
def clean_xms_data():
xms_data['volumes'] = {}
xms_data['initiator-groups'] = {}
xms_data['initiators'] = {}
xms_data['lun-maps'] = {}
xms_data['consistency-group-volumes'] = {}
xms_data['consistency-groups'] = {}
def fix_data(data, object_type):
d = {}
for key, value in data.items():
if 'name' in key:
key = 'name'
d[key] = value
if object_type == 'lun-maps':
d['lun'] = 1
d[typ2id[object_type]] = ["a91e8c81c2d14ae4865187ce4f866f8a",
d.get('name'),
len(xms_data.get(object_type, [])) + 1]
d['index'] = len(xms_data[object_type]) + 1
return d
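# A minimal sketch (hypothetical payload) of what fix_data() returns for a
# POST to 'volumes' while xms_data['volumes'] is empty: any key containing
# 'name' is normalized to 'name', and the id triple mimics the XMS
# [guid, name, index] format keyed through typ2id:
#
#   fix_data({'vol-name': 'vol1'}, 'volumes')
#   # -> {'name': 'vol1',
#   #     'vol-id': ['a91e8c81c2d14ae4865187ce4f866f8a', 'vol1', 1],
#   #     'index': 1}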
def get_xms_obj_key(data):
for key in data.keys():
if 'name' in key:
return key
def get_obj(typ, name, idx):
if name:
return {"content": get_xms_obj_by_name(typ, name)}
elif idx:
if idx not in xms_data.get(typ, {}):
raise exception.NotFound()
return {"content": xms_data[typ][idx]}
def xms_request(object_type='volumes', request_typ='GET', data=None,
name=None, idx=None, ver='v1'):
if object_type == 'snapshots':
object_type = 'volumes'
try:
res = xms_data[object_type]
except KeyError:
raise exception.VolumeDriverException
if request_typ == 'GET':
if name or idx:
return get_obj(object_type, name, idx)
else:
if data and data.get('full') == 1:
return {object_type: list(res.values())}
else:
return {object_type: [{"href": "/%s/%d" % (object_type,
obj['index']),
"name": obj.get('name')}
for obj in res.values()]}
elif request_typ == 'POST':
data = fix_data(data, object_type)
name_key = get_xms_obj_key(data)
try:
if name_key and get_xms_obj_by_name(object_type, data[name_key]):
raise exception.VolumeBackendAPIException(
'Volume by this name already exists')
except exception.NotFound:
pass
data['index'] = len(xms_data[object_type]) + 1
xms_data[object_type][data['index']] = data
# find the name key
if name_key:
data['name'] = data[name_key]
if object_type == 'lun-maps':
data['ig-name'] = data['ig-id']
return {"links": [{"href": "/%s/%d" %
(object_type, data[typ2id[object_type]][2])}]}
elif request_typ == 'DELETE':
if object_type == 'consistency-group-volumes':
data = [cgv for cgv in
xms_data['consistency-group-volumes'].values()
if cgv['vol-id'] == data['vol-id']
and cgv['cg-id'] == data['cg-id']][0]
else:
data = get_obj(object_type, name, idx)['content']
if data:
del xms_data[object_type][data['index']]
else:
raise exception.NotFound()
elif request_typ == 'PUT':
obj = get_obj(object_type, name, idx)['content']
data = fix_data(data, object_type)
del data['index']
obj.update(data)
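# Usage sketch (hypothetical state): with a single volume in xms_data and no
# name/idx given, a plain GET returns the href/name listing, mirroring the
# shape of the real XMS REST response:
#
#   xms_request('volumes', 'GET')
#   # -> {'volumes': [{'href': '/volumes/1', 'name': 'vol1'}]}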
def xms_bad_request(object_type='volumes', request_typ='GET', data=None,
name=None, idx=None, ver='v1'):
if request_typ == 'GET':
raise exception.NotFound()
elif request_typ == 'POST':
raise exception.VolumeBackendAPIException('Failed to create ig')
def xms_failed_rename_snapshot_request(object_type='volumes',
request_typ='GET', data=None,
name=None, idx=None, ver='v1'):
if request_typ == 'POST':
xms_data['volumes'][27] = {}
return {
"links": [
{
"href": "https://host/api/json/v2/types/snapshots/27",
"rel": "self"}]}
elif request_typ == 'PUT':
raise exception.VolumeBackendAPIException(data='Failed to delete')
elif request_typ == 'DELETE':
del xms_data['volumes'][27]
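# D is a dict whose update() also mirrors items into __dict__, so fixtures
# built from it (e.g. test_snapshot below) support both d['key'] and d.key
# access, like the objects the driver normally receives.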
class D(dict):
def update(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
return dict.update(self, *args, **kwargs)
class CommonData(object):
connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': ["123456789012345", "123456789054321"],
'wwnns': ["223456789012345", "223456789054321"],
'host': 'fakehost',
}
test_volume = {'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '192eb39b-6c2f-420c-bae3-3cfd117f0001',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id':
'192eb39b-6c2f-420c-bae3-3cfd117f0345',
}
test_snapshot = D()
test_snapshot.update({'name': 'snapshot1',
'size': 1,
'id': '192eb39b-6c2f-420c-bae3-3cfd117f0002',
'volume_name': 'vol-vol1',
'volume_id': '192eb39b-6c2f-420c-bae3-3cfd117f0001',
'project_id': 'project',
'consistencygroup_id':
'192eb39b-6c2f-420c-bae3-3cfd117f0345',
})
test_snapshot.__dict__.update(test_snapshot)
test_volume2 = {'name': 'vol2',
'size': 1,
'volume_name': 'vol2',
'id': '192eb39b-6c2f-420c-bae3-3cfd117f0004',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol2',
'display_description': 'test volume 2',
'volume_type_id': None,
'consistencygroup_id':
'192eb39b-6c2f-420c-bae3-3cfd117f0345',
}
test_clone = {'name': 'clone1',
'size': 1,
'volume_name': 'vol3',
'id': '192eb39b-6c2f-420c-bae3-3cfd117f0003',
'provider_auth': None,
'project_id': 'project',
'display_name': 'clone1',
'display_description': 'volume created from snapshot',
'volume_type_id': None,
'consistencygroup_id':
'192eb39b-6c2f-420c-bae3-3cfd117f0345',
}
unmanaged1 = {'id': 'unmanaged1',
'name': 'unmanaged1',
'size': 3,
}
context = {'user': 'admin', }
group = {'id': '192eb39b-6c2f-420c-bae3-3cfd117f0345',
'name': 'cg1',
'status': 'OK',
}
cgsnapshot = mock.Mock(id='192eb39b-6c2f-420c-bae3-3cfd117f9876',
consistencygroup_id=group['id'])
def cgsnap_getitem(self, val):
return self.__dict__[val]
cgsnapshot.__getitem__ = cgsnap_getitem
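# cgsnapshot is a Mock standing in for a CGSnapshot object; wiring up
# __getitem__ lets driver code use both attribute and dict-style access.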
@mock.patch('cinder.volume.drivers.emc.xtremio.XtremIOClient.req')
class EMCXIODriverISCSITestCase(test.TestCase):
def setUp(self):
super(EMCXIODriverISCSITestCase, self).setUp()
clean_xms_data()
config = mock.Mock()
config.san_login = ''
config.san_password = ''
config.san_ip = ''
config.xtremio_cluster_name = 'brick1'
config.xtremio_provisioning_factor = 20.0
def safe_get(key):
return getattr(config, key)
config.safe_get = safe_get
self.driver = xtremio.XtremIOISCSIDriver(configuration=config)
self.driver.client = xtremio.XtremIOClient4(
config, config.xtremio_cluster_name)
self.data = CommonData()
def test_check_for_setup_error(self, req):
req.side_effect = xms_request
clusters = xms_data['clusters']
del xms_data['clusters']
self.assertRaises(exception.VolumeDriverException,
self.driver.check_for_setup_error)
xms_data['clusters'] = clusters
self.driver.check_for_setup_error()
def test_create_extend_delete_volume(self, req):
req.side_effect = xms_request
self.driver.create_volume(self.data.test_volume)
self.driver.extend_volume(self.data.test_volume, 5)
self.driver.delete_volume(self.data.test_volume)
def test_create_delete_snapshot(self, req):
req.side_effect = xms_request
self.driver.create_volume(self.data.test_volume)
self.driver.create_snapshot(self.data.test_snapshot)
self.assertEqual(self.data.test_snapshot['id'],
xms_data['volumes'][2]['name'])
self.driver.delete_snapshot(self.data.test_snapshot)
self.driver.delete_volume(self.data.test_volume)
def test_failed_rename_snapshot(self, req):
req.side_effect = xms_failed_rename_snapshot_request
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
self.data.test_snapshot)
self.assertEqual(0, len(xms_data['volumes']))
def test_volume_from_snapshot(self, req):
req.side_effect = xms_request
xms_data['volumes'] = {}
self.driver.create_volume(self.data.test_volume)
self.driver.create_snapshot(self.data.test_snapshot)
self.driver.create_volume_from_snapshot(self.data.test_volume2,
self.data.test_snapshot)
self.driver.delete_volume(self.data.test_volume2)
self.driver.delete_volume(self.data.test_snapshot)
self.driver.delete_volume(self.data.test_volume)
def test_clone_volume(self, req):
req.side_effect = xms_request
self.driver.create_volume(self.data.test_volume)
self.driver.create_cloned_volume(self.data.test_clone,
self.data.test_volume)
self.driver.delete_volume(self.data.test_clone)
self.driver.delete_volume(self.data.test_volume)
def test_duplicate_volume(self, req):
req.side_effect = xms_request
self.driver.create_volume(self.data.test_volume)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, self.data.test_volume)
self.driver.delete_volume(self.data.test_volume)
def test_no_portals_configured(self, req):
req.side_effect = xms_request
portals = xms_data['iscsi-portals'].copy()
xms_data['iscsi-portals'].clear()
lunmap = {'lun': 4}
self.assertRaises(exception.VolumeDriverException,
self.driver._get_iscsi_properties, lunmap)
xms_data['iscsi-portals'] = portals
def test_initialize_terminate_connection(self, req):
req.side_effect = xms_request
self.driver.create_volume(self.data.test_volume)
map_data = self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
self.assertEqual(map_data['data']['target_lun'], 1)
i1 = xms_data['initiators'][1]
i1['ig-id'] = ['', i1['ig-id'], 1]
i1['chap-authentication-initiator-password'] = 'chap_password1'
i1['chap-discovery-initiator-password'] = 'chap_password2'
map_data = self.driver.initialize_connection(self.data.test_volume2,
self.data.connector)
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
def test_initialize_chap_connection(self, req):
req.side_effect = xms_request
clean_xms_data()
self.driver.create_volume(self.data.test_volume)
map_data = self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
c1 = xms_data['clusters'][1]
c1['chap-authentication-mode'] = 'initiator'
c1['chap-discovery-mode'] = 'initiator'
i1 = xms_data['initiators'][1]
i1['ig-id'] = ['', i1['ig-id'], 1]
i1['chap-authentication-initiator-password'] = 'chap_password1'
i1['chap-discovery-initiator-password'] = 'chap_password2'
map_data = self.driver.initialize_connection(self.data.test_volume2,
self.data.connector)
self.assertEqual('chap_password1', map_data['data']['auth_password'])
self.assertEqual('chap_password2',
map_data['data']['discovery_auth_password'])
i1['chap-authentication-initiator-password'] = None
i1['chap-discovery-initiator-password'] = None
map_data = self.driver.initialize_connection(self.data.test_volume2,
self.data.connector)
def test_initialize_connection_bad_ig(self, req):
req.side_effect = xms_bad_request
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
self.data.test_volume,
self.data.connector)
self.driver.delete_volume(self.data.test_volume)
def test_get_stats(self, req):
req.side_effect = xms_request
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats['volume_backend_name'],
self.driver.backend_name)
def test_manage_unmanage(self, req):
req.side_effect = xms_request
xms_data['volumes'] = {1: {'name': 'unmanaged1',
'index': 1,
'vol-size': '3',
},
}
ref_vol = {"source-name": "unmanaged1"}
invalid_ref = {"source-name": "invalid"}
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
self.data.test_volume, invalid_ref)
self.driver.manage_existing_get_size(self.data.test_volume, ref_vol)
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
self.data.test_volume, invalid_ref)
self.driver.manage_existing(self.data.test_volume, ref_vol)
self.assertRaises(exception.VolumeNotFound, self.driver.unmanage,
self.data.test_volume2)
self.driver.unmanage(self.data.test_volume)
@mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
def test_cg_operations(self, get_all_for_cgsnapshot, req):
req.side_effect = xms_request
d = self.data
snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context)
snapshot_obj.consistencygroup_id = d.group['id']
get_all_for_cgsnapshot.return_value = [snapshot_obj]
self.driver.create_consistencygroup(d.context, d.group)
self.assertEqual(1, len(xms_data['consistency-groups']))
self.driver.update_consistencygroup(d.context, d.group,
add_volumes=[d.test_volume,
d.test_volume2])
self.assertEqual(2, len(xms_data['consistency-group-volumes']))
self.driver.update_consistencygroup(d.context, d.group,
remove_volumes=[d.test_volume2])
self.assertEqual(1, len(xms_data['consistency-group-volumes']))
self.driver.db = mock.Mock()
(self.driver.db.
volume_get_all_by_group.return_value) = [mock.MagicMock()]
self.driver.create_cgsnapshot(d.context, d.cgsnapshot)
snaps_name = self.driver._get_cgsnap_name(d.cgsnapshot)
snaps = xms_data['volumes'][1]
snaps['index'] = 1
xms_data['snapshot-sets'] = {snaps_name: snaps, 1: snaps}
self.assertRaises(exception.InvalidInput,
self.driver.create_consistencygroup_from_src,
d.context, d.group, [])
self.driver.delete_cgsnapshot(d.context, d.cgsnapshot)
self.driver.delete_consistencygroup(d.context, d.group)
@mock.patch('requests.request')
class EMCXIODriverTestCase(test.TestCase):
def setUp(self):
super(EMCXIODriverTestCase, self).setUp()
configuration = mock.Mock()
configuration.san_login = ''
configuration.san_password = ''
configuration.san_ip = ''
configuration.xtremio_cluster_name = ''
def safe_get(key):
return getattr(configuration, key)
configuration.safe_get = safe_get
self.driver = xtremio.XtremIOISCSIDriver(configuration=configuration)
self.data = CommonData()
def test_retry_request(self, req):
busy_response = mock.MagicMock()
busy_response.status_code = 400
busy_response.json.return_value = {
"message": "system_is_busy",
"error_code": 400
}
good_response = mock.MagicMock()
good_response.status_code = 200
EMCXIODriverTestCase.req_count = 0
def busy_request(*args, **kwargs):
if EMCXIODriverTestCase.req_count < 1:
EMCXIODriverTestCase.req_count += 1
return busy_response
return good_response
req.side_effect = busy_request
self.driver.create_volume(self.data.test_volume)
@mock.patch('cinder.volume.drivers.emc.xtremio.XtremIOClient.req')
class EMCXIODriverFibreChannelTestCase(test.TestCase):
def setUp(self):
super(EMCXIODriverFibreChannelTestCase, self).setUp()
clean_xms_data()
config = mock.Mock()
config.san_login = ''
config.san_password = ''
config.san_ip = ''
config.xtremio_cluster_name = ''
config.xtremio_provisioning_factor = 20.0
self.driver = xtremio.XtremIOFibreChannelDriver(
configuration=config)
self.driver.client = xtremio.XtremIOClient4(
config, config.xtremio_cluster_name)
self.data = CommonData()
def test_initialize_terminate_connection(self, req):
req.side_effect = xms_request
self.driver.create_volume(self.data.test_volume)
map_data = self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
self.assertEqual(map_data['data']['target_lun'], 1)
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
self.driver.delete_volume(self.data.test_volume)
|
|
import os
import numpy
import shutil
import json
from shock import Client as ShockClient
from biokbase.CompressionBasedDistance.Helpers import extract_seq, run_command, make_job_dir, timestamp, CommandError
from biokbase.userandjobstate.client import UserAndJobState
from multiprocessing import Pool
from itertools import combinations
from biokbase import log
# String used to separate components in paired file names.
PairSeparator = '-cbdpair-'
# Exception thrown when extract sequences failed
class ExtractError(Exception):
pass
# Exception thrown when sorting a sequence file failed
class SortError(Exception):
pass
# Exception thrown when merging sequence files failed
class MergeError(Exception):
pass
# Exception thrown when compressing a sequence file failed
class CompressError(Exception):
pass
# Exception thrown when sequence files have different sequence lengths.
class SeqLenError(Exception):
pass
# Exception thrown when saving a file to Shock failed.
class ShockError(Exception):
pass
class CompressionBasedDistance:
''' Calculate the compression based distance metric and save distance matrix to a file.
@param fileList List of paths to compressed files
@param scale Scale of distance values, 'std' for 0 to 1, 'inf' for 0 to infinity
@param outputFile Path to file with output distance matrix
@return Nothing
'''
def _cbdCalculator(self, fileList, scale, outputFile):
# Parse the files.
single_sizes = dict()
pair_sizes = dict()
for sourceFile in fileList:
# Should strip prefix too
fbase = os.path.basename(sourceFile)
# This works as long as '.sorted.xz' only occurs at the end of the path.
fname = fbase.replace('.sorted.xz', '')
if PairSeparator in fname:
pair_sizes[fname] = os.path.getsize(sourceFile)
else:
single_sizes[fname] = os.path.getsize(sourceFile)
# Map file names to indices.
fnames = sorted(single_sizes.keys())
indices = dict()
for i, name in enumerate(fnames):
indices[name] = i
# Compute the distance scores.
pair_names = pair_sizes.keys()
cbd_array = numpy.zeros((len(fnames), len(fnames)), dtype=float)
for pair in pair_names:
name1, name2 = pair.split(PairSeparator)
c1 = float(single_sizes[name1])
c2 = float(single_sizes[name2])
c12 = float(pair_sizes[pair])
distance = 1.0 - ( 2.0 * ( (c1 + c2 - c12) / (c1 + c2) ) )
if distance > 1.0:
part1 = "Distance %f is greater than 1.0. " %(distance)
part2 = "Check sequence read lengths and relative number of sequence reads. "
part3 = "(c1=%f %s, c2=%f %s c12=%f %s)" %(c1, name1, c2, name2, c12, pair)
raise ValueError(part1+part2+part3)
if scale == 'inf':
distance = distance/(1.0 - distance)
cbd_array[indices[name1],indices[name2]] = distance
cbd_array[indices[name2],indices[name1]] = distance
# Build the output file in CSV format.
outf = open(outputFile, 'w')
outf.write('ID,' + ','.join(fnames) + '\n')
for i in range(len(fnames)):
outf.write(fnames[i] + ',' + ','.join(['{0:g}'.format(x) for x in cbd_array[i,:]]) + '\n')
outf.close()
return
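# Worked example (hypothetical sizes) of the distance formula above: with
# single compressed sizes c1=100.0 and c2=120.0 and a combined compressed
# size c12=180.0,
#   distance = 1.0 - (2.0 * ((100.0 + 120.0 - 180.0) / (100.0 + 120.0)))
#            = 1.0 - (2.0 * (40.0 / 220.0))
#            ~ 0.6364
# and with scale == 'inf' it is rescaled to distance/(1.0 - distance) = 1.75.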
''' Cleanup after running a job.
@note All temporary files are removed even when there is an error.
@return Nothing
'''
def _cleanup(self):
# Delete input fasta files from Shock.
for nodeId in self.input['node_ids']:
try:
self.shockClient.delete_node(nodeId)
except Exception as e:
self._log(log.ERR, 'Error deleting node %s from Shock: %s' %(nodeId, e.message))
# Remove the work directory.
shutil.rmtree(self.jobDirectory)
# Stop the process pool.
self.pool.close()
self.pool.join()
return
''' Log a message to the system log.
@param level Message level (INFO, WARNING, etc.)
@param message Message text
@return Nothing
'''
def _log(self, level, message):
# Create a logger if this is the first time the method has been called.
if self.logger is None:
submod = os.environ.get('KB_SERVICE_NAME', 'CompressionBasedDistance')
self.logger = log.log(submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, config=os.getenv('KB_DEPLOYMENT_CONFIG'))
# Log the message.
self.logger.log_message(level, message, self.context['client_ip'], self.context['user_id'], self.context['module'],
self.context['method'], self.context['call_id'])
return
def __init__(self):
self.logger = None
''' Run a job to build a distance matrix.
When successful the distance matrix csv file is stored in Shock.
@param job Dictionary with configuration variables, context variables, and input variables for job
@raise ExtractError: Error extracting sequences from input sequence file
@raise SeqLenError: Error with lengths of sequences in input sequence file
@raise SortError: Error sorting a raw sequence file
@raise MergeError: Error merging a raw sequence file
@raise CompressError: Error compressing a raw sequence file
@raise ShockError: Error saving file to Shock
@return Nothing
'''
def runJob(self, job):
self.config = job['config']
self.context = job['context']
self.input = job['input']
# Create a shock client and authenticate as the user.
self.shockClient = ShockClient(self.config['shock_url'], self.context['token'])
# Create a user and job state client and authenticate as the user.
ujsClient = UserAndJobState(self.config['userandjobstate_url'], token=self.context['token'])
# Create a process pool.
self.pool = Pool(processes=int(self.config['num_pool_processes']))
# Create a work directory for storing intermediate files.
self.jobDirectory = make_job_dir(self.config['work_folder_path'], job['id'])
self._log(log.INFO, 'Job '+job['id']+' running with work folder '+self.jobDirectory)
# Download input fasta files from Shock and extract sequences to work directory.
try:
ujsClient.update_job_progress(job['id'], self.context['token'], 'extracting sequence files', 1, timestamp(3600))
except:
pass
resultList = []
sequenceList = []
for nodeId in self.input['node_ids']:
node = self.shockClient.get_node(nodeId)
sourceFile = os.path.join(self.jobDirectory, node['file']['name'])
destFile = '%s.sequence' %(os.path.splitext(sourceFile)[0])
if PairSeparator in destFile: # Check for pair separator string in file name and replace as needed.
destFile = destFile.replace(PairSeparator, '-')
sequenceList.append(destFile)
args = dict() # Needs to be scoped here so each process gets its own copy
args['format'] = self.input['format']
args['shockUrl'] = self.config['shock_url']
args['auth'] = self.context['token']
args['sequenceLen'] = self.input['sequence_length']
args['minReads'] = self.input['min_reads']
args['maxReads'] = self.input['max_reads']
args['nodeId'] = nodeId
args['sourceFile'] = sourceFile
args['destFile'] = destFile
result = self.pool.apply_async(extract_seq, (args,))
resultList.append(result)
for result in resultList:
if result.get() != 0:
self._cleanup()
raise ExtractError("Error extracting sequences from input sequence file, result: %d" %(result.get()))
for path in self.input['file_paths']:
sourceFile = os.path.basename(path)
destFile = '%s/%s.sequence' %(self.jobDirectory, os.path.splitext(sourceFile)[0])
if PairSeparator in destFile: # Check for pair separator string in file name and replace as needed.
destFile = destFile.replace(PairSeparator, '-')
sequenceList.append(destFile)
args = dict() # Needs to be scoped here so each process gets its own copy
args['format'] = self.input['format']
args['shockUrl'] = self.config['shock_url']
args['auth'] = self.context['token']
args['sequenceLen'] = self.input['sequence_length']
args['minReads'] = self.input['min_reads']
args['maxReads'] = self.input['max_reads']
args['nodeId'] = None
args['sourceFile'] = path
args['destFile'] = destFile
result = self.pool.apply_async(extract_seq, (args,))
resultList.append(result)
for result in resultList:
try:
result.get()
except Exception as e:
self._cleanup()
raise ExtractError("Error extracting sequences from input sequence file: %s" %(e.message))
# Confirm that each file met the criteria for sequence length and number of sequences.
filesToRemove = list()
for index in range(len(sequenceList)):
# See if the file did not have the minimum number of sequences.
if not os.path.exists(sequenceList[index]):
filesToRemove.append(index)
continue
# See if the file has no data.
if os.path.getsize(sequenceList[index]) == 0:
self._cleanup()
raise SeqLenError("Sequence file '%s' has no sequences" %(sequenceList[index]))
filteredList = list()
for index in range(len(sequenceList)):
if index not in filesToRemove:
filteredList.append(sequenceList[index])
if len(filteredList) < 2:
self._cleanup()
raise SeqLenError("There are not enough sequence files that meet the sequence length or number of sequences criteria.")
# Sort the sequences.
try:
ujsClient.update_job_progress(job['id'], self.context['token'], 'sorting sequence files', 1, timestamp(3600))
except:
pass
resultList = []
sortedList = []
for sourceFile in filteredList:
destFile = '%s.sorted' %(os.path.splitext(sourceFile)[0])
sortedList.append(destFile)
args = [ '/usr/bin/sort', '--output=%s' %(destFile), sourceFile ]
result = self.pool.apply_async(run_command, (args,))
resultList.append(result)
for result in resultList:
try:
result.get()
except CommandError as e:
self._cleanup()
raise SortError("Error sorting sequence file: %s\nCommand: '%s'\nStdout: '%s'\nStderr: '%s'" %(e.message, e.cmd, e.stdout, e.stderr))
# Create combined and sorted files.
try:
ujsClient.update_job_progress(job['id'], self.context['token'], 'merging all pairs of sequence files', 1, timestamp(3600))
except:
pass
resultList = []
for p,q in combinations(sortedList, 2):
pbase = os.path.basename(p)
qbase = os.path.basename(q)
dbase = '%s%s%s.sorted' %(os.path.splitext(pbase)[0], PairSeparator, os.path.splitext(qbase)[0])
destFile = os.path.join(self.jobDirectory, dbase)
sortedList.append(destFile)
args = [ '/usr/bin/sort', '-m', '--output=%s' %(destFile), p, q ]
result = self.pool.apply_async(run_command, (args,))
resultList.append(result)
for result in resultList:
try:
result.get()
except CommandError as e:
self._cleanup()
raise MergeError("Error merging sequence file: %s\nCommand: '%s'\nStdout: '%s'\nStderr: '%s'" %(e.message, e.cmd, e.stdout, e.stderr))
# Compress all sorted files.
try:
ujsClient.update_job_progress(job['id'], self.context['token'], 'compressing sequence files', 1, timestamp(3600))
except:
pass
resultList = []
compressedList = []
for sourceFile in sortedList:
compressedList.append(sourceFile+'.xz')
if self.input['extreme']:
level = '-9e'
else:
level = '-9'
args = [ '/usr/bin/xz', '--keep', level, '--no-warn', sourceFile ]
result = self.pool.apply_async(run_command, (args,))
resultList.append(result)
for result in resultList:
try:
result.get()
except CommandError as e:
self._cleanup()
raise CompressError("Error compressing sequence file: %s\nCommand: '%s'\nStdout: '%s'\nStderr: '%s'" %(e.message, e.cmd, e.stdout, e.stderr))
# Calculate the distance matrix.
try:
ujsClient.update_job_progress(job['id'], self.context['token'], 'calculating distance matrix', 1, timestamp(3600))
except:
pass
csvFile = os.path.join(self.jobDirectory, '%s.csv' %(job['id']))
self._cbdCalculator(compressedList, self.input['scale'], csvFile)
# Store the output file in shock.
try:
ujsClient.update_job_progress(job['id'], self.context['token'], 'storing output file in shock', 1, timestamp(3600))
except:
pass
node = self.shockClient.create_node(csvFile, '')
if not node['id']:
# Shock let us down. Save the distance matrix in the work directory for possible recovery.
os.rename(csvFile, '%s/%s.csv' %(self.config['work_folder_path'], job['id']))
self._cleanup()
raise ShockError("Error saving distance matrix file to Shock. A Shock node was not created.")
# Mark the job as complete.
results = { 'shocknodes': [ node['id'] ], 'shockurl': self.config['shock_url'] }
ujsClient.complete_job(job['id'], self.context['token'], 'done', None, results)
self._log(log.INFO, 'Job '+job['id']+' completed successfully')
# Cleanup after ourselves.
self._cleanup()
return
def calculate(self, listFilePath, scale, csvFile):
# Each line of the list file is a path to a compressed file.
compressedList = list()
listFile = open(listFilePath, 'r')
for line in listFile:
compressedList.append(line.strip())
listFile.close()
# Calculate the distance matrix.
self._cbdCalculator(compressedList, scale, csvFile)
return
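# Usage sketch (hypothetical paths): given a text file listing one compressed
# file per line, calculate() builds the matrix without going through runJob():
#
#   cbd = CompressionBasedDistance()
#   cbd.calculate('/tmp/files.list', 'std', '/tmp/distance.csv')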
|
|
# pylint: disable=W0611
''' simpleOSC 0.2
ixi software - July, 2006
www.ixi-software.net
simple API for the Open SoundControl for Python (by Daniel Holth, Clinton
McChesney --> pyKit.tar.gz file at http://wiretap.stetson.edu)
Documentation at http://wiretap.stetson.edu/docs/pyKit/
The main aim of this implementation is to provide a simple way to deal
with the OSC implementation, making life easier for those who don't have an
understanding of sockets or programming. This would not be on your screen
without the help of Daniel Holth.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Thanks for the support to Buchsenhausen, Innsbruck, Austria.
'''
from . import OSC
import socket, os, time, errno, sys
from threading import Lock
from kivy.logger import Logger
try:
# multiprocessing support is not good on Windows
if sys.platform in ('win32', 'cygwin'):
raise
use_multiprocessing = True
from multiprocessing import Process, Queue, Value
import multiprocessing.synchronize
Logger.info('OSC: using <multiprocessing> for socket')
except:
use_multiprocessing = False
from threading import Thread
Logger.info('OSC: using <thread> for socket')
# globals
outSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
oscThreads = {}
oscLock = Lock()
if use_multiprocessing:
def _readQueue(thread_id=None):
global oscThreads
for id in oscThreads:
if thread_id is not None:
if id != thread_id:
continue
thread = oscThreads[id]
try:
while True:
message = thread.queue.get_nowait()
thread.addressManager.handle(message)
except:
pass
class _OSCServer(Process):
def __init__(self, **kwargs):
self.addressManager = OSC.CallbackManager()
self.queue = Queue()
Process.__init__(self, args=(self.queue,))
self.daemon = True
self._isRunning = Value('b', True)
self._haveSocket = Value('b', False)
def _queue_message(self, message):
self.queue.put(message)
def _get_isRunning(self):
return self._isRunning.value
def _set_isRunning(self, value):
self._isRunning.value = value
isRunning = property(_get_isRunning, _set_isRunning)
def _get_haveSocket(self):
return self._haveSocket.value
def _set_haveSocket(self, value):
self._haveSocket.value = value
haveSocket = property(_get_haveSocket, _set_haveSocket)
else:
def _readQueue(thread_id=None):
pass
class _OSCServer(Thread):
def __init__(self, **kwargs):
Thread.__init__(self)
self.addressManager = OSC.CallbackManager()
self.daemon = True
self.isRunning = True
self.haveSocket = False
def _queue_message(self, message):
self.addressManager.handle(message)
def init() :
'''Deprecated no-op, kept for backwards compatibility: the address
manager and out socket are now created as module globals.
'''
pass
def bind(oscid, func, oscaddress):
'''bind given oscaddresses with given functions in address manager
'''
global oscThreads
thread = oscThreads.get(oscid, None)
assert thread is not None, 'Unknown thread <%s>' % oscid
thread.addressManager.add(func, oscaddress)
def sendMsg(oscAddress, dataArray=[], ipAddr='127.0.0.1', port=9000) :
'''create and send normal OSC msgs
defaults to '127.0.0.1', port 9000
'''
oscLock.acquire()
outSocket.sendto( createBinaryMsg(oscAddress, dataArray), (ipAddr, port))
oscLock.release()
def createBundle():
'''create bundled type of OSC messages
'''
b = OSC.OSCMessage()
b.address = ""
b.append("#bundle")
b.append(0)
b.append(0)
return b
def appendToBundle(bundle, oscAddress, dataArray):
'''create an OSC message and append it to a given bundle
'''
bundle.append( createBinaryMsg(oscAddress, dataArray), 'b')
def sendBundle(bundle, ipAddr='127.0.0.1', port=9000) :
'''convert bundle to a binary and send it
'''
oscLock.acquire()
outSocket.sendto(bundle.message, (ipAddr, port))
oscLock.release()
def createBinaryMsg(oscAddress, dataArray):
'''create and return general type binary OSC msg
'''
m = OSC.OSCMessage()
m.address = oscAddress
for x in dataArray:
m.append(x)
return m.getBinary()
def readQueue(thread_id=None):
'''Read the queues from all threads, and dispatch messages.
This must be called in the main thread.
You can pass a thread id to dequeue messages from a specific thread.
This id is returned by the listen() function'''
return _readQueue(thread_id)
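# Typical polling pattern (a sketch, assuming a Kivy app): when the
# multiprocessing backend is used, messages queued by the server process
# must be drained from the main thread, for example via a Clock callback:
#
#   from kivy.clock import Clock
#   Clock.schedule_interval(lambda dt: readQueue(), 0)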
################################ receive osc from The Other.
class OSCServer(_OSCServer):
def __init__(self, **kwargs):
kwargs.setdefault('ipAddr', '127.0.0.1')
kwargs.setdefault('port', 9001)
super(OSCServer, self).__init__()
self.ipAddr = kwargs.get('ipAddr')
self.port = kwargs.get('port')
def run(self):
self.haveSocket = False
# create socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# avoid trouble if Python exits without cleaning up the socket
# properly; not needed under Windows, which can reuse the address
# even if the socket is in FIN_WAIT2 or TIME_WAIT state.
if os.name in ['posix', 'mac'] and hasattr(socket, 'SO_REUSEADDR'):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# try to bind the socket, retry if necessary
while not self.haveSocket and self.isRunning:
try :
self.socket.bind((self.ipAddr, self.port))
self.socket.settimeout(0.5)
self.haveSocket = True
except socket.error as e:
error, message = e.args
# special handling for EADDRINUSE
if error == errno.EADDRINUSE:
Logger.error('OSC: Address %s:%i already in use, retrying in 2 seconds' % (self.ipAddr, self.port))
else:
self.haveSocket = False
# sleep 2 seconds before retrying
time.sleep(2)
Logger.info('OSC: listening for Tuio on %s:%i' % (self.ipAddr, self.port))
while self.isRunning:
try:
message = self.socket.recv(65535)
self._queue_message(message)
except Exception as e:
if type(e) == socket.timeout:
continue
Logger.exception('OSC: Error in Tuio recv()')
return 'no data arrived'
def listen(ipAddr='127.0.0.1', port=9001):
'''Creates a new thread listening to that port
defaults to ipAddr='127.0.0.1', port 9001
'''
global oscThreads
id = '%s:%d' % (ipAddr, port)
if id in oscThreads:
return
Logger.debug('OSC: Start thread <%s>' % id)
oscThreads[id] = OSCServer(ipAddr=ipAddr, port=port)
oscThreads[id].start()
return id
def dontListen(id=None):
'''closes the socket and kills the thread
'''
global oscThreads
if id and id in oscThreads:
ids = [id]
else:
ids = list(oscThreads.keys())
for id in ids:
#oscThreads[id].socket.close()
Logger.debug('OSC: Stop thread <%s>' % id)
oscThreads[id].isRunning = False
oscThreads[id].join()
Logger.debug('OSC: Stop thread <%s> finished' % id)
del oscThreads[id]
if __name__ == '__main__':
# example of how to use oscAPI
init()
oscid = listen() # defaults to "127.0.0.1", 9001
time.sleep(5)
# add addresses to callback manager
def printStuff(msg):
'''deals with "print" tagged OSC addresses
'''
print("printing in the printStuff function ", msg)
print("the oscaddress is ", msg[0])
print("the value is ", msg[2])
bind(printStuff, "/test")
#send normal msg, two ways
sendMsg("/test", [1, 2, 3], "127.0.0.1", 9000)
sendMsg("/test2", [1, 2, 3]) # defaults to "127.0.0.1", 9000
sendMsg("/hello") # defaults to [], "127.0.0.1", 9000
# create and send bundle, two ways to send
bundle = createBundle()
appendToBundle(bundle, "/testing/bundles", [1, 2, 3])
appendToBundle(bundle, "/testing/bundles", [4, 5, 6])
sendBundle(bundle, "127.0.0.1", 9000)
sendBundle(bundle) # defaults to "127.0.0.1", 9000
dontListen() # finally close the connection before exiting the program
|
|
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import schema as sa_schema, types as sqltypes
import logging
from .. import compat
from .render import _render_server_default
from sqlalchemy.util import OrderedSet
log = logging.getLogger(__name__)
def _run_filters(object_, name, type_, reflected, compare_to, object_filters):
for fn in object_filters:
if not fn(object_, name, type_, reflected, compare_to):
return False
else:
return True
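# Sketch of a filter callable (hypothetical, unused example): each filter
# receives the candidate object and vetoes it by returning False, e.g. to
# skip Alembic's own version table during autogenerate:
def _skip_version_table_example(object_, name, type_, reflected, compare_to):
return not (type_ == "table" and name == "alembic_version")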
def _compare_tables(conn_table_names, metadata_table_names,
object_filters,
inspector, metadata, diffs, autogen_context):
for s, tname in metadata_table_names.difference(conn_table_names):
name = '%s.%s' % (s, tname) if s else tname
metadata_table = metadata.tables[sa_schema._get_table_key(tname, s)]
if _run_filters(metadata_table, tname, "table", False, None, object_filters):
diffs.append(("add_table", metadata.tables[name]))
log.info("Detected added table %r", name)
_compare_indexes_and_uniques(s, tname, object_filters,
None,
metadata_table,
diffs, autogen_context, inspector)
removal_metadata = sa_schema.MetaData()
for s, tname in conn_table_names.difference(metadata_table_names):
name = sa_schema._get_table_key(tname, s)
exists = name in removal_metadata.tables
t = sa_schema.Table(tname, removal_metadata, schema=s)
if not exists:
inspector.reflecttable(t, None)
if _run_filters(t, tname, "table", True, None, object_filters):
diffs.append(("remove_table", t))
log.info("Detected removed table %r", name)
existing_tables = conn_table_names.intersection(metadata_table_names)
existing_metadata = sa_schema.MetaData()
conn_column_info = {}
for s, tname in existing_tables:
name = sa_schema._get_table_key(tname, s)
exists = name in existing_metadata.tables
t = sa_schema.Table(tname, existing_metadata, schema=s)
if not exists:
inspector.reflecttable(t, None)
conn_column_info[(s, tname)] = t
for s, tname in sorted(existing_tables):
name = '%s.%s' % (s, tname) if s else tname
metadata_table = metadata.tables[name]
conn_table = existing_metadata.tables[name]
if _run_filters(metadata_table, tname, "table", False, conn_table, object_filters):
_compare_columns(s, tname, object_filters,
conn_table,
metadata_table,
diffs, autogen_context, inspector)
_compare_indexes_and_uniques(s, tname, object_filters,
conn_table,
metadata_table,
diffs, autogen_context, inspector)
# TODO:
# table constraints
# sequences
def _make_index(params, conn_table):
return sa_schema.Index(
params['name'],
*[conn_table.c[cname] for cname in params['column_names']],
unique=params['unique']
)
def _make_unique_constraint(params, conn_table):
return sa_schema.UniqueConstraint(
*[conn_table.c[cname] for cname in params['column_names']],
name=params['name']
)
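# Sketch (hypothetical inspector record): a reflected index record such as
#   {'name': 'ix_user_email', 'column_names': ['email'], 'unique': False}
# is rebuilt against the reflected Table so the comparison code below can
# treat connection-side and metadata-side objects uniformly:
#
#   idx = _make_index({'name': 'ix_user_email',
#                      'column_names': ['email'],
#                      'unique': False}, conn_table)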
def _compare_columns(schema, tname, object_filters, conn_table, metadata_table,
diffs, autogen_context, inspector):
name = '%s.%s' % (schema, tname) if schema else tname
metadata_cols_by_name = dict((c.name, c) for c in metadata_table.c)
conn_col_names = dict((c.name, c) for c in conn_table.c)
metadata_col_names = OrderedSet(sorted(metadata_cols_by_name))
for cname in metadata_col_names.difference(conn_col_names):
if _run_filters(metadata_cols_by_name[cname], cname,
"column", False, None, object_filters):
diffs.append(
("add_column", schema, tname, metadata_cols_by_name[cname])
)
log.info("Detected added column '%s.%s'", name, cname)
for cname in set(conn_col_names).difference(metadata_col_names):
rem_col = sa_schema.Column(
cname,
conn_table.c[cname].type,
nullable=conn_table.c[cname].nullable,
server_default=conn_table.c[cname].server_default
)
if _run_filters(rem_col, cname,
"column", True, None, object_filters):
diffs.append(
("remove_column", schema, tname, rem_col)
)
log.info("Detected removed column '%s.%s'", name, cname)
for colname in metadata_col_names.intersection(conn_col_names):
metadata_col = metadata_cols_by_name[colname]
conn_col = conn_table.c[colname]
if not _run_filters(
metadata_col, colname, "column", False, conn_col, object_filters):
continue
col_diff = []
_compare_type(schema, tname, colname,
conn_col,
metadata_col,
col_diff, autogen_context
)
_compare_nullable(schema, tname, colname,
conn_col,
metadata_col.nullable,
col_diff, autogen_context
)
_compare_server_default(schema, tname, colname,
conn_col,
metadata_col,
col_diff, autogen_context
)
if col_diff:
diffs.append(col_diff)
class _constraint_sig(object):
def __eq__(self, other):
return self.const == other.const
def __ne__(self, other):
return self.const != other.const
def __hash__(self):
return hash(self.const)
class _uq_constraint_sig(_constraint_sig):
is_index = False
is_unique = True
def __init__(self, const):
self.const = const
self.name = const.name
self.sig = tuple(sorted([col.name for col in const.columns]))
@property
def column_names(self):
return [col.name for col in self.const.columns]
class _ix_constraint_sig(_constraint_sig):
is_index = True
def __init__(self, const):
self.const = const
self.name = const.name
self.sig = tuple(sorted([col.name for col in const.columns]))
self.is_unique = bool(const.unique)
@property
def column_names(self):
return _get_index_column_names(self.const)
def _get_index_column_names(idx):
if compat.sqla_08:
return [exp.name for exp in idx.expressions]
else:
return [col.name for col in idx.columns]
def _compare_indexes_and_uniques(schema, tname, object_filters, conn_table,
metadata_table, diffs, autogen_context, inspector):
is_create_table = conn_table is None
# 1a. get raw indexes and unique constraints from metadata ...
metadata_unique_constraints = set(uq for uq in metadata_table.constraints
if isinstance(uq, sa_schema.UniqueConstraint)
)
metadata_indexes = set(metadata_table.indexes)
# 1b. ... and from connection
if conn_table is not None and hasattr(inspector, "get_unique_constraints"):
try:
conn_uniques = inspector.get_unique_constraints(tname)
except (NotImplementedError, NoSuchTableError):
conn_uniques = []
else:
conn_uniques = []
try:
conn_indexes = inspector.get_indexes(tname)
except NoSuchTableError:
conn_indexes = []
# 2. convert conn-level objects from raw inspector records
# into schema objects
conn_uniques = set(_make_unique_constraint(uq_def, conn_table)
for uq_def in conn_uniques)
conn_indexes = set(_make_index(ix, conn_table) for ix in conn_indexes)
# 3. give the dialect a chance to omit indexes and constraints that
# we know are either added implicitly by the DB or that the DB
# can't accurately report on
autogen_context['context'].impl.\
correct_for_autogen_constraints(
conn_uniques, conn_indexes,
metadata_unique_constraints,
metadata_indexes
)
# 4. organize the constraints into "signature" collections, the
# _constraint_sig() objects provide a consistent facade over both
# Index and UniqueConstraint so we can easily work with them
# interchangeably
metadata_unique_constraints = set(_uq_constraint_sig(uq)
for uq in metadata_unique_constraints
)
metadata_indexes = set(_ix_constraint_sig(ix) for ix in metadata_indexes)
conn_unique_constraints = set(_uq_constraint_sig(uq) for uq in conn_uniques)
conn_indexes = set(_ix_constraint_sig(ix) for ix in conn_indexes)
# 5. index things by name, for those objects that have names
metadata_names = dict(
(c.name, c) for c in
metadata_unique_constraints.union(metadata_indexes)
if c.name is not None)
conn_uniques_by_name = dict((c.name, c) for c in conn_unique_constraints)
conn_indexes_by_name = dict((c.name, c) for c in conn_indexes)
conn_names = dict((c.name, c) for c in
conn_unique_constraints.union(conn_indexes)
if c.name is not None)
doubled_constraints = dict(
(name, (conn_uniques_by_name[name], conn_indexes_by_name[name]))
for name in set(conn_uniques_by_name).intersection(conn_indexes_by_name)
)
# 6. index things by "column signature", to help with unnamed unique
# constraints.
conn_uniques_by_sig = dict((uq.sig, uq) for uq in conn_unique_constraints)
metadata_uniques_by_sig = dict(
(uq.sig, uq) for uq in metadata_unique_constraints)
metadata_indexes_by_sig = dict(
(ix.sig, ix) for ix in metadata_indexes)
unnamed_metadata_uniques = dict((uq.sig, uq) for uq in
metadata_unique_constraints if uq.name is None)
# assumptions:
# 1. a unique constraint or an index from the connection *always*
# has a name.
# 2. an index on the metadata side *always* has a name.
# 3. a unique constraint on the metadata side *might* have a name.
# 4. The backend may double up indexes as unique constraints and
# vice versa (e.g. MySQL, PostgreSQL)
def obj_added(obj):
if obj.is_index:
diffs.append(("add_index", obj.const))
log.info("Detected added index '%s' on %s",
obj.name, ', '.join([
"'%s'" % obj.column_names
])
)
else:
if is_create_table:
# unique constraints are created inline with table defs
return
diffs.append(("add_constraint", obj.const))
log.info("Detected added unique constraint '%s' on %s",
obj.name, ', '.join([
"'%s'" % obj.column_names
])
)
def obj_removed(obj):
if obj.is_index:
diffs.append(("remove_index", obj.const))
log.info("Detected removed index '%s' on '%s'", obj.name, tname)
else:
diffs.append(("remove_constraint", obj.const))
log.info("Detected removed unique constraint '%s' on '%s'",
obj.name, tname
)
def obj_changed(old, new, msg):
if old.is_index:
log.info("Detected changed index '%s' on '%s':%s",
old.name, tname, ', '.join(msg)
)
diffs.append(("remove_index", old.const))
diffs.append(("add_index", new.const))
else:
log.info("Detected changed unique constraint '%s' on '%s':%s",
old.name, tname, ', '.join(msg)
)
diffs.append(("remove_constraint", old.const))
diffs.append(("add_constraint", new.const))
for added_name in sorted(set(metadata_names).difference(conn_names)):
obj = metadata_names[added_name]
obj_added(obj)
for existing_name in sorted(set(metadata_names).intersection(conn_names)):
metadata_obj = metadata_names[existing_name]
if existing_name in doubled_constraints:
conn_uq, conn_idx = doubled_constraints[existing_name]
if metadata_obj.is_index:
conn_obj = conn_idx
else:
conn_obj = conn_uq
else:
conn_obj = conn_names[existing_name]
if conn_obj.is_index != metadata_obj.is_index:
obj_removed(conn_obj)
obj_added(metadata_obj)
else:
msg = []
if conn_obj.is_unique != metadata_obj.is_unique:
msg.append(' unique=%r to unique=%r' % (
conn_obj.is_unique, metadata_obj.is_unique
))
if conn_obj.sig != metadata_obj.sig:
msg.append(' columns %r to %r' % (
conn_obj.sig, metadata_obj.sig
))
if msg:
obj_changed(conn_obj, metadata_obj, msg)
for removed_name in sorted(set(conn_names).difference(metadata_names)):
conn_obj = conn_names[removed_name]
if not conn_obj.is_index and conn_obj.sig in unnamed_metadata_uniques:
continue
elif removed_name in doubled_constraints:
if conn_obj.sig not in metadata_indexes_by_sig and \
conn_obj.sig not in metadata_uniques_by_sig:
conn_uq, conn_idx = doubled_constraints[removed_name]
obj_removed(conn_uq)
obj_removed(conn_idx)
else:
obj_removed(conn_obj)
for uq_sig in unnamed_metadata_uniques:
if uq_sig not in conn_uniques_by_sig:
obj_added(unnamed_metadata_uniques[uq_sig])
def _compare_nullable(schema, tname, cname, conn_col,
metadata_col_nullable, diffs,
autogen_context):
conn_col_nullable = conn_col.nullable
if conn_col_nullable is not metadata_col_nullable:
diffs.append(
("modify_nullable", schema, tname, cname,
{
"existing_type": conn_col.type,
"existing_server_default": conn_col.server_default,
},
conn_col_nullable,
metadata_col_nullable),
)
log.info("Detected %s on column '%s.%s'",
"NULL" if metadata_col_nullable else "NOT NULL",
tname,
cname
)
def _compare_type(schema, tname, cname, conn_col,
metadata_col, diffs,
autogen_context):
conn_type = conn_col.type
metadata_type = metadata_col.type
if conn_type._type_affinity is sqltypes.NullType:
log.info("Couldn't determine database type "
"for column '%s.%s'", tname, cname)
return
if metadata_type._type_affinity is sqltypes.NullType:
log.info("Column '%s.%s' has no type within "
"the model; can't compare", tname, cname)
return
isdiff = autogen_context['context']._compare_type(conn_col, metadata_col)
if isdiff:
diffs.append(
("modify_type", schema, tname, cname,
{
"existing_nullable": conn_col.nullable,
"existing_server_default": conn_col.server_default,
},
conn_type,
metadata_type),
)
log.info("Detected type change from %r to %r on '%s.%s'",
conn_type, metadata_type, tname, cname
)
def _compare_server_default(schema, tname, cname, conn_col, metadata_col,
diffs, autogen_context):
metadata_default = metadata_col.server_default
conn_col_default = conn_col.server_default
if conn_col_default is None and metadata_default is None:
return False
rendered_metadata_default = _render_server_default(
metadata_default, autogen_context)
rendered_conn_default = conn_col.server_default.arg.text \
if conn_col.server_default else None
isdiff = autogen_context['context']._compare_server_default(
conn_col, metadata_col,
rendered_metadata_default,
rendered_conn_default
)
if isdiff:
conn_col_default = rendered_conn_default
diffs.append(
("modify_default", schema, tname, cname,
{
"existing_nullable": conn_col.nullable,
"existing_type": conn_col.type,
},
conn_col_default,
metadata_default),
)
log.info("Detected server default on column '%s.%s'",
tname,
cname
)
|
|
from __future__ import unicode_literals
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.hostingsvcs.service import HostingService
from reviewboard.reviews.models import ReviewRequest
from reviewboard.scmtools.models import Repository
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.webapi.resources import resources
def _normalize_id(value, allowed_cls, id_field='pk', ischecker=isinstance):
if ischecker(value, allowed_cls):
return getattr(value, id_field)
elif isinstance(value, int):
return value
else:
raise ValueError('Expected int or %r, but got %r instead'
% (allowed_cls, value))
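# Usage sketch: the URL helpers below accept either a model instance or a raw
# id; _normalize_id collapses both cases (objects here are hypothetical):
#
#   _normalize_id(repository, Repository)  # -> repository.pk
#   _normalize_id(42, Repository)          # -> 42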
#
# APITokenResource
#
def get_api_token_list_url(user, local_site_name=None):
return resources.api_token.get_list_url(
local_site_name=local_site_name,
username=user.username)
def get_api_token_item_url(token, local_site_name=None):
return resources.api_token.get_item_url(
local_site_name=local_site_name,
username=token.user.username,
api_token_id=token.pk)
#
# ArchivedReviewRequestResource
#
def get_archived_review_request_list_url(username, local_site_name=None):
return resources.archived_review_request.get_list_url(
local_site_name=local_site_name,
username=username)
def get_archived_review_request_item_url(username, object_id,
local_site_name=None):
return resources.archived_review_request.get_item_url(
local_site_name=local_site_name,
username=username,
review_request_id=object_id)
#
# ChangeResource
#
def get_change_list_url(review_request, local_site_name=None):
return resources.change.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
def get_change_item_url(changedesc, local_site_name=None):
return resources.change.get_item_url(
local_site_name=local_site_name,
review_request_id=changedesc.review_request.get().display_id,
change_id=changedesc.pk)
#
# DefaultReviewerResource
#
def get_default_reviewer_list_url(local_site_name=None):
return resources.default_reviewer.get_list_url(
local_site_name=local_site_name)
def get_default_reviewer_item_url(default_reviewer_id, local_site_name=None):
return resources.default_reviewer.get_item_url(
local_site_name=local_site_name,
default_reviewer_id=default_reviewer_id)
#
# DiffResource
#
def get_diff_list_url(review_request, local_site_name=None):
return resources.diff.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
def get_diff_item_url(review_request, diff_revision, local_site_name=None):
return resources.diff.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=diff_revision)
#
# DiffFileAttachmentResource
#
def get_diff_file_attachment_list_url(repository, local_site_name=None):
return resources.diff_file_attachment.get_list_url(
local_site_name=local_site_name,
repository_id=repository.pk)
def get_diff_file_attachment_item_url(attachment, repository,
local_site_name=None):
return resources.diff_file_attachment.get_item_url(
local_site_name=local_site_name,
repository_id=repository.pk,
file_attachment_id=attachment.pk)
#
# DraftDiffResource
#
def get_draft_diff_list_url(review_request, local_site_name=None):
return resources.draft_diff.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
def get_draft_diff_item_url(review_request, diff_revision,
local_site_name=None):
return resources.draft_diff.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=diff_revision)
#
# DraftFileAttachmentResource
#
def get_draft_file_attachment_list_url(review_request, local_site_name=None):
return resources.draft_file_attachment.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
def get_draft_file_attachment_item_url(review_request, file_attachment_id,
local_site_name=None):
return resources.draft_file_attachment.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
file_attachment_id=file_attachment_id)
#
# DraftFileDiffResource
#
def get_draft_filediff_list_url(diffset, review_request, local_site_name=None):
return resources.draft_filediff.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=diffset.revision)
def get_draft_filediff_item_url(filediff, review_request,
local_site_name=None):
return resources.draft_filediff.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=filediff.diffset.revision,
filediff_id=filediff.pk)
#
# DraftOriginalFileResource
#
def get_draft_original_file_url(review_request, diffset, filediff,
local_site_name=None):
return resources.draft_original_file.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=diffset.revision,
filediff_id=filediff.pk)
#
# DraftPatchedFileResource
#
def get_draft_patched_file_url(review_request, diffset, filediff,
local_site_name=None):
return resources.draft_patched_file.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=diffset.revision,
filediff_id=filediff.pk)
#
# FileAttachmentResource
#
def get_file_attachment_list_url(review_request, local_site_name=None):
return resources.file_attachment.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
def get_file_attachment_item_url(file_attachment, local_site_name=None):
return resources.file_attachment.get_item_url(
local_site_name=local_site_name,
file_attachment_id=file_attachment.id,
review_request_id=file_attachment.review_request.get().display_id)
#
# FileAttachmentCommentResource
#
def get_file_attachment_comment_list_url(file_attachment,
local_site_name=None):
return resources.file_attachment_comment.get_list_url(
local_site_name=local_site_name,
file_attachment_id=file_attachment.pk,
review_request_id=file_attachment.review_request.get().display_id)
def get_file_attachment_comment_item_url(file_attachment, comment_id,
local_site_name=None):
return resources.file_attachment_comment.get_item_url(
local_site_name=local_site_name,
file_attachment_id=file_attachment.pk,
review_request_id=file_attachment.review_request.get().display_id,
comment_id=comment_id)
#
# FileDiffResource
#
def get_filediff_list_url(diffset, review_request, local_site_name=None):
return resources.filediff.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=diffset.revision)
def get_filediff_item_url(filediff, review_request, local_site_name=None):
return resources.filediff.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=filediff.diffset.revision,
filediff_id=filediff.pk)
#
# FileDiffCommentResource
#
def get_filediff_comment_list_url(filediff, local_site_name=None):
diffset = filediff.diffset
review_request = diffset.history.review_request.get()
return resources.filediff_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=filediff.diffset.revision,
filediff_id=filediff.pk)
def get_filediff_comment_item_url(filediff, comment_id, local_site_name=None):
diffset = filediff.diffset
review_request = diffset.history.review_request.get()
return resources.filediff_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=filediff.diffset.revision,
filediff_id=filediff.pk,
comment_id=comment_id)
#
# GeneralCommentResource
#
def get_general_comment_list_url(review_request, local_site_name=None):
return resources.base_review_general_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
def get_general_comment_item_url(review_request, comment_id,
local_site_name=None):
return resources.base_review_general_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
comment_id=comment_id)
#
# HostingServiceResource
#
def get_hosting_service_list_url(local_site_name=None):
return resources.hosting_service.get_list_url(
local_site_name=local_site_name)
def get_hosting_service_item_url(hosting_service_or_id, local_site_name=None):
hosting_service_id = _normalize_id(hosting_service_or_id,
HostingService,
id_field='hosting_service_id',
ischecker=issubclass)
return resources.hosting_service.get_item_url(
local_site_name=local_site_name,
hosting_service_id=hosting_service_id)
#
# HostingServiceAccountResource
#
def get_hosting_service_account_list_url(local_site_name=None):
return resources.hosting_service_account.get_list_url(
local_site_name=local_site_name)
def get_hosting_service_account_item_url(account_or_id, local_site_name=None):
account_id = _normalize_id(account_or_id, HostingServiceAccount)
return resources.hosting_service_account.get_item_url(
local_site_name=local_site_name,
account_id=account_id)
#
# OriginalFileResource
#
def get_original_file_url(review_request, diffset, filediff,
local_site_name=None):
return resources.original_file.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=diffset.revision,
filediff_id=filediff.pk)
#
# PatchedFileResource
#
def get_patched_file_url(review_request, diffset, filediff,
local_site_name=None):
return resources.patched_file.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
diff_revision=diffset.revision,
filediff_id=filediff.pk)
#
# RemoteRepositoryResource
#
def get_remote_repository_list_url(account, local_site_name=None):
return resources.remote_repository.get_list_url(
local_site_name=local_site_name,
account_id=account.pk)
def get_remote_repository_item_url(remote_repository, local_site_name=None):
return resources.remote_repository.get_item_url(
local_site_name=local_site_name,
account_id=remote_repository.hosting_service_account.pk,
repository_id=remote_repository.id)
#
# RepositoryResource
#
def get_repository_list_url(local_site_name=None):
return resources.repository.get_list_url(
local_site_name=local_site_name)
def get_repository_item_url(repository_or_id, local_site_name=None):
repository_id = _normalize_id(repository_or_id, Repository)
return resources.repository.get_item_url(
local_site_name=local_site_name,
repository_id=repository_id)
#
# RepositoryBranchesResource
#
def get_repository_branches_url(repository, local_site_name=None):
return resources.repository_branches.get_list_url(
local_site_name=local_site_name,
repository_id=repository.pk)
#
# RepositoryCommitsResource
#
def get_repository_commits_url(repository, local_site_name=None):
return resources.repository_commits.get_list_url(
local_site_name=local_site_name,
repository_id=repository.pk)
#
# RepositoryInfoResource
#
def get_repository_info_url(repository, local_site_name=None):
return resources.repository_info.get_list_url(
local_site_name=local_site_name,
repository_id=repository.pk)
#
# ReviewResource
#
def get_review_list_url(review_request, local_site_name=None):
return resources.review.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
def get_review_item_url(review_request, review_id, local_site_name=None):
return resources.review.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
review_id=review_id)
#
# ReviewDiffCommentResource
#
def get_review_diff_comment_list_url(review, local_site_name=None):
return resources.review_diff_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk)
def get_review_diff_comment_item_url(review, comment_id, local_site_name=None):
return resources.review_diff_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk,
comment_id=comment_id)
#
# ReviewFileAttachmentCommentResource
#
def get_review_file_attachment_comment_list_url(review, local_site_name=None):
return resources.review_file_attachment_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk)
def get_review_file_attachment_comment_item_url(review, comment_id,
local_site_name=None):
return resources.review_file_attachment_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk,
comment_id=comment_id)
#
# ReviewGeneralCommentResource
#
def get_review_general_comment_list_url(review, local_site_name=None):
return resources.review_general_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk)
def get_review_general_comment_item_url(review, comment_id,
local_site_name=None):
return resources.review_general_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk,
comment_id=comment_id)
#
# ReviewGroupResource
#
def get_review_group_list_url(local_site_name=None):
return resources.review_group.get_list_url(
local_site_name=local_site_name)
def get_review_group_item_url(group_name, local_site_name=None):
return resources.review_group.get_item_url(
local_site_name=local_site_name,
group_name=group_name)
#
# ReviewGroupUserResource
#
def get_review_group_user_list_url(group_name, local_site_name=None):
return resources.review_group_user.get_list_url(
local_site_name=local_site_name,
group_name=group_name)
def get_review_group_user_item_url(group_name, username, local_site_name=None):
return resources.review_group_user.get_item_url(
local_site_name=local_site_name,
group_name=group_name,
username=username)
#
# ReviewReplyResource
#
def get_review_reply_list_url(review, local_site_name=None):
return resources.review_reply.get_list_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk)
def get_review_reply_item_url(review, reply_id, local_site_name=None):
return resources.review_reply.get_item_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk,
reply_id=reply_id)
#
# ReviewReplyDiffCommentResource
#
def get_review_reply_diff_comment_list_url(reply, local_site_name=None):
return resources.review_reply_diff_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=reply.review_request.display_id,
review_id=reply.base_reply_to_id,
reply_id=reply.pk)
def get_review_reply_diff_comment_item_url(reply, comment_id,
local_site_name=None):
return resources.review_reply_diff_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=reply.review_request.display_id,
review_id=reply.base_reply_to_id,
reply_id=reply.pk,
comment_id=comment_id)
#
# ReviewReplyFileAttachmentCommentResource
#
def get_review_reply_file_attachment_comment_list_url(reply,
local_site_name=None):
return resources.review_reply_file_attachment_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=reply.review_request.display_id,
review_id=reply.base_reply_to_id,
reply_id=reply.pk)
def get_review_reply_file_attachment_comment_item_url(reply, comment_id,
local_site_name=None):
return resources.review_reply_file_attachment_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=reply.review_request.display_id,
review_id=reply.base_reply_to_id,
reply_id=reply.pk,
comment_id=comment_id)
#
# ReviewReplyGeneralCommentResource
#
def get_review_reply_general_comment_list_url(reply, local_site_name=None):
return resources.review_reply_general_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=reply.review_request.display_id,
review_id=reply.base_reply_to_id,
reply_id=reply.pk)
def get_review_reply_general_comment_item_url(reply, comment_id,
local_site_name=None):
return resources.review_reply_general_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=reply.review_request.display_id,
review_id=reply.base_reply_to_id,
reply_id=reply.pk,
comment_id=comment_id)
#
# ReviewReplyScreenshotCommentResource
#
def get_review_reply_screenshot_comment_list_url(reply, local_site_name=None):
return resources.review_reply_screenshot_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=reply.review_request.display_id,
review_id=reply.base_reply_to_id,
reply_id=reply.pk)
def get_review_reply_screenshot_comment_item_url(reply, comment_id,
local_site_name=None):
return resources.review_reply_screenshot_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=reply.review_request.display_id,
review_id=reply.base_reply_to_id,
reply_id=reply.pk,
comment_id=comment_id)
#
# ReviewRequestResource
#
def get_review_request_list_url(local_site_name=None):
return resources.review_request.get_list_url(
local_site_name=local_site_name)
def get_review_request_item_url(review_request_id, local_site_name=None):
return resources.review_request.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request_id)
#
# ReviewRequestDraftResource
#
def get_review_request_draft_url(review_request, local_site_name=None):
return resources.review_request_draft.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
#
# ReviewScreenshotCommentResource
#
def get_review_screenshot_comment_list_url(review, local_site_name=None):
return resources.review_screenshot_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk)
def get_review_screenshot_comment_item_url(review, comment_id,
local_site_name=None):
return resources.review_screenshot_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=review.review_request.display_id,
review_id=review.pk,
comment_id=comment_id)
#
# RootResource
#
def get_root_url(local_site_name=None):
return local_site_reverse('root-resource',
local_site_name=local_site_name)
#
# ScreenshotResource
#
def get_screenshot_list_url(review_request_or_id, local_site_name=None):
review_request_id = _normalize_id(review_request_or_id, ReviewRequest,
id_field='display_id')
return resources.screenshot.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request_id)
def get_screenshot_item_url(screenshot, local_site_name=None):
return resources.screenshot.get_item_url(
local_site_name=local_site_name,
screenshot_id=screenshot.pk,
review_request_id=screenshot.review_request.get().display_id)
#
# ScreenshotCommentResource
#
def get_screenshot_comment_list_url(screenshot, local_site_name=None):
return resources.screenshot_comment.get_list_url(
local_site_name=local_site_name,
review_request_id=screenshot.review_request.get().display_id,
screenshot_id=screenshot.pk)
def get_screenshot_comment_item_url(screenshot, comment_id,
local_site_name=None):
return resources.screenshot_comment.get_item_url(
local_site_name=local_site_name,
review_request_id=screenshot.review_request.get().display_id,
screenshot_id=screenshot.pk,
comment_id=comment_id)
#
# ScreenshotDraftResource
#
def get_screenshot_draft_list_url(review_request, local_site_name=None):
return resources.draft_screenshot.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
def get_screenshot_draft_item_url(review_request, screenshot_id,
local_site_name=None):
return resources.draft_screenshot.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
screenshot_id=screenshot_id)
#
# SearchResource
#
def get_search_url(local_site_name=None):
return resources.search.get_item_url(local_site_name=local_site_name)
#
# ServerInfoResource
#
def get_server_info_url(local_site_name=None):
return resources.server_info.get_item_url(local_site_name=local_site_name)
#
# SessionResource
#
def get_session_url(local_site_name=None):
return resources.session.get_list_url(local_site_name=local_site_name)
#
# StatusUpdateResource
#
def get_status_update_list_url(review_request, local_site_name=None):
return resources.status_update.get_list_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id)
def get_status_update_item_url(review_request, status_update_id,
local_site_name=None):
return resources.status_update.get_item_url(
local_site_name=local_site_name,
review_request_id=review_request.display_id,
status_update_id=status_update_id)
#
# UserResource
#
def get_user_list_url(local_site_name=None):
return resources.user.get_list_url(
local_site_name=local_site_name)
def get_user_item_url(username, local_site_name=None):
return resources.user.get_item_url(
local_site_name=local_site_name,
username=username)
#
# UserFileAttachmentResource
#
def get_user_file_attachment_list_url(user, local_site_name=None):
return resources.user_file_attachment.get_list_url(
local_site_name=local_site_name,
username=user.username)
def get_user_file_attachment_item_url(user, file_attachment,
local_site_name=None):
return resources.user_file_attachment.get_item_url(
local_site_name=local_site_name,
username=user.username,
file_attachment_id=file_attachment.id)
#
# ValidateDiffResource
#
def get_validate_diff_url(local_site_name=None):
return resources.validate_diff.get_item_url(
local_site_name=local_site_name)
#
# WatchedReviewGroupResource
#
def get_watched_review_group_list_url(username, local_site_name=None):
return resources.watched_review_group.get_list_url(
local_site_name=local_site_name,
username=username)
def get_watched_review_group_item_url(username, object_id,
local_site_name=None):
return resources.watched_review_group.get_item_url(
local_site_name=local_site_name,
username=username,
watched_obj_id=object_id)
#
# WatchedReviewRequestResource
#
def get_watched_review_request_list_url(username, local_site_name=None):
return resources.watched_review_request.get_list_url(
local_site_name=local_site_name,
username=username)
def get_watched_review_request_item_url(username, object_id,
local_site_name=None):
return resources.watched_review_request.get_item_url(
local_site_name=local_site_name,
username=username,
watched_obj_id=object_id)
#
# WebHookResource
#
def get_webhook_list_url(local_site_name=None):
return resources.webhook.get_list_url(local_site_name=local_site_name)
def get_webhook_item_url(webhook_id, local_site_name=None):
return resources.webhook.get_item_url(local_site_name=local_site_name,
webhook_id=webhook_id)
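# Illustrative usage (hypothetical review_request object; these helpers are
# typically called from webapi unit tests to build resource URLs):
#
#     url = get_review_list_url(review_request)
#     site_url = get_review_list_url(review_request,
#                                    local_site_name='local-site-1')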
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import time
import itertools
import cPickle
import os
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import skimage.transform as trans
from skimage.feature import peak_local_max
from skimage.morphology import dilation
import matplotlib.patches as patches
import nn
import models
import utils
def resize(img):
return (trans.resize(img, [224, 224]) * 255).astype('uint8')
def softmax(x, axis=-1):
    # Subtract the global max for numerical stability; the constant shift
    # cancels out in the normalized ratio below.
    e_x = np.exp(x - np.max(x))
    return e_x / np.sum(e_x, axis=axis, keepdims=True)
def IoU(box1, box2):
def inter(x1, x2, y1, y2):
if y1 < x1:
x1, x2, y1, y2 = y1, y2, x1, x2
if y1 > x2:
return 0
return min(x2, y2) - y1
def area(box):
return (box[2] - box[0]) * (box[3] - box[1])
inter_area = (inter(box1[0], box1[2], box2[0], box2[2]) *
inter(box1[1], box1[3], box2[1], box2[3]))
return inter_area / float((area(box1) + area(box2) - inter_area))
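# Worked example (illustrative, not from the original code): for unit boxes
# [0, 0, 1, 1] and [0.5, 0, 1.5, 1], the intersection area is 0.5 * 1 = 0.5
# and the union is 1 + 1 - 0.5 = 1.5, so IoU returns 0.5 / 1.5 ~= 0.333.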
def get_vg_boxes(a, H, W):
a = np.array(a).reshape([2, 4])
objects = a[0, :].astype('float32')
subjects = a[1, :].astype('float32')
subjects[[2, 3]] += subjects[[0, 1]]
subjects[[0, 2]] /= H
subjects[[1, 3]] /= W
objects[[2, 3]] += objects[[0, 1]]
objects[[0, 2]] /= H
objects[[1, 3]] /= W
return subjects, objects
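# get_pred_boxes decodes the one-hot 56x56x4 corner maps produced by the
# search below: channels 0-1 hold the subject's two corners and channels 2-3
# the object's, and each active grid cell index is divided by 54.0 to map it
# back to roughly normalized [0, 1] coordinates.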
def get_pred_boxes(a):
subjects = [np.where(a[0, :, :, 0])[0][0] / 54.0,
np.where(a[0, :, :, 0])[1][0] / 54.0,
np.where(a[0, :, :, 1])[0][0] / 54.0,
np.where(a[0, :, :, 1])[1][0] / 54.0]
objects = [np.where(a[0, :, :, 2])[0][0] / 54.0,
np.where(a[0, :, :, 2])[1][0] / 54.0,
np.where(a[0, :, :, 3])[0][0] / 54.0,
np.where(a[0, :, :, 3])[1][0] / 54.0]
return subjects, objects
def fix_box(b):
if b[0] > b[2]:
b[0], b[2] = b[2], b[0]
if b[1] > b[3]:
b[1], b[3] = b[3], b[1]
return np.array(b)
def dil(x):
return dilation(dilation(x))
def model_template(images, labels,
boxes,
stage):
return models.model_detection(images, labels,
boxes,
stage)
model_factory = tf.make_template("detection", model_template)
imgs_ph = tf.placeholder(shape=[None, 224, 224, 3], dtype=tf.float32)
class_ph = tf.placeholder(shape=[None, 41], dtype=tf.float32)
boxes_ph = tf.placeholder(shape=[None, 56, 56, 4], dtype=tf.float32)
stage_ph = tf.placeholder(shape=[], dtype=tf.int32)
tf.GLOBAL = {}
tf.GLOBAL["init"] = True
tf.GLOBAL["dropout"] = 0.0
with tf.device("/cpu:0"):
_ = model_factory(imgs_ph, [class_ph], boxes_ph, stage_ph)
tf.GLOBAL["init"] = False
tf.GLOBAL["dropout"] = 0.0
with tf.device("gpu:0"):
[label_p_v, point_p_v], loss = model_factory(imgs_ph, [class_ph],
boxes_ph, stage_ph)
dataset = cPickle.load(open('/VG/pickle/val.pickle', 'rb'))
prefix = '/VG/images/'
saver = tf.train.Saver()
box_count = 0
pos = 0
guesses = 0
g_list = []
vis = 1
try:
    os.mkdir('/tmp/results/correct/')
except OSError:
    pass
f1 = plt.figure(figsize=(25, 10))
f2 = plt.figure(figsize=(10, 5))
with tf.Session() as sess:
# Restore the trained model
saver.restore(sess, '')
# Loop over validation examples
for ex_count, ex in enumerate(dataset):
# Unpack example
filename, labels_gt, boxes_gt = ex
# Read image, check shape, get shape
im = pylab.imread(os.path.join(prefix, filename))
if len(im.shape) != 3:
continue
H, W = im.shape[:2]
# Preprocess GT boxes
boxes_gt = [get_vg_boxes(b, H, W) for b in
np.split(np.array(boxes_gt), len(boxes_gt) / 8)]
# Preprocess input image
im = (resize(im)[None] - 127.5) / 127.5
# stage 0
# p = sess.run(label_p_v, {imgs_ph: im,
# class_ph: label_np,
# boxes_ph: boxes_np,
# stage_ph: 0})[0]
# p = softmax(p)
# labels_pred = set(list(np.where(p[0] > (0.2 * np.max(p[0])))[0]))
for label in set(labels_gt):
label_np = np.zeros((1, 41))
label_np[0, label] = 1
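            # Recursively predict the four corner channels stage by stage:
            # run the network for the next stage, take up to four local
            # maxima of the softmaxed heatmap as candidate corners, and
            # branch on each, collecting every complete (box_np, corner_np)
            # assignment once all four stages are filled in.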
def explore(box_np, corner_np, stage):
l = sess.run(point_p_v, {imgs_ph: im,
class_ph: label_np,
boxes_ph: box_np,
stage_ph: stage + 1})[stage]
corner_np[0, :, :, stage] = softmax(l, axis=(1, 2))[0, :, :, 0]
                peaks = peak_local_max(softmax(l, axis=(1, 2))[0, :, :, 0],
                                       min_distance=1,
                                       threshold_rel=0.1, exclude_border=False,
                                       # NOTE: this parses as
                                       # 4 - ((2 * stage) % 2), i.e. always 4;
                                       # 4 - 2 * (stage % 2) may have been
                                       # intended.
                                       num_peaks=4 - 2 * stage % 2)
results = []
for peak in peaks:
box_np[:, peak[0], peak[1], stage] = 1
if stage == 3:
results.append((np.array(box_np), np.array(corner_np)))
box_np[:, peak[0], peak[1], stage] = 0
else:
results += explore(np.array(box_np), np.array(corner_np),
stage + 1)
box_np[:, peak[0], peak[1], stage] = 0
return results
box_np = np.zeros((1, 56, 56, 4))
corner_np = np.zeros((1, 56, 56, 4))
result = explore(box_np, corner_np, 0)
boxes_pred = [x[0] for x in result]
corners_pred = [x[1] for x in result]
# Visualize predictions
if (200 <= ex_count <= 400):
# Create image dir
            try:
                os.mkdir('/tmp/results/%03d' % ex_count)
            except OSError:
                pass
for ii, (box, soft_corner) in enumerate(zip(boxes_pred, corners_pred)):
# Get predicted boxes
[subject_pred, object_pred] = [np.array(fix_box(b))
for b in get_pred_boxes(box)]
# show original image
ax = f1.add_subplot(2, 5, 1)
ax.grid('off')
ax.axis('off')
ax.imshow((im[0] * 127.5 + 127.5).astype('uint8'))
# show relationship detection
ax = f1.add_subplot(2, 5, 6)
ax.grid('off')
ax.axis('off')
ax.imshow((im[0] * 127.5 + 127.5).astype('uint8'))
ax.add_patch(patches.Rectangle(object_pred[0:2][::-1] * 224,
(object_pred[3] - object_pred[1]) * 224,
(object_pred[2] - object_pred[0]) * 224,
fill=False,
linewidth=7, color='red'))
ax.add_patch(patches.Rectangle(subject_pred[0:2][::-1] * 224,
(subject_pred[3] - subject_pred[1]) * 224,
(subject_pred[2] - subject_pred[0]) * 224,
fill=False,
linewidth=4, color='blue'))
for i in range(4):
ax = f1.add_subplot(2, 5, 7 + i)
ax.grid('off')
ax.axis('off')
ax.matshow(dil(box[0, :, :, i]), cmap=plt.get_cmap('jet'))
for i in range(4):
ax = f1.add_subplot(2, 5, 2 + i)
ax.grid('off')
ax.axis('off')
ax.set_title('Stage %i' % (i + 1))
ax.matshow(soft_corner[0, :, :, i], cmap=plt.get_cmap('jet'))
f1.subplots_adjust(wspace=0.01, hspace=0.01,
left=0.05, right=0.95,
bottom=0.05, top=0.95)
f1.savefig('/tmp/results/%03d/%s-%03d.jpg' % (ex_count,
utils.LABEL_MAP[label], ii))
f1.clf()
correct_detections = np.zeros(len(labels_gt))
guesses += len(boxes_pred)
g_list.append(len(boxes_pred))
# For every predicted box
for box in boxes_pred:
# For every GT box
for ii, (label_gt, box_gt) in list(enumerate(zip(labels_gt, boxes_gt))):
# Consider only boxes with correct label
if label_gt != label:
continue
subject_gt, object_gt = fix_box(box_gt[0]), fix_box(box_gt[1])
[subject_pred, object_pred] = [fix_box(b)
for b in get_pred_boxes(box)]
iou1 = IoU(subject_gt, subject_pred)
iou2 = IoU(object_gt, object_pred)
if iou1 >= 0.5 and iou2 >= 0.5:
if correct_detections[ii] < (iou1 * iou2):
correct_detections[ii] = iou1 * iou2
ax = f2.add_subplot(1, 2, 1)
ax.imshow((im[0] * 127.5 + 127.5).astype('uint8'))
ax.grid('off')
ax.axis('off')
ax.add_patch(patches.Rectangle(object_gt[0:2][::-1] * 224,
(object_gt[3] - object_gt[1]) * 224,
(object_gt[2] - object_gt[0]) * 224,
fill=False,
linewidth=7, color='red'))
ax.add_patch(patches.Rectangle(subject_gt[0:2][::-1] * 224,
(subject_gt[3] - subject_gt[1]) * 224,
(subject_gt[2] - subject_gt[0]) * 224,
fill=False,
linewidth=4, color='blue'))
ax.set_title("Ground truth for '" + utils.LABEL_MAP[label] + "'")
ax = f2.add_subplot(1, 2, 2)
ax.imshow((im[0] * 127.5 + 127.5).astype('uint8'))
ax.grid('off')
ax.axis('off')
ax.add_patch(patches.Rectangle(object_pred[0:2][::-1] * 224,
(object_pred[3] - object_pred[1]) * 224,
(object_pred[2] - object_pred[0]) * 224,
fill=False,
linewidth=7, color='red'))
ax.add_patch(patches.Rectangle(subject_pred[0:2][::-1] * 224,
(subject_pred[3] - subject_pred[1]) * 224,
(subject_pred[2] - subject_pred[0]) * 224,
fill=False,
linewidth=4, color='blue'))
ax.set_title("Prediction for '" + utils.LABEL_MAP[label] + "'")
                        try:
                            os.mkdir('/tmp/results/correct/%s' % utils.LABEL_MAP[label])
                        except OSError:
                            pass
f2.subplots_adjust(wspace=0.01, hspace=0.01,
left=0.05, right=0.95,
bottom=0.05, top=0.95)
f2.savefig('/tmp/results/correct/%s/%s-%i.jpg' % (utils.LABEL_MAP[label],
filename[:-4], ii))
f2.clf()
box_count += len(labels_gt)
pos += np.sum(correct_detections > 0)
        # Running metrics: average number of candidate box pairs per example,
        # and the fraction of ground-truth relationships recovered so far
        # (both subject and object IoU >= 0.5).
        print(guesses / float(ex_count + 1))
        print(pos / float(box_count))
|
|
# -*- coding: utf-8 -*-
"""Test CLR field support."""
import System
import pytest
from Python.Test import FieldTest
def test_public_instance_field():
"""Test public instance fields."""
ob = FieldTest()
assert ob.PublicField == 0
ob.PublicField = 1
assert ob.PublicField == 1
with pytest.raises(TypeError):
del FieldTest().PublicField
def test_public_static_field():
"""Test public static fields."""
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
FieldTest.PublicStaticField = 1
assert FieldTest.PublicStaticField == 1
assert ob.PublicStaticField == 1
ob.PublicStaticField = 0
assert ob.PublicStaticField == 0
with pytest.raises(TypeError):
del FieldTest.PublicStaticField
with pytest.raises(TypeError):
del FieldTest().PublicStaticField
def test_protected_instance_field():
"""Test protected instance fields."""
ob = FieldTest()
assert ob.ProtectedField == 0
ob.ProtectedField = 1
assert ob.ProtectedField == 1
with pytest.raises(TypeError):
del FieldTest().ProtectedField
def test_protected_static_field():
"""Test protected static fields."""
ob = FieldTest()
assert FieldTest.ProtectedStaticField == 0
FieldTest.ProtectedStaticField = 1
assert FieldTest.ProtectedStaticField == 1
assert ob.ProtectedStaticField == 1
ob.ProtectedStaticField = 0
assert ob.ProtectedStaticField == 0
with pytest.raises(TypeError):
del FieldTest.ProtectedStaticField
with pytest.raises(TypeError):
del FieldTest().ProtectedStaticField
def test_read_only_instance_field():
"""Test readonly instance fields."""
assert FieldTest().ReadOnlyField == 0
with pytest.raises(TypeError):
FieldTest().ReadOnlyField = 1
with pytest.raises(TypeError):
del FieldTest().ReadOnlyField
def test_read_only_static_field():
"""Test readonly static fields."""
ob = FieldTest()
assert FieldTest.ReadOnlyStaticField == 0
assert ob.ReadOnlyStaticField == 0
with pytest.raises(TypeError):
FieldTest.ReadOnlyStaticField = 1
with pytest.raises(TypeError):
FieldTest().ReadOnlyStaticField = 1
with pytest.raises(TypeError):
del FieldTest.ReadOnlyStaticField
with pytest.raises(TypeError):
del FieldTest().ReadOnlyStaticField
def test_constant_field():
"""Test const fields."""
ob = FieldTest()
assert FieldTest.ConstField == 0
assert ob.ConstField == 0
with pytest.raises(TypeError):
FieldTest().ConstField = 1
with pytest.raises(TypeError):
FieldTest.ConstField = 1
with pytest.raises(TypeError):
del FieldTest().ConstField
with pytest.raises(TypeError):
del FieldTest.ConstField
def test_internal_field():
"""Test internal fields."""
with pytest.raises(AttributeError):
_ = FieldTest().InternalField
with pytest.raises(AttributeError):
_ = FieldTest().InternalStaticField
with pytest.raises(AttributeError):
_ = FieldTest.InternalStaticField
def test_private_field():
"""Test private fields."""
with pytest.raises(AttributeError):
_ = FieldTest().PrivateField
with pytest.raises(AttributeError):
_ = FieldTest().PrivateStaticField
with pytest.raises(AttributeError):
_ = FieldTest.PrivateStaticField
def test_field_descriptor_get_set():
"""Test field descriptor get / set."""
    # This test ensures that setting an attribute implemented with
    # a descriptor actually goes through the descriptor (rather than
    # silently replacing the descriptor in the instance or type dict).
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
assert ob.PublicStaticField == 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
ob.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
FieldTest.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
def test_field_descriptor_wrong_type():
"""Test setting a field using a value of the wrong type."""
with pytest.raises(TypeError):
FieldTest().PublicField = "spam"
def test_field_descriptor_abuse():
"""Test field descriptor abuse."""
desc = FieldTest.__dict__['PublicField']
with pytest.raises(TypeError):
desc.__get__(0, 0)
with pytest.raises(TypeError):
desc.__set__(0, 0)
def test_boolean_field():
"""Test boolean fields."""
# change this to true / false later for Python 2.3?
ob = FieldTest()
assert ob.BooleanField is False
ob.BooleanField = True
assert ob.BooleanField is True
ob.BooleanField = False
assert ob.BooleanField is False
with pytest.raises(TypeError):
ob.BooleanField = 1
with pytest.raises(TypeError):
ob.BooleanField = 0
def test_sbyte_field():
"""Test sbyte fields."""
ob = FieldTest()
assert ob.SByteField == 0
ob.SByteField = 1
assert ob.SByteField == 1
def test_byte_field():
"""Test byte fields."""
ob = FieldTest()
assert ob.ByteField == 0
ob.ByteField = 1
assert ob.ByteField == 1
def test_char_field():
"""Test char fields."""
ob = FieldTest()
assert ob.CharField == u'A'
assert ob.CharField == 'A'
ob.CharField = 'B'
assert ob.CharField == u'B'
assert ob.CharField == 'B'
ob.CharField = u'C'
assert ob.CharField == u'C'
assert ob.CharField == 'C'
def test_int16_field():
"""Test int16 fields."""
ob = FieldTest()
assert ob.Int16Field == 0
ob.Int16Field = 1
assert ob.Int16Field == 1
def test_int32_field():
"""Test int32 fields."""
ob = FieldTest()
assert ob.Int32Field == 0
ob.Int32Field = 1
assert ob.Int32Field == 1
def test_int64_field():
"""Test int64 fields."""
ob = FieldTest()
assert ob.Int64Field == 0
ob.Int64Field = 1
assert ob.Int64Field == 1
def test_uint16_field():
"""Test uint16 fields."""
ob = FieldTest()
assert ob.UInt16Field == 0
ob.UInt16Field = 1
assert ob.UInt16Field == 1
def test_uint32_field():
"""Test uint32 fields."""
ob = FieldTest()
assert ob.UInt32Field == 0
ob.UInt32Field = 1
assert ob.UInt32Field == 1
def test_uint64_field():
"""Test uint64 fields."""
ob = FieldTest()
assert ob.UInt64Field == 0
ob.UInt64Field = 1
assert ob.UInt64Field == 1
def test_single_field():
"""Test single fields."""
ob = FieldTest()
assert ob.SingleField == 0.0
ob.SingleField = 1.1
assert ob.SingleField == System.Single(1.1)
def test_double_field():
"""Test double fields."""
ob = FieldTest()
assert ob.DoubleField == 0.0
ob.DoubleField = 1.1
assert ob.DoubleField == 1.1
def test_decimal_field():
"""Test decimal fields."""
ob = FieldTest()
assert ob.DecimalField == System.Decimal(0)
ob.DecimalField = System.Decimal(1)
assert ob.DecimalField == System.Decimal(1)
def test_string_field():
"""Test string fields."""
ob = FieldTest()
assert ob.StringField == "spam"
ob.StringField = "eggs"
assert ob.StringField == "eggs"
def test_interface_field():
"""Test interface fields."""
from Python.Test import Spam, ISpam
ob = FieldTest()
assert ISpam(ob.SpamField).GetValue() == "spam"
assert ob.SpamField.GetValue() == "spam"
ob.SpamField = Spam("eggs")
assert ISpam(ob.SpamField).GetValue() == "eggs"
assert ob.SpamField.GetValue() == "eggs"
def test_object_field():
"""Test ob fields."""
ob = FieldTest()
assert ob.ObjectField is None
ob.ObjectField = System.String("spam")
assert ob.ObjectField == "spam"
ob.ObjectField = System.Int32(1)
assert ob.ObjectField == 1
ob.ObjectField = None
assert ob.ObjectField is None
def test_enum_field():
"""Test enum fields."""
from Python.Test import ShortEnum
ob = FieldTest()
assert ob.EnumField == ShortEnum.Zero
ob.EnumField = ShortEnum.One
assert ob.EnumField == ShortEnum.One
def test_nullable_field():
"""Test nullable fields."""
ob = FieldTest()
ob.StringField = None
assert ob.StringField is None
ob.ObjectField = None
assert ob.ObjectField is None
ob.SpamField = None
assert ob.SpamField is None
# Primitive types and enums should not be set to null.
with pytest.raises(TypeError):
FieldTest().Int32Field = None
with pytest.raises(TypeError):
FieldTest().EnumField = None
|
|
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Official VHD format specs can be retrieved at:
http://technet.microsoft.com/en-us/library/bb676673.aspx
See "Download the Specifications Without Registering"
Official VHDX format specs can be retrieved at:
http://www.microsoft.com/en-us/download/details.aspx?id=34750
VHD related Win32 API reference:
http://msdn.microsoft.com/en-us/library/windows/desktop/dd323700.aspx
"""
import ctypes
import os
if os.name == 'nt':
from ctypes import wintypes
kernel32 = ctypes.windll.kernel32
virtdisk = ctypes.windll.virtdisk
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.windows import constants
LOG = logging.getLogger(__name__)
if os.name == 'nt':
class Win32_GUID(ctypes.Structure):
_fields_ = [("Data1", wintypes.DWORD),
("Data2", wintypes.WORD),
("Data3", wintypes.WORD),
("Data4", wintypes.BYTE * 8)]
class Win32_VIRTUAL_STORAGE_TYPE(ctypes.Structure):
_fields_ = [
('DeviceId', wintypes.ULONG),
('VendorId', Win32_GUID)
]
class Win32_RESIZE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('NewSize', ctypes.c_ulonglong)
]
class Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V1(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('RWDepth', ctypes.c_ulong),
]
class Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V2(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('GetInfoOnly', wintypes.BOOL),
('ReadOnly', wintypes.BOOL),
('ResiliencyGuid', Win32_GUID)
]
class Win32_MERGE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('MergeDepth', ctypes.c_ulong)
]
class Win32_CREATE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('UniqueId', Win32_GUID),
('MaximumSize', ctypes.c_ulonglong),
('BlockSizeInBytes', wintypes.ULONG),
('SectorSizeInBytes', wintypes.ULONG),
('PhysicalSectorSizeInBytes', wintypes.ULONG),
('ParentPath', wintypes.LPCWSTR),
('SourcePath', wintypes.LPCWSTR),
('OpenFlags', wintypes.DWORD),
('ParentVirtualStorageType', Win32_VIRTUAL_STORAGE_TYPE),
('SourceVirtualStorageType', Win32_VIRTUAL_STORAGE_TYPE),
('ResiliencyGuid', Win32_GUID)
]
class Win32_SIZE(ctypes.Structure):
_fields_ = [("VirtualSize", wintypes.ULARGE_INTEGER),
("PhysicalSize", wintypes.ULARGE_INTEGER),
("BlockSize", wintypes.ULONG),
("SectorSize", wintypes.ULONG)]
class Win32_PARENT_LOCATION(ctypes.Structure):
_fields_ = [('ParentResolved', wintypes.BOOL),
('ParentLocationBuffer', wintypes.WCHAR * 512)]
class Win32_PHYSICAL_DISK(ctypes.Structure):
_fields_ = [("LogicalSectorSize", wintypes.ULONG),
("PhysicalSectorSize", wintypes.ULONG),
("IsRemote", wintypes.BOOL)]
class Win32_VHD_INFO(ctypes.Union):
_fields_ = [("Size", Win32_SIZE),
("Identifier", Win32_GUID),
("ParentLocation", Win32_PARENT_LOCATION),
("ParentIdentifier", Win32_GUID),
("ParentTimestamp", wintypes.ULONG),
("VirtualStorageType", Win32_VIRTUAL_STORAGE_TYPE),
("ProviderSubtype", wintypes.ULONG),
("Is4kAligned", wintypes.BOOL),
("PhysicalDisk", Win32_PHYSICAL_DISK),
("VhdPhysicalSectorSize", wintypes.ULONG),
("SmallestSafeVirtualSize",
wintypes.ULARGE_INTEGER),
("FragmentationPercentage", wintypes.ULONG)]
class Win32_GET_VIRTUAL_DISK_INFO_PARAMETERS(ctypes.Structure):
_fields_ = [("VERSION", wintypes.UINT),
("VhdInfo", Win32_VHD_INFO)]
class Win32_SET_VIRTUAL_DISK_INFO_PARAMETERS(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('ParentFilePath', wintypes.LPCWSTR)
]
VIRTUAL_STORAGE_TYPE_DEVICE_ISO = 1
VIRTUAL_STORAGE_TYPE_DEVICE_VHD = 2
VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 3
VIRTUAL_DISK_ACCESS_NONE = 0
VIRTUAL_DISK_ACCESS_ALL = 0x003f0000
VIRTUAL_DISK_ACCESS_CREATE = 0x00100000
VIRTUAL_DISK_ACCESS_GET_INFO = 0x80000
OPEN_VIRTUAL_DISK_FLAG_NONE = 0
OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS = 1
OPEN_VIRTUAL_DISK_VERSION_1 = 1
OPEN_VIRTUAL_DISK_VERSION_2 = 2
RESIZE_VIRTUAL_DISK_FLAG_NONE = 0
RESIZE_VIRTUAL_DISK_VERSION_1 = 1
CREATE_VIRTUAL_DISK_VERSION_2 = 2
CREATE_VHD_PARAMS_DEFAULT_BLOCK_SIZE = 0
CREATE_VIRTUAL_DISK_FLAG_NONE = 0
CREATE_VIRTUAL_DISK_FLAG_FULL_PHYSICAL_ALLOCATION = 1
MERGE_VIRTUAL_DISK_VERSION_1 = 1
MERGE_VIRTUAL_DISK_FLAG_NONE = 0x00000000
GET_VIRTUAL_DISK_INFO_SIZE = 1
GET_VIRTUAL_DISK_INFO_PARENT_LOCATION = 3
GET_VIRTUAL_DISK_INFO_VIRTUAL_STORAGE_TYPE = 6
GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE = 7
SET_VIRTUAL_DISK_INFO_PARENT_PATH = 1
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
ERROR_VHD_INVALID_TYPE = 0xC03A001B
class VHDUtils(object):
def __init__(self):
self._ext_device_id_map = {
'vhd': VIRTUAL_STORAGE_TYPE_DEVICE_VHD,
'vhdx': VIRTUAL_STORAGE_TYPE_DEVICE_VHDX}
self.create_virtual_disk_flags = {
constants.VHD_TYPE_FIXED: (
CREATE_VIRTUAL_DISK_FLAG_FULL_PHYSICAL_ALLOCATION),
constants.VHD_TYPE_DYNAMIC: CREATE_VIRTUAL_DISK_FLAG_NONE
}
self._vhd_info_members = {
GET_VIRTUAL_DISK_INFO_SIZE: 'Size',
GET_VIRTUAL_DISK_INFO_PARENT_LOCATION: 'ParentLocation',
GET_VIRTUAL_DISK_INFO_VIRTUAL_STORAGE_TYPE:
'VirtualStorageType',
GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE: 'ProviderSubtype',
}
if os.name == 'nt':
self._msft_vendor_id = (
self.get_WIN32_VIRTUAL_STORAGE_TYPE_VENDOR_MSFT())
def _run_and_check_output(self, func, *args, **kwargs):
"""Convenience helper method for running Win32 API methods."""
ignored_error_codes = kwargs.pop('ignored_error_codes', [])
ret_val = func(*args, **kwargs)
# The VHD Win32 API functions return non-zero error codes
# in case of failure.
if ret_val and ret_val not in ignored_error_codes:
error_message = self._get_error_message(ret_val)
func_name = getattr(func, '__name__', '')
err = (_("Executing Win32 API function %(func_name)s failed. "
"Error code: %(error_code)s. "
"Error message: %(error_message)s") %
{'func_name': func_name,
'error_code': ret_val,
'error_message': error_message})
LOG.exception(err)
raise exception.VolumeBackendAPIException(err)
@staticmethod
def _get_error_message(error_code):
message_buffer = ctypes.c_char_p()
kernel32.FormatMessageA(
FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_IGNORE_INSERTS,
None, error_code, 0, ctypes.byref(message_buffer), 0, None)
error_message = message_buffer.value
kernel32.LocalFree(message_buffer)
return error_message
@staticmethod
def get_WIN32_VIRTUAL_STORAGE_TYPE_VENDOR_MSFT():
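        # Builds VIRTUAL_STORAGE_TYPE_VENDOR_MICROSOFT
        # ({EC984AEC-A0F9-47E9-901F-71415A66345B}) as defined in virtdisk.h.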
guid = Win32_GUID()
guid.Data1 = 0xec984aec
guid.Data2 = 0xa0f9
guid.Data3 = 0x47e9
ByteArray8 = wintypes.BYTE * 8
guid.Data4 = ByteArray8(0x90, 0x1f, 0x71, 0x41, 0x5a, 0x66, 0x34, 0x5b)
return guid
def _open(self, vhd_path, open_flag=OPEN_VIRTUAL_DISK_FLAG_NONE,
open_access_mask=VIRTUAL_DISK_ACCESS_ALL,
open_params=0):
device_id = self._get_device_id_by_path(vhd_path)
vst = Win32_VIRTUAL_STORAGE_TYPE()
vst.DeviceId = device_id
vst.VendorId = self._msft_vendor_id
handle = wintypes.HANDLE()
self._run_and_check_output(virtdisk.OpenVirtualDisk,
ctypes.byref(vst),
ctypes.c_wchar_p(vhd_path),
open_access_mask,
open_flag,
open_params,
ctypes.byref(handle))
return handle
def _close(self, handle):
kernel32.CloseHandle(handle)
def _get_device_id_by_path(self, vhd_path):
ext = os.path.splitext(vhd_path)[1][1:].lower()
device_id = self._ext_device_id_map.get(ext)
if not device_id:
raise exception.VolumeBackendAPIException(
_("Unsupported virtual disk extension: %s") % ext)
return device_id
def resize_vhd(self, vhd_path, new_max_size):
handle = self._open(vhd_path)
params = Win32_RESIZE_VIRTUAL_DISK_PARAMETERS()
params.Version = RESIZE_VIRTUAL_DISK_VERSION_1
params.NewSize = new_max_size
try:
self._run_and_check_output(virtdisk.ResizeVirtualDisk,
handle,
RESIZE_VIRTUAL_DISK_FLAG_NONE,
ctypes.byref(params),
None)
finally:
self._close(handle)
def merge_vhd(self, vhd_path):
open_params = Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V1()
open_params.Version = OPEN_VIRTUAL_DISK_VERSION_1
open_params.RWDepth = 2
handle = self._open(vhd_path,
open_params=ctypes.byref(open_params))
params = Win32_MERGE_VIRTUAL_DISK_PARAMETERS()
params.Version = MERGE_VIRTUAL_DISK_VERSION_1
params.MergeDepth = 1
try:
self._run_and_check_output(virtdisk.MergeVirtualDisk,
handle,
MERGE_VIRTUAL_DISK_FLAG_NONE,
ctypes.byref(params),
None)
finally:
self._close(handle)
def _create_vhd(self, new_vhd_path, new_vhd_type, src_path=None,
max_internal_size=0, parent_path=None):
new_device_id = self._get_device_id_by_path(new_vhd_path)
vst = Win32_VIRTUAL_STORAGE_TYPE()
vst.DeviceId = new_device_id
vst.VendorId = self._msft_vendor_id
params = Win32_CREATE_VIRTUAL_DISK_PARAMETERS()
params.Version = CREATE_VIRTUAL_DISK_VERSION_2
params.UniqueId = Win32_GUID()
params.BlockSizeInBytes = CREATE_VHD_PARAMS_DEFAULT_BLOCK_SIZE
params.SectorSizeInBytes = 0x200
params.PhysicalSectorSizeInBytes = 0x200
params.OpenFlags = OPEN_VIRTUAL_DISK_FLAG_NONE
params.ResiliencyGuid = Win32_GUID()
params.MaximumSize = max_internal_size
params.ParentPath = parent_path
params.ParentVirtualStorageType = Win32_VIRTUAL_STORAGE_TYPE()
if src_path:
src_device_id = self._get_device_id_by_path(src_path)
params.SourcePath = src_path
params.SourceVirtualStorageType = Win32_VIRTUAL_STORAGE_TYPE()
params.SourceVirtualStorageType.DeviceId = src_device_id
params.SourceVirtualStorageType.VendorId = self._msft_vendor_id
handle = wintypes.HANDLE()
create_virtual_disk_flag = self.create_virtual_disk_flags.get(
new_vhd_type)
try:
self._run_and_check_output(virtdisk.CreateVirtualDisk,
ctypes.byref(vst),
ctypes.c_wchar_p(new_vhd_path),
VIRTUAL_DISK_ACCESS_NONE,
None,
create_virtual_disk_flag,
0,
ctypes.byref(params),
None,
ctypes.byref(handle))
finally:
self._close(handle)
def get_vhd_info(self, vhd_path, info_members=None):
vhd_info = {}
info_members = info_members or self._vhd_info_members
handle = self._open(vhd_path,
open_access_mask=VIRTUAL_DISK_ACCESS_GET_INFO)
try:
for member in info_members:
info = self._get_vhd_info_member(handle, member)
vhd_info.update(info)
finally:
self._close(handle)
return vhd_info
def _get_vhd_info_member(self, vhd_file, info_member):
virt_disk_info = Win32_GET_VIRTUAL_DISK_INFO_PARAMETERS()
virt_disk_info.VERSION = ctypes.c_uint(info_member)
infoSize = ctypes.sizeof(virt_disk_info)
virtdisk.GetVirtualDiskInformation.restype = wintypes.DWORD
# Note(lpetrut): If the vhd has no parent image, this will
# return an error. No need to raise an exception in this case.
ignored_error_codes = []
if info_member == GET_VIRTUAL_DISK_INFO_PARENT_LOCATION:
ignored_error_codes.append(ERROR_VHD_INVALID_TYPE)
self._run_and_check_output(virtdisk.GetVirtualDiskInformation,
vhd_file,
ctypes.byref(ctypes.c_ulong(infoSize)),
ctypes.byref(virt_disk_info),
0,
ignored_error_codes=ignored_error_codes)
return self._parse_vhd_info(virt_disk_info, info_member)
def _parse_vhd_info(self, virt_disk_info, info_member):
vhd_info = {}
vhd_info_member = self._vhd_info_members[info_member]
info = getattr(virt_disk_info.VhdInfo, vhd_info_member)
if hasattr(info, '_fields_'):
for field in info._fields_:
vhd_info[field[0]] = getattr(info, field[0])
else:
vhd_info[vhd_info_member] = info
return vhd_info
def get_vhd_size(self, vhd_path):
"""Return vhd size.
Returns a dict containing the virtual size, physical size,
block size and sector size of the vhd.
"""
size = self.get_vhd_info(vhd_path,
[GET_VIRTUAL_DISK_INFO_SIZE])
return size
def get_vhd_parent_path(self, vhd_path):
vhd_info = self.get_vhd_info(vhd_path,
[GET_VIRTUAL_DISK_INFO_PARENT_LOCATION])
parent_path = vhd_info['ParentLocationBuffer']
if len(parent_path) > 0:
return parent_path
return None
def create_dynamic_vhd(self, path, max_internal_size):
self._create_vhd(path,
constants.VHD_TYPE_DYNAMIC,
max_internal_size=max_internal_size)
def convert_vhd(self, src, dest,
vhd_type=constants.VHD_TYPE_DYNAMIC):
self._create_vhd(dest, vhd_type, src_path=src)
def create_differencing_vhd(self, path, parent_path):
self._create_vhd(path,
constants.VHD_TYPE_DIFFERENCING,
parent_path=parent_path)
def reconnect_parent(self, child_path, parent_path):
open_params = Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V2()
open_params.Version = OPEN_VIRTUAL_DISK_VERSION_2
open_params.GetInfoOnly = False
handle = self._open(
child_path,
open_flag=OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS,
open_access_mask=VIRTUAL_DISK_ACCESS_NONE,
open_params=ctypes.byref(open_params))
params = Win32_SET_VIRTUAL_DISK_INFO_PARAMETERS()
params.Version = SET_VIRTUAL_DISK_INFO_PARENT_PATH
params.ParentFilePath = parent_path
try:
self._run_and_check_output(virtdisk.SetVirtualDiskInformation,
handle,
ctypes.byref(params))
finally:
self._close(handle)
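# Illustrative usage (hypothetical paths; requires Windows with the virtdisk
# API available):
#
#     vhdutils = VHDUtils()
#     vhdutils.create_dynamic_vhd(r'C:\vhds\volume.vhdx', 10 * 1024 ** 3)
#     size_info = vhdutils.get_vhd_size(r'C:\vhds\volume.vhdx')
#     vhdutils.resize_vhd(r'C:\vhds\volume.vhdx', 20 * 1024 ** 3)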
|
|
#!/usr/bin/env python
"""Utils exporting data from AFF4 to the rest of the world."""
import os
import Queue
import stat
import time
import logging
from grr.lib import aff4
from grr.lib import client_index
from grr.lib import rdfvalue
from grr.lib import sequential_collection
from grr.lib import serialize
from grr.lib import threadpool
from grr.lib import utils
from grr.lib.aff4_objects import aff4_grr
from grr.lib.aff4_objects import standard
from grr.lib.flows.general import collectors
from grr.lib.hunts import results
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import file_finder as rdf_file_finder
from grr.lib.rdfvalues import flows as rdf_flows
BUFFER_SIZE = 16 * 1024 * 1024
def GetAllClients(token=None):
"""Return a list of all client urns."""
index = client_index.CreateClientIndex(token=token)
return index.LookupClients(["."])
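# Illustrative usage (requires a configured GRR data store and a valid access
# token):
#
#     for client_urn in GetAllClients(token=token):
#         print(client_urn)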
class IterateAllClientUrns(object):
"""Class to iterate over all URNs."""
THREAD_POOL_NAME = "ClientUrnIter"
QUEUE_TIMEOUT = 30
def __init__(self, func=None, max_threads=10, token=None):
"""Iterate over all clients in a threadpool.
Args:
func: A function to call with each client urn.
max_threads: Number of threads to use.
token: Auth token.
Raises:
RuntimeError: If function not specified.
"""
self.thread_pool = threadpool.ThreadPool.Factory(self.THREAD_POOL_NAME,
max_threads)
self.thread_pool.Start()
self.token = token
self.func = func
self.broken_subjects = [] # Entries that are broken or fail to run.
self.out_queue = Queue.Queue()
def GetInput(self):
"""Yield client urns."""
clients = GetAllClients(token=self.token)
logging.debug("Got %d clients", len(clients))
return clients
def Run(self):
"""Run the iteration."""
count = 0
for count, input_data in enumerate(self.GetInput()):
if count % 2000 == 0:
logging.debug("%d processed.", count)
args = (input_data, self.out_queue, self.token)
self.thread_pool.AddTask(
target=self.IterFunction, args=args, name=self.THREAD_POOL_NAME)
while count >= 0:
try:
# We only use the timeout to wait if we got to the end of the Queue but
# didn't process everything yet.
out = self.out_queue.get(timeout=self.QUEUE_TIMEOUT, block=True)
if out:
yield out
count -= 1
except Queue.Empty:
break
# Join and stop to clean up the threadpool.
self.thread_pool.Stop()
def IterFunction(self, *args):
"""Function to run on each input. This can be overridden."""
self.func(*args)
class IterateAllClients(IterateAllClientUrns):
"""Class to iterate over all GRR Client objects."""
def __init__(self, max_age, client_chunksize=25, **kwargs):
"""Iterate over all clients in a threadpool.
Args:
max_age: Maximum age in seconds of clients to check.
      client_chunksize: Number of clients to open per chunk.
**kwargs: Arguments passed to init.
"""
super(IterateAllClients, self).__init__(**kwargs)
self.client_chunksize = client_chunksize
self.max_age = max_age
def GetInput(self):
"""Yield client urns."""
client_list = GetAllClients(token=self.token)
logging.debug("Got %d clients", len(client_list))
for client_group in utils.Grouper(client_list, self.client_chunksize):
for fd in aff4.FACTORY.MultiOpen(
client_group,
mode="r",
aff4_type=aff4_grr.VFSGRRClient,
token=self.token):
if isinstance(fd, aff4_grr.VFSGRRClient):
# Skip if older than max_age
oldest_time = (time.time() - self.max_age) * 1e6
if fd.Get(aff4_grr.VFSGRRClient.SchemaCls.PING) >= oldest_time:
yield fd
def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):
"""Download an aff4 file to the local filesystem overwriting it if it exists.
Args:
file_obj: An aff4 object that supports the file interface (Read, Seek)
target_path: Full path of file to write to.
buffer_size: Read in chunks this size.
"""
logging.info(u"Downloading: %s to: %s", file_obj.urn, target_path)
target_file = open(target_path, "wb")
file_obj.Seek(0)
count = 0
data_buffer = file_obj.Read(buffer_size)
while data_buffer:
target_file.write(data_buffer)
data_buffer = file_obj.Read(buffer_size)
count += 1
if not count % 3:
logging.debug(u"Downloading: %s: %s done", file_obj.urn,
utils.FormatNumberAsString(count * buffer_size))
target_file.close()
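# Illustrative usage (hypothetical URN and target path):
#
#     fd = aff4.FACTORY.Open("aff4:/C.1000000000000000/fs/os/etc/hosts",
#                            token=token)
#     DownloadFile(fd, "/tmp/export/hosts")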
def RecursiveDownload(dir_obj,
target_dir,
max_depth=10,
depth=1,
overwrite=False,
max_threads=10):
"""Recursively downloads a file entry to the target path.
Args:
dir_obj: An aff4 object that contains children.
target_dir: Full path of the directory to write to.
max_depth: Depth to download to. 1 means just the directory itself.
depth: Current depth of recursion.
overwrite: Should we overwrite files that exist.
max_threads: Use this many threads to do the downloads.
"""
if not isinstance(dir_obj, aff4.AFF4Volume):
return
# Reuse the same threadpool as we call recursively.
thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
thread_pool.Start()
for sub_file_entry in dir_obj.OpenChildren():
path_elements = [target_dir]
sub_target_dir = u"/".join(path_elements)
try:
# Any file-like object with data in AFF4 should inherit AFF4Stream.
if isinstance(sub_file_entry, aff4.AFF4Stream):
args = (sub_file_entry.urn, sub_target_dir, sub_file_entry.token,
overwrite)
thread_pool.AddTask(
target=CopyAFF4ToLocal, args=args, name="Downloader")
elif "Container" in sub_file_entry.behaviours:
if depth >= max_depth: # Don't go any deeper.
continue
try:
os.makedirs(sub_target_dir)
except OSError:
pass
RecursiveDownload(
sub_file_entry,
sub_target_dir,
overwrite=overwrite,
depth=depth + 1)
except IOError:
logging.exception("Unable to download %s", sub_file_entry.urn)
finally:
sub_file_entry.Close()
# Join and stop the threadpool.
if depth <= 1:
thread_pool.Stop()
def _OpenCollectionPath(coll_path, token=None):
"""Tries to open various types of collections at the given path."""
coll = aff4.FACTORY.Open(coll_path, token=token)
if coll.__class__.__name__ == "RDFValueCollection":
return coll
collection = results.HuntResultCollection(coll_path, token=token)
if collection and collection[0].payload:
return collection
collection = sequential_collection.GeneralIndexedCollection(
coll_path, token=token)
if collection:
return collection
def DownloadCollection(coll_path,
target_path,
token=None,
overwrite=False,
dump_client_info=False,
flatten=False,
max_threads=10):
"""Iterate through a Collection object downloading all files.
Args:
coll_path: Path to an AFF4 collection.
target_path: Base directory to write to.
token: Token for access.
overwrite: If True, overwrite existing files.
dump_client_info: If True, this will detect client paths, and dump a yaml
version of the client object to the root path. This is useful for seeing
the hostname/users of the machine the client id refers to.
flatten: If True, produce a "files" flat folder with links to all the found
files.
max_threads: Use this many threads to do the downloads.
"""
completed_clients = set()
coll = _OpenCollectionPath(coll_path, token=token)
if coll is None:
logging.error("%s is not a valid collection. Typo? "
"Are you sure something was written to it?", coll_path)
return
thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
thread_pool.Start()
# Extract the client id from the source urn. This code makes me
# quite sad but there is just no concept of passing a client id in
# the export tool (and this would be unsafe anyways since the user
# could then download files from arbitrary machines easily). The
# export tool is on the way to deprecation so we decided to do this
# instead of fixing the obsolete code.
try:
collection_urn = coll.collection_id
except AttributeError:
collection_urn = coll.urn
try:
original_client_id = rdf_client.ClientURN(collection_urn.Split()[0])
except IOError:
original_client_id = None
logging.info("Expecting to download %s files", len(coll))
# Collections can include anything they want, but we only handle RDFURN and
# StatEntry entries in this function.
for grr_message in coll:
source = None
# If a raw message, work out the type.
if isinstance(grr_message, rdf_flows.GrrMessage):
source = grr_message.source
grr_message = grr_message.payload
if isinstance(grr_message, rdfvalue.RDFURN):
urn = grr_message
elif isinstance(grr_message, rdf_client.StatEntry):
urn = rdfvalue.RDFURN(grr_message.AFF4Path(source or original_client_id))
elif isinstance(grr_message, rdf_file_finder.FileFinderResult):
urn = rdfvalue.RDFURN(
grr_message.stat_entry.AFF4Path(source or original_client_id))
elif isinstance(grr_message, collectors.ArtifactFilesDownloaderResult):
if grr_message.HasField("downloaded_file"):
urn = grr_message.downloaded_file.AFF4Path(source or original_client_id)
else:
continue
elif isinstance(grr_message, rdfvalue.RDFBytes):
try:
os.makedirs(target_path)
except OSError:
pass
try:
# We just dump out bytes and carry on.
client_id = source.Split()[0]
with open(os.path.join(target_path, client_id), "wb") as fd:
fd.write(str(grr_message))
except AttributeError:
pass
continue
else:
continue
# Handle dumping client info, but only once per client.
if dump_client_info:
client_id = urn.Split()[0]
re_match = aff4_grr.VFSGRRClient.CLIENT_ID_RE.match(client_id)
if re_match and client_id not in completed_clients:
args = (rdf_client.ClientURN(client_id), target_path, token, overwrite)
thread_pool.AddTask(
target=DumpClientYaml, args=args, name="ClientYamlDownloader")
completed_clients.add(client_id)
# Now queue downloading the actual files.
args = (urn, target_path, token, overwrite)
if flatten:
target = CopyAndSymlinkAFF4ToLocal
else:
target = CopyAFF4ToLocal
thread_pool.AddTask(target=target, args=args, name="Downloader")
# Join and stop the threadpool.
thread_pool.Stop()
def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False):
"""Copy an AFF4 object that supports a read interface to local filesystem.
Args:
aff4_urn: URN of thing to copy.
target_dir: Directory to copy the file to.
token: Auth token.
overwrite: If True overwrite the file if it exists.
Returns:
If aff4_urn points to a file, returns path to the downloaded file.
Otherwise returns None.
By default file will only be overwritten if file size differs.
"""
try:
fd = aff4.FACTORY.Open(aff4_urn, token=token)
filepath = os.path.join(target_dir, fd.urn.Path()[1:])
# If urn points to a directory, just create it.
if isinstance(fd, standard.VFSDirectory):
try:
os.makedirs(filepath)
except OSError:
pass
return None
# If urn points to a file, download it.
elif isinstance(fd, aff4.AFF4Stream):
if not os.path.isfile(filepath):
try:
# Ensure directory exists.
os.makedirs(os.path.dirname(filepath))
except OSError:
pass
DownloadFile(fd, filepath)
elif (os.stat(filepath)[stat.ST_SIZE] != fd.Get(fd.Schema.SIZE) or
overwrite):
# We should overwrite because user said, or file sizes differ.
DownloadFile(fd, filepath)
else:
logging.info("File %s exists, skipping", filepath)
return filepath
else:
raise RuntimeError("Opened urn is neither a downloaded file nor a "
"directory: %s" % aff4_urn)
except IOError as e:
logging.exception("Failed to read %s due to %s", aff4_urn, e)
raise
def CopyAndSymlinkAFF4ToLocal(aff4_urn, target_dir, token=None,
overwrite=False):
path = CopyAFF4ToLocal(aff4_urn, target_dir, token=token, overwrite=overwrite)
if path:
files_output_dir = os.path.join(target_dir, "files")
try:
os.makedirs(files_output_dir)
except OSError:
pass
unique_name = "_".join(aff4_urn.Split())
symlink_path = os.path.join(files_output_dir, unique_name)
try:
os.symlink(path, symlink_path)
except OSError:
logging.exception("Can't create symlink to a file: %s -> %s",
symlink_path, path)
def DumpClientYaml(client_urn, target_dir, token=None, overwrite=False):
"""Dump a yaml file containing client info."""
fd = aff4.FACTORY.Open(client_urn, aff4_grr.VFSGRRClient, token=token)
dirpath = os.path.join(target_dir, fd.urn.Split()[0])
try:
# Due to threading this can actually be created by another thread.
os.makedirs(dirpath)
except OSError:
pass
filepath = os.path.join(dirpath, "client_info.yaml")
if not os.path.isfile(filepath) or overwrite:
with open(filepath, "wb") as out_file:
out_file.write(serialize.YamlDumper(fd))
|
|
from typing import Any, Dict, List, Optional, Union
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from zerver.decorator import require_member_or_admin, require_realm_admin
from zerver.forms import PASSWORD_TOO_WEAK_ERROR, CreateUserForm
from zerver.lib.actions import (
check_change_bot_full_name,
check_change_full_name,
check_remove_custom_profile_field_value,
do_change_avatar_fields,
do_change_bot_owner,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_user_role,
do_create_user,
do_deactivate_user,
do_reactivate_user,
do_regenerate_api_key,
do_update_bot_config_data,
do_update_outgoing_webhook_service,
do_update_user_custom_profile_data_if_changed,
notify_created_bot,
)
from zerver.lib.avatar import avatar_url, get_gravatar_url
from zerver.lib.bot_config import set_bot_config
from zerver.lib.email_validation import email_allowed_for_realm
from zerver.lib.exceptions import (
CannotDeactivateLastUserError,
JsonableError,
OrganizationOwnerRequired,
)
from zerver.lib.integrations import EMBEDDED_BOTS
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.streams import access_stream_by_id, access_stream_by_name, subscribed_to_stream
from zerver.lib.types import Validator
from zerver.lib.upload import upload_avatar_image
from zerver.lib.url_encoding import add_query_arg_to_redirect_url
from zerver.lib.users import (
access_bot_by_id,
access_user_by_id,
add_service,
check_bot_creation_policy,
check_bot_name_available,
check_full_name,
check_short_name,
check_valid_bot_config,
check_valid_bot_type,
check_valid_interface_type,
get_api_key,
get_raw_user_data,
validate_user_custom_profile_data,
)
from zerver.lib.utils import generate_api_key
from zerver.lib.validator import (
check_bool,
check_dict,
check_dict_only,
check_int,
check_int_in,
check_list,
check_none_or,
check_string,
check_union,
check_url,
)
from zerver.models import (
DisposableEmailError,
DomainNotAllowedForRealmError,
EmailContainsPlusError,
InvalidFakeEmailDomain,
Message,
Realm,
Service,
Stream,
UserProfile,
get_user,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_including_cross_realm,
get_user_profile_by_id_in_realm,
)
from zproject.backends import check_password_strength
def check_last_owner(user_profile: UserProfile) -> bool:
owners = set(user_profile.realm.get_human_owner_users())
return user_profile.is_realm_owner and not user_profile.is_bot and len(owners) == 1
def deactivate_user_backend(
request: HttpRequest, user_profile: UserProfile, user_id: int
) -> HttpResponse:
target = access_user_by_id(user_profile, user_id, for_admin=True)
if target.is_realm_owner and not user_profile.is_realm_owner:
raise OrganizationOwnerRequired()
if check_last_owner(target):
raise JsonableError(_("Cannot deactivate the only organization owner"))
return _deactivate_user_profile_backend(request, user_profile, target)
def deactivate_user_own_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
if UserProfile.objects.filter(realm=user_profile.realm, is_active=True).count() == 1:
raise CannotDeactivateLastUserError(is_last_owner=False)
if user_profile.is_realm_owner and check_last_owner(user_profile):
raise CannotDeactivateLastUserError(is_last_owner=True)
do_deactivate_user(user_profile, acting_user=user_profile)
return json_success()
def deactivate_bot_backend(
request: HttpRequest, user_profile: UserProfile, bot_id: int
) -> HttpResponse:
target = access_bot_by_id(user_profile, bot_id)
return _deactivate_user_profile_backend(request, user_profile, target)
def _deactivate_user_profile_backend(
request: HttpRequest, user_profile: UserProfile, target: UserProfile
) -> HttpResponse:
do_deactivate_user(target, acting_user=user_profile)
return json_success()
def reactivate_user_backend(
request: HttpRequest, user_profile: UserProfile, user_id: int
) -> HttpResponse:
target = access_user_by_id(
user_profile, user_id, allow_deactivated=True, allow_bots=True, for_admin=True
)
if target.is_bot:
assert target.bot_type is not None
check_bot_creation_policy(user_profile, target.bot_type)
do_reactivate_user(target, acting_user=user_profile)
return json_success()
check_profile_data: Validator[List[Dict[str, Optional[Union[int, str, List[int]]]]]] = check_list(
check_dict_only(
[
("id", check_int),
(
"value",
check_none_or(
check_union([check_int, check_string, check_list(check_int)]),
),
),
]
),
)
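# A minimal example of a payload accepted by check_profile_data (values are
# illustrative): [{"id": 1, "value": "text"}, {"id": 2, "value": None},
# {"id": 3, "value": [4, 5]}]. A None or empty "value" causes the field to
# be removed in update_user_backend below.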
@has_request_variables
def update_user_backend(
request: HttpRequest,
user_profile: UserProfile,
user_id: int,
full_name: Optional[str] = REQ(default=None, json_validator=check_string),
role: Optional[int] = REQ(
default=None,
json_validator=check_int_in(
UserProfile.ROLE_TYPES,
),
),
profile_data: Optional[List[Dict[str, Optional[Union[int, str, List[int]]]]]] = REQ(
default=None,
json_validator=check_profile_data,
),
) -> HttpResponse:
target = access_user_by_id(
user_profile, user_id, allow_deactivated=True, allow_bots=True, for_admin=True
)
if role is not None and target.role != role:
# Require that the current user has permissions to
# grant/remove the role in question. access_user_by_id has
# already verified we're an administrator; here we enforce
# that only owners can toggle the is_realm_owner flag.
if UserProfile.ROLE_REALM_OWNER in [role, target.role] and not user_profile.is_realm_owner:
raise OrganizationOwnerRequired()
if target.role == UserProfile.ROLE_REALM_OWNER and check_last_owner(user_profile):
raise JsonableError(
_("The owner permission cannot be removed from the only organization owner.")
)
do_change_user_role(target, role, acting_user=user_profile)
if full_name is not None and target.full_name != full_name and full_name.strip() != "":
# We don't respect `name_changes_disabled` here because the request
# is on behalf of the administrator.
check_change_full_name(target, full_name, user_profile)
if profile_data is not None:
clean_profile_data = []
for entry in profile_data:
assert isinstance(entry["id"], int)
if entry["value"] is None or not entry["value"]:
field_id = entry["id"]
check_remove_custom_profile_field_value(target, field_id)
else:
clean_profile_data.append(
{
"id": entry["id"],
"value": entry["value"],
}
)
validate_user_custom_profile_data(target.realm.id, clean_profile_data)
do_update_user_custom_profile_data_if_changed(target, clean_profile_data)
return json_success()
def avatar(
request: HttpRequest, user_profile: UserProfile, email_or_id: str, medium: bool = False
) -> HttpResponse:
"""Accepts an email address or user ID and returns the avatar"""
is_email = False
try:
int(email_or_id)
except ValueError:
is_email = True
try:
realm = user_profile.realm
if is_email:
avatar_user_profile = get_user_including_cross_realm(email_or_id, realm)
else:
avatar_user_profile = get_user_by_id_in_realm_including_cross_realm(
int(email_or_id), realm
)
# If there is a valid user account passed in, use its avatar
url = avatar_url(avatar_user_profile, medium=medium)
except UserProfile.DoesNotExist:
# If there is no such user, treat it as a new gravatar
email = email_or_id
avatar_version = 1
url = get_gravatar_url(email, avatar_version, medium)
# We can rely on the URL already having query parameters, because
# our templates depend on being able to use the ampersand to
# add query parameters to our URL; get_avatar_url does '?x=x'
# hacks to prevent us from having to jump through decode/encode hoops.
assert url is not None
url = add_query_arg_to_redirect_url(url, request.META["QUERY_STRING"])
return redirect(url)
def get_stream_name(stream: Optional[Stream]) -> Optional[str]:
if stream:
return stream.name
return None
@require_member_or_admin
@has_request_variables
def patch_bot_backend(
request: HttpRequest,
user_profile: UserProfile,
bot_id: int,
full_name: Optional[str] = REQ(default=None),
bot_owner_id: Optional[int] = REQ(json_validator=check_int, default=None),
config_data: Optional[Dict[str, str]] = REQ(
default=None, json_validator=check_dict(value_validator=check_string)
),
service_payload_url: Optional[str] = REQ(json_validator=check_url, default=None),
service_interface: int = REQ(json_validator=check_int, default=1),
default_sending_stream: Optional[str] = REQ(default=None),
default_events_register_stream: Optional[str] = REQ(default=None),
default_all_public_streams: Optional[bool] = REQ(default=None, json_validator=check_bool),
) -> HttpResponse:
bot = access_bot_by_id(user_profile, bot_id)
if full_name is not None:
check_change_bot_full_name(bot, full_name, user_profile)
if bot_owner_id is not None:
try:
owner = get_user_profile_by_id_in_realm(bot_owner_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("Failed to change owner, no such user"))
if not owner.is_active:
raise JsonableError(_("Failed to change owner, user is deactivated"))
if owner.is_bot:
raise JsonableError(_("Failed to change owner, bots can't own other bots"))
previous_owner = bot.bot_owner
if previous_owner != owner:
do_change_bot_owner(bot, owner, user_profile)
if default_sending_stream is not None:
if default_sending_stream == "":
stream: Optional[Stream] = None
else:
(stream, sub) = access_stream_by_name(user_profile, default_sending_stream)
do_change_default_sending_stream(bot, stream, acting_user=user_profile)
if default_events_register_stream is not None:
if default_events_register_stream == "":
stream = None
else:
(stream, sub) = access_stream_by_name(user_profile, default_events_register_stream)
do_change_default_events_register_stream(bot, stream, acting_user=user_profile)
if default_all_public_streams is not None:
do_change_default_all_public_streams(
bot, default_all_public_streams, acting_user=user_profile
)
if service_payload_url is not None:
check_valid_interface_type(service_interface)
assert service_interface is not None
do_update_outgoing_webhook_service(bot, service_interface, service_payload_url)
if config_data is not None:
do_update_bot_config_data(bot, config_data)
if len(request.FILES) == 0:
pass
elif len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot)
avatar_source = UserProfile.AVATAR_FROM_USER
do_change_avatar_fields(bot, avatar_source, acting_user=user_profile)
else:
raise JsonableError(_("You may only upload one file at a time"))
json_result = dict(
full_name=bot.full_name,
avatar_url=avatar_url(bot),
service_interface=service_interface,
service_payload_url=service_payload_url,
config_data=config_data,
default_sending_stream=get_stream_name(bot.default_sending_stream),
default_events_register_stream=get_stream_name(bot.default_events_register_stream),
default_all_public_streams=bot.default_all_public_streams,
)
# Don't include the bot owner if it is not set.
# Default bots have no owner.
if bot.bot_owner is not None:
json_result["bot_owner"] = bot.bot_owner.email
return json_success(json_result)
@require_member_or_admin
@has_request_variables
def regenerate_bot_api_key(
request: HttpRequest, user_profile: UserProfile, bot_id: int
) -> HttpResponse:
bot = access_bot_by_id(user_profile, bot_id)
new_api_key = do_regenerate_api_key(bot, user_profile)
json_result = dict(
api_key=new_api_key,
)
return json_success(json_result)
@require_member_or_admin
@has_request_variables
def add_bot_backend(
request: HttpRequest,
user_profile: UserProfile,
full_name_raw: str = REQ("full_name"),
short_name_raw: str = REQ("short_name"),
bot_type: int = REQ(json_validator=check_int, default=UserProfile.DEFAULT_BOT),
payload_url: str = REQ(json_validator=check_url, default=""),
service_name: Optional[str] = REQ(default=None),
config_data: Dict[str, str] = REQ(
default={}, json_validator=check_dict(value_validator=check_string)
),
interface_type: int = REQ(json_validator=check_int, default=Service.GENERIC),
default_sending_stream_name: Optional[str] = REQ("default_sending_stream", default=None),
default_events_register_stream_name: Optional[str] = REQ(
"default_events_register_stream", default=None
),
default_all_public_streams: Optional[bool] = REQ(json_validator=check_bool, default=None),
) -> HttpResponse:
short_name = check_short_name(short_name_raw)
if bot_type != UserProfile.INCOMING_WEBHOOK_BOT:
service_name = service_name or short_name
short_name += "-bot"
full_name = check_full_name(full_name_raw)
try:
email = f"{short_name}@{user_profile.realm.get_bot_domain()}"
except InvalidFakeEmailDomain:
raise JsonableError(
_(
"Can't create bots until FAKE_EMAIL_DOMAIN is correctly configured.\n"
"Please contact your server administrator."
)
)
form = CreateUserForm({"full_name": full_name, "email": email})
if bot_type == UserProfile.EMBEDDED_BOT:
if not settings.EMBEDDED_BOTS_ENABLED:
raise JsonableError(_("Embedded bots are not enabled."))
if service_name not in [bot.name for bot in EMBEDDED_BOTS]:
raise JsonableError(_("Invalid embedded bot name."))
if not form.is_valid():
# We validate client-side as well
raise JsonableError(_("Bad name or username"))
try:
get_user_by_delivery_email(email, user_profile.realm)
raise JsonableError(_("Username already in use"))
except UserProfile.DoesNotExist:
pass
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=full_name,
)
check_bot_creation_policy(user_profile, bot_type)
check_valid_bot_type(user_profile, bot_type)
check_valid_interface_type(interface_type)
if len(request.FILES) == 0:
avatar_source = UserProfile.AVATAR_FROM_GRAVATAR
elif len(request.FILES) != 1:
raise JsonableError(_("You may only upload one file at a time"))
else:
avatar_source = UserProfile.AVATAR_FROM_USER
default_sending_stream = None
if default_sending_stream_name is not None:
(default_sending_stream, ignored_sub) = access_stream_by_name(
user_profile, default_sending_stream_name
)
default_events_register_stream = None
if default_events_register_stream_name is not None:
(default_events_register_stream, ignored_sub) = access_stream_by_name(
user_profile, default_events_register_stream_name
)
if bot_type in (UserProfile.INCOMING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT) and service_name:
check_valid_bot_config(bot_type, service_name, config_data)
bot_profile = do_create_user(
email=email,
password=None,
realm=user_profile.realm,
full_name=full_name,
bot_type=bot_type,
bot_owner=user_profile,
avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams,
acting_user=user_profile,
)
if len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot_profile)
if bot_type in (UserProfile.OUTGOING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT):
assert isinstance(service_name, str)
add_service(
name=service_name,
user_profile=bot_profile,
base_url=payload_url,
interface=interface_type,
token=generate_api_key(),
)
if bot_type == UserProfile.INCOMING_WEBHOOK_BOT and service_name:
set_bot_config(bot_profile, "integration_id", service_name)
if bot_type in (UserProfile.INCOMING_WEBHOOK_BOT, UserProfile.EMBEDDED_BOT):
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
notify_created_bot(bot_profile)
api_key = get_api_key(bot_profile)
json_result = dict(
user_id=bot_profile.id,
api_key=api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=get_stream_name(bot_profile.default_sending_stream),
default_events_register_stream=get_stream_name(bot_profile.default_events_register_stream),
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success(json_result)
@require_member_or_admin
def get_bots_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile)
bot_profiles = bot_profiles.select_related(
"default_sending_stream", "default_events_register_stream"
)
bot_profiles = bot_profiles.order_by("date_joined")
def bot_info(bot_profile: UserProfile) -> Dict[str, Any]:
default_sending_stream = get_stream_name(bot_profile.default_sending_stream)
default_events_register_stream = get_stream_name(bot_profile.default_events_register_stream)
# Bots are supposed to have only one API key, at least for now.
# Therefore we can safely assume the first key is the one and only
# valid API key.
api_key = get_api_key(bot_profile)
return dict(
username=bot_profile.email,
full_name=bot_profile.full_name,
api_key=api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success({"bots": list(map(bot_info, bot_profiles))})
@has_request_variables
def get_members_backend(
request: HttpRequest,
user_profile: UserProfile,
user_id: Optional[int] = None,
include_custom_profile_fields: bool = REQ(json_validator=check_bool, default=False),
client_gravatar: bool = REQ(json_validator=check_bool, default=False),
) -> HttpResponse:
"""
The client_gravatar field here is set to True if clients can compute
their own gravatars, which saves us bandwidth. We want to eventually
make this the default behavior, but we have old clients that expect
the server to compute this for us.
"""
realm = user_profile.realm
if realm.email_address_visibility != Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
# If email addresses are only available to administrators,
# clients cannot compute gravatars, so we force-set it to false.
client_gravatar = False
target_user = None
if user_id is not None:
target_user = access_user_by_id(
user_profile, user_id, allow_deactivated=True, allow_bots=True, for_admin=False
)
members = get_raw_user_data(
realm,
user_profile,
target_user=target_user,
client_gravatar=client_gravatar,
user_avatar_url_field_optional=False,
include_custom_profile_fields=include_custom_profile_fields,
)
if target_user is not None:
data: Dict[str, Any] = {"user": members[target_user.id]}
else:
data = {"members": [members[k] for k in members]}
return json_success(data)
@require_realm_admin
@has_request_variables
def create_user_backend(
request: HttpRequest,
user_profile: UserProfile,
email: str = REQ(),
password: str = REQ(),
full_name_raw: str = REQ("full_name"),
) -> HttpResponse:
if not user_profile.can_create_users:
raise JsonableError(_("User not authorized for this query"))
full_name = check_full_name(full_name_raw)
form = CreateUserForm({"full_name": full_name, "email": email})
if not form.is_valid():
raise JsonableError(_("Bad name or username"))
# Check that the new user's email address belongs to the admin's realm
# (Since this is an admin API, we don't require the user to have been
# invited first.)
realm = user_profile.realm
try:
email_allowed_for_realm(email, user_profile.realm)
except DomainNotAllowedForRealmError:
raise JsonableError(
_("Email '{email}' not allowed in this organization").format(
email=email,
)
)
except DisposableEmailError:
raise JsonableError(_("Disposable email addresses are not allowed in this organization"))
except EmailContainsPlusError:
raise JsonableError(_("Email addresses containing + are not allowed."))
try:
get_user_by_delivery_email(email, user_profile.realm)
raise JsonableError(_("Email '{}' already in use").format(email))
except UserProfile.DoesNotExist:
pass
if not check_password_strength(password):
raise JsonableError(PASSWORD_TOO_WEAK_ERROR)
target_user = do_create_user(email, password, realm, full_name, acting_user=user_profile)
return json_success({"user_id": target_user.id})
def get_profile_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
raw_user_data = get_raw_user_data(
user_profile.realm,
user_profile,
target_user=user_profile,
client_gravatar=False,
user_avatar_url_field_optional=False,
)
result: Dict[str, Any] = raw_user_data[user_profile.id]
result["max_message_id"] = -1
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by("-id")[:1]
if messages:
result["max_message_id"] = messages[0].id
return json_success(result)
@has_request_variables
def get_subscription_backend(
request: HttpRequest,
user_profile: UserProfile,
user_id: int = REQ(json_validator=check_int, path_only=True),
stream_id: int = REQ(json_validator=check_int, path_only=True),
) -> HttpResponse:
target_user = access_user_by_id(user_profile, user_id, for_admin=False)
(stream, sub) = access_stream_by_id(user_profile, stream_id)
subscription_status = {"is_subscribed": subscribed_to_stream(target_user, stream_id)}
return json_success(subscription_status)
@has_request_variables
def get_user_by_email(
request: HttpRequest,
user_profile: UserProfile,
email: str,
include_custom_profile_fields: bool = REQ(json_validator=check_bool, default=False),
client_gravatar: bool = REQ(json_validator=check_bool, default=False),
) -> HttpResponse:
realm = user_profile.realm
target_user = None
if email is not None:
try:
target_user = get_user(email, realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such user"))
return get_members_backend(request, user_profile, user_id=target_user.id)
# mysql/reflection.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from ... import log, util
from ... import types as sqltypes
from .enumerated import _EnumeratedValues, SET
from .types import DATETIME, TIME, TIMESTAMP
class ReflectedState(object):
"""Stores raw information about a SHOW CREATE TABLE statement."""
def __init__(self):
self.columns = []
self.table_options = {}
self.table_name = None
self.keys = []
self.fk_constraints = []
self.ck_constraints = []
@log.class_logger
class MySQLTableDefinitionParser(object):
"""Parses the results of a SHOW CREATE TABLE statement."""
def __init__(self, dialect, preparer):
self.dialect = dialect
self.preparer = preparer
self._prep_regexes()
def parse(self, show_create, charset):
state = ReflectedState()
state.charset = charset
for line in re.split(r'\r?\n', show_create):
if line.startswith(' ' + self.preparer.initial_quote):
self._parse_column(line, state)
# a regular table options line
elif line.startswith(') '):
self._parse_table_options(line, state)
# an ANSI-mode table options line
elif line == ')':
pass
elif line.startswith('CREATE '):
self._parse_table_name(line, state)
# Not present in real reflection, but may be present
# if loading from a file.
elif not line:
pass
else:
type_, spec = self._parse_constraints(line)
if type_ is None:
util.warn("Unknown schema content: %r" % line)
elif type_ == 'key':
state.keys.append(spec)
elif type_ == 'fk_constraint':
state.fk_constraints.append(spec)
elif type_ == 'ck_constraint':
state.ck_constraints.append(spec)
else:
pass
return state
def _parse_constraints(self, line):
"""Parse a KEY or CONSTRAINT line.
:param line: A line of SHOW CREATE TABLE output
"""
# KEY
m = self._re_key.match(line)
if m:
spec = m.groupdict()
# convert columns into name, length pairs
spec['columns'] = self._parse_keyexprs(spec['columns'])
return 'key', spec
# FOREIGN KEY CONSTRAINT
m = self._re_fk_constraint.match(line)
if m:
spec = m.groupdict()
spec['table'] = \
self.preparer.unformat_identifiers(spec['table'])
spec['local'] = [c[0]
for c in self._parse_keyexprs(spec['local'])]
spec['foreign'] = [c[0]
for c in self._parse_keyexprs(spec['foreign'])]
return 'fk_constraint', spec
# CHECK constraint
m = self._re_ck_constraint.match(line)
if m:
spec = m.groupdict()
return 'ck_constraint', spec
# PARTITION and SUBPARTITION
m = self._re_partition.match(line)
if m:
# Punt!
return 'partition', line
# No match.
return (None, line)
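# Illustrative only (not real SHOW CREATE TABLE output): a line such as
# " KEY `ix_name` (`name`(32))," matches self._re_key, and _parse_keyexprs
# unpacks spec['columns'] into name/length pairs like [('name', '32')];
# ASC/DESC markers are deliberately not reflected.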
def _parse_table_name(self, line, state):
"""Extract the table name.
:param line: The first line of SHOW CREATE TABLE
"""
regex, cleanup = self._pr_name
m = regex.match(line)
if m:
state.table_name = cleanup(m.group('name'))
def _parse_table_options(self, line, state):
"""Build a dictionary of all reflected table-level options.
:param line: The final line of SHOW CREATE TABLE output.
"""
options = {}
if not line or line == ')':
pass
else:
rest_of_line = line[:]
for regex, cleanup in self._pr_options:
m = regex.search(rest_of_line)
if not m:
continue
directive, value = m.group('directive'), m.group('val')
if cleanup:
value = cleanup(value)
options[directive.lower()] = value
rest_of_line = regex.sub('', rest_of_line)
for nope in ('auto_increment', 'data directory', 'index directory'):
options.pop(nope, None)
for opt, val in options.items():
state.table_options['%s_%s' % (self.dialect.name, opt)] = val
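# Illustrative only: a line like ") ENGINE=InnoDB DEFAULT CHARSET=utf8" ends
# up in state.table_options as {'mysql_engine': 'InnoDB',
# 'mysql_default charset': 'utf8'} -- directives are lowercased and prefixed
# with the dialect name.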
def _parse_column(self, line, state):
"""Extract column details.
Falls back to a 'minimal support' variant if full parse fails.
:param line: Any column-bearing line from SHOW CREATE TABLE
"""
spec = None
m = self._re_column.match(line)
if m:
spec = m.groupdict()
spec['full'] = True
else:
m = self._re_column_loose.match(line)
if m:
spec = m.groupdict()
spec['full'] = False
if not spec:
util.warn("Unknown column definition %r" % line)
return
if not spec['full']:
util.warn("Incomplete reflection of column definition %r" % line)
name, type_, args = spec['name'], spec['coltype'], spec['arg']
try:
col_type = self.dialect.ischema_names[type_]
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
col_type = sqltypes.NullType
# Column type positional arguments, e.g. varchar(32)
if args is None or args == '':
type_args = []
elif args[0] == "'" and args[-1] == "'":
type_args = self._re_csv_str.findall(args)
else:
type_args = [int(v) for v in self._re_csv_int.findall(args)]
# Column type keyword options
type_kw = {}
if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
if type_args:
type_kw['fsp'] = type_args.pop(0)
for kw in ('unsigned', 'zerofill'):
if spec.get(kw, False):
type_kw[kw] = True
for kw in ('charset', 'collate'):
if spec.get(kw, False):
type_kw[kw] = spec[kw]
if issubclass(col_type, _EnumeratedValues):
type_args = _EnumeratedValues._strip_values(type_args)
if issubclass(col_type, SET) and '' in type_args:
type_kw['retrieve_as_bitwise'] = True
type_instance = col_type(*type_args, **type_kw)
col_kw = {}
# NOT NULL
col_kw['nullable'] = True
# this can be "NULL" in the case of TIMESTAMP
if spec.get('notnull', False) == 'NOT NULL':
col_kw['nullable'] = False
# AUTO_INCREMENT
if spec.get('autoincr', False):
col_kw['autoincrement'] = True
elif issubclass(col_type, sqltypes.Integer):
col_kw['autoincrement'] = False
# DEFAULT
default = spec.get('default', None)
if default == 'NULL':
# eliminates the need to deal with this later.
default = None
col_d = dict(name=name, type=type_instance, default=default)
col_d.update(col_kw)
state.columns.append(col_d)
def _describe_to_create(self, table_name, columns):
"""Re-format DESCRIBE output as a SHOW CREATE TABLE string.
DESCRIBE is a much simpler reflection and is sufficient for
reflecting views for runtime use. This method formats DDL
for columns only; keys are omitted.
:param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
SHOW FULL COLUMNS FROM rows must be rearranged for use with
this function.
"""
buffer = []
for row in columns:
(name, col_type, nullable, default, extra) = \
[row[i] for i in (0, 1, 2, 4, 5)]
line = [' ']
line.append(self.preparer.quote_identifier(name))
line.append(col_type)
if not nullable:
line.append('NOT NULL')
if default:
if 'auto_increment' in default:
pass
elif (col_type.startswith('timestamp') and
default.startswith('C')):
line.append('DEFAULT')
line.append(default)
elif default == 'NULL':
line.append('DEFAULT')
line.append(default)
else:
line.append('DEFAULT')
line.append("'%s'" % default.replace("'", "''"))
if extra:
line.append(extra)
buffer.append(' '.join(line))
return ''.join([('CREATE TABLE %s (\n' %
self.preparer.quote_identifier(table_name)),
',\n'.join(buffer),
'\n) '])
def _parse_keyexprs(self, identifiers):
"""Unpack '"col"(2),"col" ASC'-ish strings into components."""
return self._re_keyexprs.findall(identifiers)
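# e.g. '`col`,`col2`(32)' -> [('col', ''), ('col2', '32')] with the default
# backtick quoting (an illustrative input, not captured from a server).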
def _prep_regexes(self):
"""Pre-compile regular expressions."""
self._re_columns = []
self._pr_options = []
_final = self.preparer.final_quote
quotes = dict(zip(('iq', 'fq', 'esc_fq'),
[re.escape(s) for s in
(self.preparer.initial_quote,
_final,
self.preparer._escape_identifier(_final))]))
self._pr_name = _pr_compile(
r'^CREATE (?:\w+ +)?TABLE +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
self.preparer._unescape_identifier)
# `col`,`col2`(32),`col3`(15) DESC
#
# Note: ASC and DESC aren't reflected, so we'll punt...
self._re_keyexprs = _re_compile(
r'(?:'
r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
# 123 or 123,456
self._re_csv_int = _re_compile(r'\d+')
# `colname` <type> [type opts]
# (NOT NULL | NULL)
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
# COMMENT 'comment'
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
# STORAGE (DISK|MEMORY)
self._re_column = _re_compile(
r" "
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
r"(?P<coltype>\w+)"
r"(?:\((?P<arg>(?:\d+|\d+,\d+|"
r"(?:'(?:''|[^'])*',?)+))\))?"
r"(?: +(?P<unsigned>UNSIGNED))?"
r"(?: +(?P<zerofill>ZEROFILL))?"
r"(?: +CHARACTER SET +(?P<charset>[\w_]+))?"
r"(?: +COLLATE +(?P<collate>[\w_]+))?"
r"(?: +(?P<notnull>(?:NOT )?NULL))?"
r"(?: +DEFAULT +(?P<default>"
r"(?:NULL|'(?:''|[^'])*'|[\w\(\)]+"
r"(?: +ON UPDATE [\w\(\)]+)?)"
r"))?"
r"(?: +(?P<autoincr>AUTO_INCREMENT))?"
r"(?: +COMMENT +'(?P<comment>(?:''|[^'])*)')?"
r"(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?"
r"(?: +STORAGE +(?P<storage>\w+))?"
r"(?: +(?P<extra>.*))?"
r",?$"
% quotes
)
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
r'.*?(?P<notnull>(?:NOT )NULL)?'
% quotes
)
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
# KEY_BLOCK_SIZE size | WITH PARSER name
self._re_key = _re_compile(
r' '
r'(?:(?P<type>\S+) )?KEY'
r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
r'(?: +USING +(?P<using_pre>\S+))?'
r' +\((?P<columns>.+?)\)'
r'(?: +USING +(?P<using_post>\S+))?'
r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?'
r'(?: +WITH PARSER +(?P<parser>\S+))?'
r'(?: +COMMENT +(?P<comment>(\x27\x27|\x27([^\x27])*?\x27)+))?'
r',?$'
% quotes
)
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
# MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
# ON DELETE CASCADE ON UPDATE RESTRICT
#
# unique constraints come back as KEYs
kw = quotes.copy()
kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
self._re_fk_constraint = _re_compile(
r' '
r'CONSTRAINT +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'FOREIGN KEY +'
r'\((?P<local>[^\)]+?)\) REFERENCES +'
r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
r'\((?P<foreign>[^\)]+?)\)'
r'(?: +(?P<match>MATCH \w+))?'
r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
% kw
)
# CONSTRAINT `CONSTRAINT_1` CHECK (`x` > 5)'
# testing on MariaDB 10.2 shows that the CHECK constraint
# is returned on a line by itself, so to match without worrying
# about parentheses in the expression we go to the end of the line
self._re_ck_constraint = _re_compile(
r' '
r'CONSTRAINT +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'CHECK +'
r'\((?P<sqltext>.+)\),?'
% kw
)
# PARTITION
#
# punt!
self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
# Table-level options (COLLATE, ENGINE, etc.)
# Do the string options first, since they have quoted
# strings we need to get rid of.
for option in _options_of_type_string:
self._add_option_string(option)
for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
'AVG_ROW_LENGTH', 'CHARACTER SET',
'DEFAULT CHARSET', 'CHECKSUM',
'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
'KEY_BLOCK_SIZE'):
self._add_option_word(option)
self._add_option_regex('UNION', r'\([^\)]+\)')
self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
self._add_option_regex(
'RAID_TYPE',
r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
_optional_equals = r'(?:\s*(?:=\s*)|\s+)'
def _add_option_string(self, directive):
regex = (r'(?P<directive>%s)%s'
r"'(?P<val>(?:[^']|'')*?)'(?!')" %
(re.escape(directive), self._optional_equals))
self._pr_options.append(_pr_compile(
regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
))
def _add_option_word(self, directive):
regex = (r'(?P<directive>%s)%s'
r'(?P<val>\w+)' %
(re.escape(directive), self._optional_equals))
self._pr_options.append(_pr_compile(regex))
def _add_option_regex(self, directive, regex):
regex = (r'(?P<directive>%s)%s'
r'(?P<val>%s)' %
(re.escape(directive), self._optional_equals, regex))
self._pr_options.append(_pr_compile(regex))
_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
'PASSWORD', 'CONNECTION')
def _pr_compile(regex, cleanup=None):
"""Prepare a 2-tuple of compiled regex and callable."""
return (_re_compile(regex), cleanup)
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
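# A minimal usage sketch (illustrative; `dialect` is a MySQL dialect
# instance and `show_create_output` a SHOW CREATE TABLE string):
#
# parser = MySQLTableDefinitionParser(dialect, dialect.identifier_preparer)
# state = parser.parse(show_create_output, charset='utf8')
# state.columns, state.keys, state.fk_constraints # reflected pieces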
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Built-in instance properties."""
import re
import sys
import uuid
from oslo.config import cfg
import six
from nova import context
from nova import db
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.pci import pci_request
from nova import utils
flavor_opts = [
cfg.StrOpt('default_flavor',
default='m1.small',
help='default flavor to use for the EC2 API only. The Nova API '
'does not support a default flavor.'),
]
CONF = cfg.CONF
CONF.register_opts(flavor_opts)
LOG = logging.getLogger(__name__)
# NOTE(luisg): Flavor names can include non-ascii characters so that users can
# create flavor names in locales that use them, however flavor IDs are limited
# to ascii characters.
VALID_ID_REGEX = re.compile(r"^[\w\.\- ]*$")
VALID_NAME_REGEX = re.compile(r"^[\w\.\- ]*$", re.UNICODE)
def _int_or_none(val):
if val is not None:
return int(val)
system_metadata_flavor_props = {
'id': int,
'name': str,
'memory_mb': int,
'vcpus': int,
'root_gb': int,
'ephemeral_gb': int,
'flavorid': str,
'swap': int,
'rxtx_factor': float,
'vcpu_weight': _int_or_none,
}
def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None,
swap=0, rxtx_factor=1.0, is_public=True):
"""Creates flavors."""
if not flavorid:
flavorid = uuid.uuid4()
kwargs = {
'memory_mb': memory,
'vcpus': vcpus,
'root_gb': root_gb,
'ephemeral_gb': ephemeral_gb,
'swap': swap,
'rxtx_factor': rxtx_factor,
}
if isinstance(name, six.string_types):
name = name.strip()
# ensure name does not exceed 255 characters
utils.check_string_length(name, 'name', min_length=1, max_length=255)
# ensure name does not contain any special characters
valid_name = VALID_NAME_REGEX.search(name)
if not valid_name:
msg = _("Flavor names can only contain alphanumeric characters, "
"periods, dashes, underscores and spaces.")
raise exception.InvalidInput(reason=msg)
# NOTE(vish): Internally, flavorid is stored as a string but it comes
# in through json as an integer, so we convert it here.
flavorid = unicode(flavorid)
# ensure leading/trailing whitespaces not present.
if flavorid.strip() != flavorid:
msg = _("id cannot contain leading and/or trailing whitespace(s)")
raise exception.InvalidInput(reason=msg)
# ensure flavor id does not exceed 255 characters
utils.check_string_length(flavorid, 'id', min_length=1,
max_length=255)
# ensure flavor id does not contain any special characters
valid_flavor_id = VALID_ID_REGEX.search(flavorid)
if not valid_flavor_id:
msg = _("Flavor id can only contain letters from A-Z (both cases), "
"periods, dashes, underscores and spaces.")
raise exception.InvalidInput(reason=msg)
# Some attributes are positive ( > 0) integers
for option in ['memory_mb', 'vcpus']:
kwargs[option] = utils.validate_integer(kwargs[option], option, 1,
sys.maxint)
# Some attributes are non-negative ( >= 0) integers
for option in ['root_gb', 'ephemeral_gb', 'swap']:
kwargs[option] = utils.validate_integer(kwargs[option], option, 0,
sys.maxint)
# rxtx_factor should be a positive float
try:
kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
if kwargs['rxtx_factor'] <= 0:
raise ValueError()
except ValueError:
msg = _("'rxtx_factor' argument must be a positive float")
raise exception.InvalidInput(reason=msg)
kwargs['name'] = name
kwargs['flavorid'] = flavorid
# ensure is_public attribute is boolean
try:
kwargs['is_public'] = strutils.bool_from_string(
is_public, strict=True)
except ValueError:
raise exception.InvalidInput(reason=_("is_public must be a boolean"))
try:
return db.flavor_create(context.get_admin_context(), kwargs)
except db_exc.DBError as e:
LOG.exception(_('DB error: %s') % e)
raise exception.FlavorCreateFailed()
def destroy(name):
"""Marks flavor as deleted."""
try:
if not name:
raise ValueError()
db.flavor_destroy(context.get_admin_context(), name)
except (ValueError, exception.NotFound):
LOG.exception(_('Instance type %s not found for deletion') % name)
raise exception.FlavorNotFoundByName(flavor_name=name)
def get_all_flavors(ctxt=None, inactive=False, filters=None):
"""Get all non-deleted flavors as a dict.
Pass inactive=True if you also want deleted flavors returned.
"""
if ctxt is None:
ctxt = context.get_admin_context()
inst_types = db.flavor_get_all(
ctxt, inactive=inactive, filters=filters)
inst_type_dict = {}
for inst_type in inst_types:
inst_type_dict[inst_type['id']] = inst_type
return inst_type_dict
def get_all_flavors_sorted_list(ctxt=None, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc',
limit=None, marker=None):
"""Get all non-deleted flavors as a sorted list.
Pass inactive=True if you also want deleted flavors returned.
"""
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_get_all(ctxt, filters=filters, sort_key=sort_key,
sort_dir=sort_dir, limit=limit, marker=marker)
def get_default_flavor():
"""Get the default flavor."""
name = CONF.default_flavor
return get_flavor_by_name(name)
def get_flavor(instance_type_id, ctxt=None, inactive=False):
"""Retrieves single flavor by id."""
if instance_type_id is None:
return get_default_flavor()
if ctxt is None:
ctxt = context.get_admin_context()
if inactive:
ctxt = ctxt.elevated(read_deleted="yes")
return db.flavor_get(ctxt, instance_type_id)
def get_flavor_by_name(name, ctxt=None):
"""Retrieves single flavor by name."""
if name is None:
return get_default_flavor()
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_get_by_name(ctxt, name)
# TODO(termie): flavor-specific code should probably be in the API that uses
# flavors.
def get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
"""Retrieve flavor by flavorid.
:raises: FlavorNotFound
"""
if ctxt is None:
ctxt = context.get_admin_context(read_deleted=read_deleted)
return db.flavor_get_by_flavor_id(ctxt, flavorid, read_deleted)
def get_flavor_access_by_flavor_id(flavorid, ctxt=None):
"""Retrieve flavor access list by flavor id."""
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_access_get_by_flavor_id(ctxt, flavorid)
def add_flavor_access(flavorid, projectid, ctxt=None):
"""Add flavor access for project."""
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_access_add(ctxt, flavorid, projectid)
def remove_flavor_access(flavorid, projectid, ctxt=None):
"""Remove flavor access for project."""
if ctxt is None:
ctxt = context.get_admin_context()
return db.flavor_access_remove(ctxt, flavorid, projectid)
def extract_flavor(instance, prefix=''):
"""Create an InstanceType-like object from instance's system_metadata
information.
"""
instance_type = {}
sys_meta = utils.instance_sys_meta(instance)
for key, type_fn in system_metadata_flavor_props.items():
type_key = '%sinstance_type_%s' % (prefix, key)
instance_type[key] = type_fn(sys_meta[type_key])
return instance_type
def save_flavor_info(metadata, instance_type, prefix=''):
"""Save properties from instance_type into instance's system_metadata,
in the format of:
[prefix]instance_type_[key]
This can be used to update system_metadata in place from a type, as well
as stash information about another instance_type for later use (such as
during resize).
"""
for key in system_metadata_flavor_props.keys():
to_key = '%sinstance_type_%s' % (prefix, key)
metadata[to_key] = instance_type[key]
pci_request.save_flavor_pci_info(metadata, instance_type, prefix)
return metadata
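# Illustrative example: save_flavor_info(md, flavor, prefix='new_') stores
# keys such as 'new_instance_type_memory_mb' and 'new_instance_type_flavorid'
# in md, which extract_flavor(instance, prefix='new_') can later read back.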
def delete_flavor_info(metadata, *prefixes):
"""Delete flavor instance_type information from instance's system_metadata
by prefix.
"""
for key in system_metadata_flavor_props.keys():
for prefix in prefixes:
to_key = '%sinstance_type_%s' % (prefix, key)
del metadata[to_key]
pci_request.delete_flavor_pci_info(metadata, *prefixes)
return metadata
"""
Python version of the mlayer code in SHADOW for reflectivity scans
TODO: graded multilayers
TODO: vectorization
"""
__author__ = 'srio'
#
import numpy
import scipy.constants as codata
tocm = codata.h*codata.c/codata.e*1e2 # 12398.419739640718e-8
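# tocm converts photon energy to wavelength: lambda[cm] = tocm / E[eV]
# (about 1.24e-4 cm*eV); reflec() below uses wnum = 2*pi*E/tocm in cm^-1.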
from srxraylib.util.h5_simple_writer import H5SimpleWriter
# retrieve a binding of the xraylib refractive index that accepts NIST compounds
from orangecontrib.xoppy.util.xoppy_xraylib_util import density, Refractive_Index_Re, Refractive_Index_Im
class MLayer(object):
def __init__(self):
self.using_pre_mlayer = False
self.pre_mlayer_dict = None
def read_preprocessor_file(self,filename):
out_dict = {}
fp = open(filename) # Open file in read mode
lines = fp.read().split("\n") # Create a list containing all lines
fp.close() # Close file
index_pointer = 0
np = int(lines[index_pointer])
out_dict['np'] = np
energy = numpy.zeros(np)
index_pointer += 1
# mylist = lines[index_pointer].split(" ")
mylist = lines[index_pointer].split()
for i in range(np):
energy[i] = float(mylist[i])
out_dict["energy"] = energy
delta_s = numpy.zeros_like(energy)
beta_s = numpy.zeros_like(energy)
delta_e = numpy.zeros_like(energy)
beta_e = numpy.zeros_like(energy)
delta_o = numpy.zeros_like(energy)
beta_o = numpy.zeros_like(energy)
for i in range(np):
index_pointer += 1
mylist = lines[index_pointer].strip().split()
delta_s[i] = float(mylist[0])
beta_s[i] = float(mylist[1])
for i in range(np):
index_pointer += 1
mylist = lines[index_pointer].strip().split()
delta_e[i] = float(mylist[0])
beta_e[i] = float(mylist[1])
for i in range(np):
index_pointer += 1
mylist = lines[index_pointer].strip().split()
delta_o[i] = float(mylist[0])
beta_o[i] = float(mylist[1])
out_dict["delta_s"] = delta_s
out_dict["beta_s"] = beta_s
out_dict["delta_e"] = delta_e
out_dict["beta_e"] = beta_e
out_dict["delta_o"] = delta_o
out_dict["beta_o"] = beta_o
# #! srio@esrf.eu 2012-06-07 Nevot-Croce ML roughness model implemented.
# #! By convention, starting from the version that includes ML roughness
# #! we set NPAR negative, in order to assure compatibility with old
# #! versions. If NPAR<0, roughness data are read, if NPAR>0 no roughness.
index_pointer += 1
npair = int(lines[index_pointer])
out_dict["npair"] = npair
thick = numpy.zeros(numpy.abs(npair))
gamma1 = numpy.zeros_like(thick)
mlroughness1 = numpy.zeros_like(thick)
mlroughness2 = numpy.zeros_like(thick)
for i in range(numpy.abs(npair)):
index_pointer += 1
mylist = lines[index_pointer].strip().split()
thick[i] = float(mylist[0])
gamma1[i] = float(mylist[1])
mlroughness1[i] = float(mylist[2])
mlroughness2[i] = float(mylist[3])
out_dict["thick"] = thick
out_dict["gamma1"] = gamma1
out_dict["mlroughness1"] = mlroughness1
out_dict["mlroughness2"] = mlroughness2
index_pointer += 1
igrade = int(lines[index_pointer])
out_dict["igrade"] = igrade
if igrade == 1:
index_pointer += 1
fgrade = int(lines[index_pointer])
out_dict["fgrade"] = fgrade
elif igrade == 2: # igrade=2, coefficients
index_pointer += 1
mylist = lines[index_pointer].strip().split()
a0 = float(mylist[0])
a1 = float(mylist[1])
a2 = float(mylist[2])
a3 = float(mylist[3])
out_dict["a0"] = a0
out_dict["a1"] = a1
out_dict["a2"] = a2
out_dict["a3"] = a3
self.pre_mlayer_dict = out_dict
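# A minimal usage sketch (the file name is illustrative):
# ml = MLayer()
# ml.read_preprocessor_file("mlayer.dat")
# ml.using_pre_mlayer = True # note: read_preprocessor_file does not set this flag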
#
# this is copied from shadow3 python preprocessors
#
@classmethod
def pre_mlayer(cls, interactive=False, FILE="pre_mlayer.dat",E_MIN=5000.0,E_MAX=20000.0,
S_DENSITY=2.33,S_MATERIAL="Si",
E_DENSITY=2.40,E_MATERIAL="B4C",
O_DENSITY=9.40,O_MATERIAL="Ru",
GRADE_DEPTH=0,N_PAIRS=70,THICKNESS=33.1,GAMMA=0.483,
ROUGHNESS_EVEN=3.3,ROUGHNESS_ODD=3.1,
FILE_DEPTH="myfile_depth.dat",GRADE_SURFACE=0,FILE_SHADOW="mlayer1.sha",
FILE_THICKNESS="mythick.dat",FILE_GAMMA="mygamma.dat",AA0=1.0,AA1=0.0,AA2=0.0,AA3=0.0):
"""
SHADOW preprocessor for multilayers - python+xraylib version
"""
import xraylib
# input section
if interactive:
print("pre_mlayer: SHADOW preprocessor for multilayers - python+xraylib version")
fileout = input("Name of output file : ")
estart = input("Photon energy (eV) from : ")
estart = float(estart)
efinal = input(" to : ")
efinal = float(efinal)
print(" ")
print("The stack is as follows: ")
print(" ")
print(" vacuum ")
print(" |------------------------------| \ ")
print(" | odd (n) | | ")
print(" |------------------------------| | BILAYER # n ")
print(" | even (n) | | ")
print(" |------------------------------| / ")
print(" | . | ")
print(" | . | ")
print(" | . | ")
print(" |------------------------------| \ ")
print(" | odd (1) | | ")
print(" |------------------------------| | BILAYER # 1 ")
print(" | even (1) | | ")
print(" |------------------------------| / ")
print(" | | ")
print(" |///////// substrate //////////| ")
print(" | | ")
print(" ")
print(" ")
# substrate
matSubstrate = input("Specify the substrate material : ")
denSubstrate = input("Specify the substrate density [g/cm^3] : ")
denSubstrate = float(denSubstrate)
print("Right above the substrate is the even layer material")
matEven = input("Specify the even layer material : ")
denEven = input("Specify the even layer density [g/cm^3] : ")
denEven = float(denEven)
print("Odd layer material is on top of the even layer.")
matOdd = input("Specify the odd layer material : ")
denOdd = input("Specify the odd layer density [g/cm^3] : ")
denOdd = float(denOdd)
#! By convention, starting from the version that includes ML roughness
#! we set NPAR negative, in order to assure compatibility with old
#! versions. If NPAR<0, roughness data are read, if NPAR>0 no roughness.
npair = input("No. of layer pairs : ")
npair = int(npair)
print(" ")
print("Starting from the substrate surface, specify the thickness t :")
print(" t = t(odd) + t(even) in Angstroms,")
print("and the gamma ratio :")
print(" t(even) / (t(odd) + t(even))")
print("for EACH bilayer.")
print(" ")
print("Type two -1 whenever you want the remaining layers ")
print("to assume the thickness, gamma ratio and roughnesses of the previous one.")
print(" ")
#define variables
thick=[0e0]*npair
gamma1=[0e0]*npair
mlroughness1=[0e0]*npair
mlroughness2=[0e0]*npair
for i in range(npair):
tmps = ("thickness [A], gamma ratio, roughness even [A] and roughness odd [A] of bilayer %i: \n"% (i+1) )
tmp = input(tmps)
tmp1 = tmp.split()
if ((i != 0) and (int(float(tmp1[0])) == -1)):
# fill the remaining bilayers with the previous values (slice end must be
# npair, not npair-1, so the list length stays npair)
thick[i:npair] = [thick[i-1]] * (npair-i)
gamma1[i:npair] = [gamma1[i-1]] * (npair-i)
mlroughness1[i:npair] = [mlroughness1[i-1]] * (npair-i)
mlroughness2[i:npair] = [mlroughness2[i-1]] * (npair-i)
break
else:
thick[i] = float(tmp1[0])
gamma1[i] = float(tmp1[1])
mlroughness1[i] = float(tmp1[2])
mlroughness2[i] = float(tmp1[3])
print("***************************************************")
print(" Is the multilayer graded over the surface? ")
print(" 0: No ")
print(" 1: t and/or gamma graded over the surface ")
print(" (input spline files with t and gamma gradient")
print(" 2: t graded over the surface ")
print(" (input quadratic fit to t gradient)")
print(" ")
igrade = input("Is t and/or gamma graded over the surface [0=No/1=Yes] ? ")
igrade = int(igrade)
if igrade == 1:
print("Generation of the spline coefficients for the t and gamma factors")
print("over the surface.")
print("Then GRADE_MLAYER should be run to generate the spline ")
print("coefficients for the t and gamma factors over the surface.")
print("Here just type in the file name that WILL be used to store")
print("the spline coefficients :")
fgrade = input("File name (output from grade_mlayer: ")
elif igrade == 2: # igrade=2, coefficients
print("A second degree polynomial fit of the thickness grading")
print("must be available:")
print("t(y) = BILATER_THICHNESS(y)/BILAYER_THICKNESS(y=0)")
print("t(y) = a0 + a1*y + a2*(y^2) + a3*(y^3) ")
print("a0 (constant term) ")
print("a1 (slope term) ")
print("a2 (quadratic term) ")
print("a3 (cubic term) ")
tmp = input("Enter a0, a1, a2, a3: ")
tmp = tmp.split()
a0 = float(tmp[0])
a1 = float(tmp[1])
a2 = float(tmp[2])
a3 = float(tmp[3])
else:
#--- From input keywords...
fileout = FILE
estart = float(E_MIN)
efinal = float(E_MAX)
# substrate
matSubstrate = S_MATERIAL
denSubstrate = float(S_DENSITY)
matEven = E_MATERIAL
denEven = float(E_DENSITY)
matOdd = O_MATERIAL
denOdd = float(O_DENSITY)
npair = int(N_PAIRS)
#define variables
thick=[0e0]*npair
gamma1=[0e0]*npair
mlroughness1=[0e0]*npair
mlroughness2=[0e0]*npair
for i in range(npair):
thick[i] = float(THICKNESS)
gamma1[i] = float(GAMMA)
mlroughness1[i] = float(ROUGHNESS_EVEN)
mlroughness2[i] = float(ROUGHNESS_ODD)
igrade = int(GRADE_SURFACE)
#TODO: check if needed file_gamma
fgrade = FILE_THICKNESS # raw_input("File name (output from grade_mlayer: ")
a0 = float(AA0)
a1 = float(AA1)
a2 = float(AA2)
a3 = float(AA3)
elfactor = numpy.log10(1.0e4/30.0)/300.0
istart = int(numpy.log10(estart/30.0e0)/elfactor + 1)
ifinal = int(numpy.log10(efinal/30.0e0)/elfactor + 2)
np = int(ifinal - istart) + 1
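# Worked example (hedged): for estart=5000.0 and efinal=20000.0 this gives
# istart=265, ifinal=337 and np=73 logarithmically spaced energies of the
# form 30*10**(elfactor*(istart+i-1)), spanning roughly 5-20 keV.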
f = open(fileout, 'wt')
pre_mlayer_dict = {}
f.write("%i \n" % np)
pre_mlayer_dict["np"] = np
ENERGY = numpy.zeros(np)
for i in range(np):
energy = 30e0*numpy.power(10,elfactor*(istart+i-1))
f.write("%e " % energy)
ENERGY[i] = energy
f.write( "\n")
pre_mlayer_dict["energy"] = ENERGY
DELTA = numpy.zeros(np)
BETA = numpy.zeros(np)
for i in range(np): #substrate
energy = 30e0*numpy.power(10,elfactor*(istart+i-1)) *1e-3 # in keV!!
delta = 1e0 - Refractive_Index_Re(matSubstrate,energy,denSubstrate)
beta = Refractive_Index_Im(matSubstrate,energy,denSubstrate)
DELTA[i] = delta
BETA[i] = beta
f.write( ("%26.17e "*2+"\n") % tuple([delta,beta]) )
pre_mlayer_dict["delta_s"] = DELTA
pre_mlayer_dict["beta_s"] = BETA
DELTA = numpy.zeros(np)
BETA = numpy.zeros(np)
for i in range(np): #even
energy = 30e0*numpy.power(10,elfactor*(istart+i-1)) *1e-3 # in keV!!
delta = 1e0 - Refractive_Index_Re(matEven,energy,denEven)
beta = Refractive_Index_Im(matEven,energy,denEven)
DELTA[i] = delta
BETA[i] = beta
f.write( ("%26.17e "*2+"\n") % tuple([delta,beta]) )
pre_mlayer_dict["delta_e"] = DELTA
pre_mlayer_dict["beta_e"] = BETA
DELTA = numpy.zeros(np)
BETA = numpy.zeros(np)
for i in range(np): #odd
energy = 30e0*numpy.power(10,elfactor*(istart+i-1)) *1e-3 # in keV!!
delta = 1e0 - Refractive_Index_Re(matOdd,energy,denOdd)
beta = Refractive_Index_Im(matOdd,energy,denOdd)
DELTA[i] = delta
BETA[i] = beta
f.write( ("%26.17e "*2+"\n") % tuple([delta,beta]) )
pre_mlayer_dict["delta_o"] = DELTA
pre_mlayer_dict["beta_o"] = BETA
#! srio@esrf.eu 2012-06-07 Nevot-Croce ML roughness model implemented.
#! By convention, starting from the version that includes ML roughness
#! we set NPAR negative, in order to assure compatibility with old
#! versions. If NPAR<0, roughness data are read, if NPAR>0 no roughness.
f.write("%i \n" % -npair)
pre_mlayer_dict["npair"] = -npair
for i in range(npair):
f.write( ("%26.17e "*4+"\n") % tuple([thick[i],gamma1[i],mlroughness1[i],mlroughness2[i]]) )
pre_mlayer_dict["thick"] = numpy.array(thick)
pre_mlayer_dict["gamma1"] = numpy.array(gamma1)
pre_mlayer_dict["mlroughness1"] = numpy.array(mlroughness1)
pre_mlayer_dict["mlroughness2"] = numpy.array(mlroughness2)
f.write("%i \n" % igrade)
pre_mlayer_dict["igrade"] = igrade
if igrade == 1:
f.write("%s \n" % fgrade)
pre_mlayer_dict["fgrade"] = fgrade
elif igrade == 2: # igrade=2, coefficients
f.write("%f %f %f %f\n"%(a0,a1,a2,a3))
pre_mlayer_dict["a0"] = a0
pre_mlayer_dict["a1"] = a1
pre_mlayer_dict["a2"] = a2
pre_mlayer_dict["a3"] = a3
f.close()
print("File written to disk: %s" % fileout)
out = MLayer()
out.pre_mlayer_dict = pre_mlayer_dict
out.using_pre_mlayer = True
return out
@classmethod
def initialize_from_bilayer_stack(cls,
material_S="Si", density_S=None, roughness_S=0.0,
material_E="B4C",density_E=None, roughness_E=0.0,
material_O="Ru", density_O=None, roughness_O=0.0,
bilayer_pairs=70,
bilayer_thickness=33.1,
bilayer_gamma=0.483,
):
npair = int(bilayer_pairs)
#define variables
thick = [] #[0e0]*npair
gamma1 = [] #[0e0]*npair
mlroughness1 = [] #[0e0]*npair
mlroughness2 = [] #[0e0]*npair
for i in range(npair):
thick.append(bilayer_thickness) # float(BILAYER_THICKNESS)
gamma1.append(bilayer_gamma) # float(BILAYER_GAMMA)
mlroughness1.append(roughness_E) # float(ROUGHNESS_E)
mlroughness2.append(roughness_O) # float(ROUGHNESS_O)
pre_mlayer_dict = {}
pre_mlayer_dict["np"] = bilayer_pairs
#! srio@esrf.eu 2012-06-07 Nevot-Croce ML roughness model implemented.
#! By convention, starting from the version that includes ML roughness
#! we set NPAR negative, in order to assure compatibility with old
#! versions. If NPAR<0, roughness data are read, if NPAR>0 no roughness.
pre_mlayer_dict["npair"] = -npair
pre_mlayer_dict["thick"] = numpy.array(thick)
pre_mlayer_dict["gamma1"] = numpy.array(gamma1)
pre_mlayer_dict["mlroughness1"] = numpy.array(mlroughness1)
pre_mlayer_dict["mlroughness2"] = numpy.array(mlroughness2)
#These keys are not in the original pre_mlayer_dict
pre_mlayer_dict["material1"] = material_E
pre_mlayer_dict["material2"] = material_O
pre_mlayer_dict["materialS"] = material_S
pre_mlayer_dict["roughnessS"] = roughness_S
pre_mlayer_dict["density1"] = density_E
pre_mlayer_dict["density2"] = density_O
pre_mlayer_dict["densityS"] = density_S
if pre_mlayer_dict["densityS"] is None:
pre_mlayer_dict["densityS"] = density(pre_mlayer_dict["materialS"])
print("Using density for substrate (%s): %f"%(pre_mlayer_dict["materialS"], pre_mlayer_dict["densityS"]))
if pre_mlayer_dict["density1"] is None:
pre_mlayer_dict["density1"] = density(pre_mlayer_dict["material1"])
print("Using density for layer 1 (even) (%s): %f" % (pre_mlayer_dict["material1"], pre_mlayer_dict["density1"]))
if pre_mlayer_dict["density2"] is None:
pre_mlayer_dict["density2"] = density(pre_mlayer_dict["material2"])
print("Using density for layer 2 (odd) (%s): %f" % (pre_mlayer_dict["material2"], pre_mlayer_dict["density2"]))
if isinstance(pre_mlayer_dict["densityS"],str):
pre_mlayer_dict["densityS"] = float(pre_mlayer_dict["densityS"])
if isinstance(pre_mlayer_dict["density1"],str):
pre_mlayer_dict["density1"] = float(pre_mlayer_dict["density1"])
if isinstance(pre_mlayer_dict["density2"], str):
pre_mlayer_dict["density2"] = float(pre_mlayer_dict["density2"])
# fill unused keys
pre_mlayer_dict["energy"] = None
pre_mlayer_dict["delta_s"] = None
pre_mlayer_dict["beta_s"] = None
pre_mlayer_dict["delta_e"] = None
pre_mlayer_dict["beta_e"] = None
pre_mlayer_dict["delta_o"] = None
pre_mlayer_dict["beta_o"] = None
pre_mlayer_dict["igrade"] = None
if pre_mlayer_dict["igrade"] == 1:
pre_mlayer_dict["fgrade"] = None
elif pre_mlayer_dict["igrade"] == 2: # igrade=2, coefficients
pre_mlayer_dict["a0"] = None
pre_mlayer_dict["a1"] = None
pre_mlayer_dict["a2"] = None
pre_mlayer_dict["a3"] = None
# return
out = MLayer()
out.pre_mlayer_dict = pre_mlayer_dict
out.using_pre_mlayer = False
return out
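# A minimal usage sketch (values mirror the defaults above):
# ml = MLayer.initialize_from_bilayer_stack(material_S="Si", material_E="B4C",
# material_O="Ru", bilayer_pairs=70, bilayer_thickness=33.1, bilayer_gamma=0.483)
# rs, rp, energies, thetas = ml.scan(energyN=51, energy1=5000.0, energy2=20000.0)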
# !
# ! PRE_MLAYER_SCAN
# !
#
# !
# ! this is a simple routine that computes the multilayer reflectivity
# ! with a multilayer defined in a file created by pre_mlayer.
# !
# ! It can be used for testing pre_mlayer, or for simple calculations of
# ! ML reflectivity.
# !
def scan(self,h5file="",
energyN = 51,energy1 = 5000.0,energy2 = 20000.0,
thetaN = 1,theta1 = 0.75,theta2 = 0.75):
if self.pre_mlayer_dict is None:
raise Exception("load preprocessor file before!")
# !calculate
k_what = 1
if (energyN > 1):
energyS = (energy2-energy1)/float(energyN-1)
else:
energyS = 0.0
if (thetaN > 1):
thetaS = (theta2-theta1)/float(thetaN-1)
else:
thetaS = 0.0
R_S_array = numpy.zeros((energyN,thetaN))
R_P_array = numpy.zeros_like(R_S_array)
theta_array = numpy.zeros(thetaN)
energy_array = numpy.zeros(energyN)
for i in range(1,1+thetaN):
for j in range(1,1+energyN):
theta = theta1 + float(i-1) * thetaS
energy = energy1+ float(j-1)*energyS
sin_ref = numpy.sin ( theta * numpy.pi/180)
wnum = 2 * numpy.pi * energy / tocm
COS_POLE = 1.0
R_S,R_P,tmp,phases,phasep = self.reflec(wnum,sin_ref,COS_POLE,k_what)
R_S_array[j-1,i-1] = R_S
R_P_array[j-1,i-1] = R_P
theta_array[i-1] = theta
energy_array[j-1] = energy
if ( (thetaN == 1) and (energyN == 1) ):
print("------------------------------------------------------------------------")
print("Inputs: ")
print(" for E=",energy1,"eV: ")
print(" energy [eV]: ",energy)
print(" grazing angle [deg]: ",theta)
print(" wavelength [A]: ",(1e0/wnum)*2*numpy.pi*1e8)
print(" wavenumber (2 pi/lambda) [cm^-1]: ",wnum)
print("Outputs: ")
print(" R_S: ",R_S)
print(" R_P: ",R_P)
print("------------------------------------------------------------------------")
if h5file != "":
h5_initialize = True
if True: #try:
if h5_initialize:
h5w = H5SimpleWriter.initialize_file(h5file, creator="xoppy_multilayer.py")
else:
h5w = H5SimpleWriter(h5file, None)
h5_entry_name = "MLayer"
h5w.create_entry(h5_entry_name,nx_default="reflectivity-s")
if energyN == 1:
h5w.add_dataset(theta_array, R_S_array[0]**2, dataset_name="reflectivity-s", entry_name=h5_entry_name,
title_x="Grazing angle [deg]", title_y="Reflectivity-s")
elif thetaN == 1:
h5w.add_dataset(energy_array, R_S_array[:,0]**2, dataset_name="reflectivity-s", entry_name=h5_entry_name,
title_x="Photon energy [eV]", title_y="Reflectivity-s")
else:
# h5w.create_entry(h5_entry_name, nx_default="EnergyAngleScan")
h5w.add_image(R_S_array**2, energy_array, theta_array, image_name="EnergyAngleScan",
entry_name=h5_entry_name,
title_x="Photon Energy [eV]",
title_y="Grazing Angle [deg]")
h5w.create_entry("parameters", root_entry=h5_entry_name, nx_default=None)
for key in self.pre_mlayer_dict.keys():
try:
h5w.add_key(key, self.pre_mlayer_dict[key], entry_name=h5_entry_name + "/parameters")
except Exception:
pass # some dict values (e.g. arrays or None) cannot be written as h5 keys
print("File written to disk: %s" % h5file)
# except:
# raise Exception("ERROR writing h5 file")
return R_S_array,R_P_array,energy_array,theta_array
def reflec(self,WNUM,SIN_REF,COS_POLE,K_WHAT):
# ! C+++
# ! C SUBROUTINE REFLEC
# ! C
# ! C PURPOSE To compute the local reflectivity of a mirror or
# ! C multilayer. Also compute filter transmittivity.
# ! C
# ! C
# ! C ARGUMENTS [ I ] PIN : (x,y,z) of the intercept
# ! C [ I ] wnum : wavenumber (cm-1)
# ! C [ I ] sin_ref : sine of angle from surface
# ! C [ I ] cos_pole : cosine of angle of normal from pole
# ! C [ O ] R_P : p-pol reflection coefficient
# ! C [ O ] R_S : s-pol " "
# ! C
# ! C---
phases = 0.0
phasep = 0.0
NIN = self.pre_mlayer_dict["np"]
PHOT_ENER = WNUM * tocm / (2 * numpy.pi) # eV
NPAIR = numpy.abs(self.pre_mlayer_dict["npair"])
XLAM = 2 * numpy.pi / WNUM * 1.0e8 # Angstrom
gamma1 = self.pre_mlayer_dict["gamma1"]
t_oe = self.pre_mlayer_dict["thick"]
# gamma1 = ratio t(even)/(t(odd)+t(even)) of each layer pair
t_e = gamma1 * t_oe
t_o = (1.0 - gamma1) * t_oe
mlroughness1 = self.pre_mlayer_dict["mlroughness1"]
mlroughness2 = self.pre_mlayer_dict["mlroughness2"]
if self.using_pre_mlayer:
ENER = self.pre_mlayer_dict["energy"]
wnum = 2 * numpy.pi * ENER / tocm
QMIN = wnum[0]
QSTEP = wnum[1] - wnum[0]
DELTA_S = self.pre_mlayer_dict["delta_s"]
DELTA_E = self.pre_mlayer_dict["delta_e"]
DELTA_O = self.pre_mlayer_dict["delta_o"]
BETA_S = self.pre_mlayer_dict["beta_s"]
BETA_E = self.pre_mlayer_dict["beta_e"]
BETA_O = self.pre_mlayer_dict["beta_o"]
i_grade = self.pre_mlayer_dict["igrade"]
# TODO graded ml
# ! C
# ! C Is the multilayer thickness graded ?
# ! C
# read (iunit,*) i_grade
# ! 0=None
# ! 1=spline files
# ! 2=quadratic coefficients
#
# ! spline
# if (i_grade.eq.1) then
# read (iunit,'(a)') file_grade
# OPEN (45, FILE=adjustl(FILE_GRADE), STATUS='OLD', &
# FORM='UNFORMATTED', IOSTAT=iErr)
# ! srio added test
# if (iErr /= 0 ) then
# print *,"REFLEC: File not found: "//trim(adjustl(file_grade))
# print *,'Error: REFLEC: File not found. Aborted.'
# ! stop 'File not found. Aborted.'
# end if
#
# READ (45) NTX, NTY
# READ (45) TX,TY
# !DO 205 I = 1, NTX
# !DO 205 J = 1, NTY
# DO I = 1, NTX
# DO J = 1, NTY
# READ (45) TSPL(1,I,1,J),TSPL(1,I,2,J), & ! spline for t
# TSPL(2,I,1,J),TSPL(2,I,2,J)
# END DO
# END DO
#
# READ (45) NGX, NGY
# READ (45) GX,GY
# DO I = 1, NGX
# DO J = 1, NGY
# READ (45) GSPL(1,I,1,J),GSPL(1,I,2,J), & ! spline for gamma
# GSPL(2,I,1,J),GSPL(2,I,2,J)
# END DO
# END DO
#
# CLOSE (45)
# end if
#
# if (i_grade.eq.2) then ! quadric coefficients
# !
# ! laterally graded multilayer
# !
#
# ! srio@esrf.eu added cubic term (requested B Meyer, LNLS)
# read(iunit,*,IOSTAT=iErr) lateral_grade_constant,lateral_grade_slope, &
# lateral_grade_quadratic,lateral_grade_cubic
#
# end if
#
# close(unit=iunit)
# tfilm = absor
# RETURN
# END IF
# END IF
# ! C
# ! C Multilayers reflectivity.
# ! C First interpolate for all the refractive indices.
# ! C
ELFACTOR = numpy.log10(1.0e4/30.0e0)/300.0e0
index1 = numpy.log10(PHOT_ENER/ENER[0])/ELFACTOR
index1 = int(index1)
DELS = DELTA_S[index1] + (DELTA_S[index1+1] - DELTA_S[index1]) *(PHOT_ENER - ENER[index1])/(ENER[index1+1] - ENER[index1])
BETS = BETA_S[index1] + ( BETA_S[index1+1] - BETA_S[index1]) *(PHOT_ENER - ENER[index1])/(ENER[index1+1] - ENER[index1])
DELE = DELTA_E[index1] + (DELTA_E[index1+1] - DELTA_E[index1]) *(PHOT_ENER - ENER[index1])/(ENER[index1+1] - ENER[index1])
BETE = BETA_E[index1] + ( BETA_E[index1+1] - BETA_E[index1]) *(PHOT_ENER - ENER[index1])/(ENER[index1+1] - ENER[index1])
DELO = DELTA_O[index1] + (DELTA_O[index1+1] - DELTA_O[index1]) *(PHOT_ENER - ENER[index1])/(ENER[index1+1] - ENER[index1])
BETO = BETA_O[index1] + ( BETA_O[index1+1] - BETA_O[index1]) *(PHOT_ENER - ENER[index1])/(ENER[index1+1] - ENER[index1])
else: # not using preprocessor, using xraylib
DELS = 1.0 - Refractive_Index_Re(self.pre_mlayer_dict["materialS"],1e-3*PHOT_ENER,self.pre_mlayer_dict["densityS"])
BETS = Refractive_Index_Im(self.pre_mlayer_dict["materialS"],1e-3*PHOT_ENER,self.pre_mlayer_dict["densityS"])
DELE = 1.0 - Refractive_Index_Re(self.pre_mlayer_dict["material1"],1e-3*PHOT_ENER,self.pre_mlayer_dict["density1"])
BETE = Refractive_Index_Im(self.pre_mlayer_dict["material1"],1e-3*PHOT_ENER,self.pre_mlayer_dict["density1"])
DELO = 1.0 - Refractive_Index_Re(self.pre_mlayer_dict["material2"],1e-3*PHOT_ENER,self.pre_mlayer_dict["density2"])
BETO = Refractive_Index_Im(self.pre_mlayer_dict["material2"],1e-3*PHOT_ENER,self.pre_mlayer_dict["density2"])
TFACT = 1.0
GFACT = 1.0
#TODO graded multilayers
# IF (I_GRADE.EQ.1) THEN
# XIN = PIN(1)
# YIN = PIN(2)
# CALL DBCEVL (TX,NTX,TY,NTY,TSPL,i101,XIN,YIN,PDS,IER)
# IF (IER.NE.0) THEN
# CALL MSSG ('REFLEC','Spline error # ',IER)
# RETURN
# END IF
# TFACT = PDS(1)
# ! C
# CALL DBCEVL (GX,NGX,GY,NGY,GSPL,i101,XIN,YIN,PDS,IER)
# IF (IER.NE.0) THEN
# CALL MSSG ('REFLEC','Spline error # ',IER)
# RETURN
# END IF
# GFACT = PDS(1)
# ELSE IF (I_GRADE.EQ.2) THEN
# TFACT = lateral_grade_constant+ &
# lateral_grade_slope*pin(2) + &
# lateral_grade_quadratic*pin(2)*pin(2) + &
# lateral_grade_cubic*pin(2)*pin(2)*pin(2)
# ELSE
# END IF
#
#
R_S,R_P,PHASES,PHASEP = self.fresnel(TFACT,GFACT,NPAIR,SIN_REF,COS_POLE,XLAM,
DELO,DELE,DELS,BETO,BETE,BETS,t_o,t_e,mlroughness1,mlroughness2)
return R_S,R_P,0,phases,phasep
def fresnel(self,TFACT,GFACT,NPAIR,SIN_REF,COS_POLE,XLAM,
delo,dele,dels,beto,bete,bets,t_o,t_e,mlroughness1,mlroughness2):
# !C------------------------------------------------------------------------------
# !C subroutine FRESNEL
# !C------------------------------------------------------------------------------
# !c compute x-ray/u.v. reflection efficiency of multilayers
# !c
# !c inputs:
# !c tfact : used for ML with graded thickness (thickness coeff)
# !c gfact : used for ML with graded thickness (gamma coeff)
# !c n : number of bilayers
# !c sin_ref : sine of the grazing incidence angle
# !c cos_pole: cosine of the angle between the local normal and the pole
# !c
# !c delo,dele,dels = parameter delta odd, even, substrate respectively
# !c beto,bete,bets = parameter beta odd, even, substrate respectively
# !c 1.0 - delo - i*beto = complex refractive index (odd)
# !c 1.0 - dele - i*bete = complex refractive index (even)
# !c t_o = thickness of odd layers (a)
# !c t_e = thickness of even layers (a)
# !c outputs:
# !c ans = S polarization reflectivity
# !c anp = P polarization reflectivity
# !c phaseS = change of phase S
# !c phaseP = change of phase P
# !c
# !c----------------------------------------------------------------------------
# !C
# !C
# !C vacuum
# !C |------------------------------| \
# !C | odd (n) | |
# !C |------------------------------| | BILAYER # n
# !C | even (n) | |
# !C |------------------------------| /
# !C | . |
# !C | . |
# !C | . |
# !C |------------------------------| \
# !C | odd (1) | |
# !C |------------------------------| | BILAYER # 1
# !C | even (1) | |
# !C |------------------------------| /
# !C | |
# !C |///////// substrate //////////|
# !C | |
# !C
# !c----------------------------------------------------------------------------
# !c----------------------------------------------------------------------------
ci = 0.+1.0j
# ! (refraction index "odd,even,substrate")**2
ro2 = (1.0 - delo - ci * beto)**2
re2 = (1.0 - dele - ci * bete)**2
rs2 = (1.0 - dels - ci * bets)**2
# ! angles
SIN_REF2 = SIN_REF**2
COS_REF2 = 1.0 - SIN_REF2
fo = ro2 - COS_REF2
fe = re2 - COS_REF2
refv = SIN_REF2
xmfv = 0.0
fv = refv + ci * xmfv
fs = rs2 - COS_REF2
fo = numpy.sqrt(fo) # complex!!
fe = numpy.sqrt(fe) # complex!!
fv = numpy.sqrt(fv) # complex!!
fs = numpy.sqrt(fs) # complex!!
# ! Fresnel formula "S" (in function of incidence angle and critical angle)
ffe = (fe-fo)/(fe+fo)
ffo = -ffe
ffv = (fv-fo)/(fv+fo)
ffs = (fe-fs)/(fe+fs)
# ! Fresnel formula "P" (in function of incidence angle and critical angle)
ffep = (fe/re2-fo/ro2)/(fe/re2+fo/ro2)
ffop = -ffep
ffvp = (fv-fo/ro2)/(fv+fo/ro2)
ffsp = (fe/re2-fs/rs2)/(fe/re2+fs/rs2)
if NPAIR == 0: # now there is only substrate and vacuum
fe = fv
fo = fs
# ! Fresnel formula "S" (in function of incidence angle and critical angle)
ffe = (fe - fo) / (fe + fo)
ffo = -ffe
ffv = (fv - fo) / (fv + fo)
ffs = (fe - fs) / (fe + fs)
# ! Fresnel formula "P" (in function of incidence angle and critical angle)
ffep = (fe / re2 - fo / ro2) / (fe / re2 + fo / ro2)
ffop = -ffep
ffvp = (fv - fo / ro2) / (fv + fo / ro2)
ffsp = (fe / re2 - fs / rs2) / (fe / re2 + fs / rs2)
# ! another way
# ! ro=(1.0D0-delo-ci*beto)
# ! re=(1.0D0-dele-ci*bete)
# ! rs=(1.0D0-dels-ci*bets)
# !
# ! !
# ! cos_ref = sqrt(1.0D0-sin_ref**2) ! in vacuum
# ! !!! snell (top to bottom propagation)
# ! cos_o = (1.0D0/ro)*cos_ref ! in odd medium
# ! cos_e = (ro/re)*cos_o ! in even medium
# ! cos_s = (re/rs)*cos_e ! in substrate medium
# !
# ! sin_o = mysqrt(1.0d0 - cos_o**2)
# ! sin_e = mysqrt(1.0d0 - cos_e**2)
# ! sin_s = mysqrt(1.0d0 - cos_s**2)
# !
# ! ! even->odd interface
# ! ffe = (re*sin_e - ro*sin_o)/ & ! e->o
# ! (re*sin_e + ro*sin_o)
# ! ffo=-ffe
# ! ffv = (sin_ref - ro*sin_o )/ & ! v->o
# ! (sin_ref + ro*sin_o )
# ! ! even->substrate interface
# ! ffs = (re*sin_e - rs*sin_s )/ & ! e->s
# ! (re*sin_e + rs*sin_s)
# !
# ! ! p-polarization
# ! ffep = (re*sin_o - ro*sin_e)/ & ! e->o
# ! (re*sin_o + ro*sin_e)
# !
# ! ffop=-ffep
# ! ffvp = (sin_o - ro*sin_ref )/& !v->o
# ! (sin_o + ro*sin_ref )
# !
# ! ffsp = (re*sin_s - rs*sin_e )/ & ! e->s
# ! (re*sin_s + rs*sin_e)
r = 0.0 + 0.0j
rp = 0.0 + 0.0j
prefact = (8.*(numpy.pi**2.)) / (XLAM**2)
# !c Nevot-Croce roughness
# !c DO NOT include refraction index in the roughness formula
sigma_s2 = 0.0 # ! sigma_s**2.0 !roughn. substrate
sigma_v2 = 0.0 # ! sigma_v**2.0!roughn. vacuum
# ! loop over the bilayers
# ! remember that "even" is the bottom sublayer
for j in range(NPAIR): # =1,n ! n is the number of bilayers
# ! C
# ! C compute the thickness for the odd and even material :
# ! C
ao = -ci * (numpy.pi * fo * t_o[j] * COS_POLE / XLAM)
ae = -ci * (numpy.pi * fe * t_e[j] * COS_POLE / XLAM)
ao = numpy.exp(ao)
ae = numpy.exp(ae)
if j != 0:
sigma_e2 = mlroughness1[j]**2.0 #!roughn. even layer
arg_e = fo * fe * sigma_e2 / (numpy.sqrt(ro2) * numpy.sqrt(re2))
fnevot_e = numpy.exp(-prefact * arg_e)
r = (ae**4) * (r + ffe * fnevot_e) / (r * ffe * fnevot_e + 1.0)
rp = (ae**4) * (rp + ffep * fnevot_e) / (rp * ffep * fnevot_e + 1.0)
else:
# ! layer on top of substrate
arg_s = fe * fs * sigma_s2 / (numpy.sqrt(re2) * numpy.sqrt(rs2))
fnevot_s = numpy.exp(-prefact * arg_s)
r = (ae**4.0) * (r + ffs * fnevot_s) / (r * ffs * fnevot_s + 1.0)
rp = (ae**4.0) * (rp + ffsp * fnevot_s) / (rp * ffsp * fnevot_s + 1.0)
# ! odd layer (top sublayer)
sigma_o2 = mlroughness2[j]**2.0 #!roughn. odd layer
arg_o = fo * fe * sigma_o2 / (numpy.sqrt(ro2) * numpy.sqrt(re2))
fnevot_o = numpy.exp(-prefact * arg_o)
r = (ao**4.0) * (r + ffo * fnevot_o) / (r * ffo * fnevot_o + 1.0)
rp = (ao**4.0) * (rp + ffop * fnevot_o) / (rp * ffop * fnevot_o + 1.0)
# !
# ! vacuum interface
# !
arg_v = fo * fv * sigma_v2 / numpy.sqrt(ro2)
fnevot_v = numpy.exp(-prefact * arg_v)
r = (r + ffv * fnevot_v) / (r * ffv * fnevot_v + 1.0)
rp = (rp + ffvp * fnevot_v) / (rp * ffvp * fnevot_v + 1.0)
#
# !
# ! calculate phases
# !
PHASES = numpy.arctan2(r.imag,r.real)
ans = numpy.abs(r)
PHASEP = numpy.arctan2(rp.imag,rp.real)
anp = numpy.abs(rp)
return ans,anp,PHASES,PHASEP
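# A minimal illustrative sketch (not part of the original SHADOW/xoppy code):
# fresnel() above walks the stack bottom-up and, at each interface, applies the
# same two-step update: multiply the accumulated amplitude by the layer phase
# factor (a**4 in the quarter-phase convention used above) and combine it with
# the interface Fresnel coefficient ff via
#     r' = a**4 * (r + ff) / (r * ff + 1)
# The standalone demo below performs one such step for a single hypothetical
# layer; all numeric values are illustrative only.
def _fresnel_single_step_demo():
    import numpy as np
    theta = 0.5 * np.pi / 180.0               # grazing angle [rad]
    n1 = 1.0                                  # vacuum
    n2 = 1.0 - 1e-5 - 1e-7j                   # hypothetical layer index
    f1 = np.sqrt(n1**2 - np.cos(theta)**2)    # kz-like factor, medium 1
    f2 = np.sqrt(n2**2 - np.cos(theta)**2)    # kz-like factor, medium 2
    ff = (f1 - f2) / (f1 + f2)                # s-pol interface coefficient
    t, lam = 30.0, 1.54                       # layer thickness, wavelength [A]
    a = np.exp(-1j * np.pi * f2 * t / lam)    # quarter-phase, as in fresnel()
    r = 0.0 + 0.0j                            # nothing below yet
    r = (a**4) * (r + ff) / (r * ff + 1.0)    # one recursion step
    return abs(r)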
if __name__ == "__main__":
from srxraylib.plot.gol import plot
a = MLayer.pre_mlayer(
interactive=False,
FILE="pre_mlayer.dat",
E_MIN=100.0, E_MAX=500.0,
O_DENSITY=7.19, O_MATERIAL="Cr", #"Water, Liquid", # odd: closer to vacuum
E_DENSITY=3.00, E_MATERIAL="Sc", # even: closer to substrate
S_DENSITY=2.33, S_MATERIAL="Si", # substrate
GRADE_DEPTH=0,
N_PAIRS=50,
THICKNESS=22.0,
GAMMA=10.0/22.0, # gamma ratio = t(even) / (t(odd) + t(even))
ROUGHNESS_EVEN=0.0,
ROUGHNESS_ODD=0.0,
FILE_DEPTH="myfile_depth.dat",
GRADE_SURFACE=0,
FILE_SHADOW="mlayer1.sha",
FILE_THICKNESS="mythick.dat",
FILE_GAMMA="mygamma.dat",
AA0=1.0,AA1=0.0,AA2=0.0,AA3=0.0)
b = MLayer()
b.read_preprocessor_file("pre_mlayer.dat")
#
# energy scan
#
rs, rp, e, t = a.scan(h5file="",
energyN=100,energy1=300.0,energy2=500.0,
thetaN=1,theta1=45.0,theta2=45.0)
print(rs.shape,rp.shape,e.shape,t.shape)
plot(e,rs[:,0],xtitle="Photon energy [eV]",ytitle="Reflectivity")
#
# theta scan
#
rs, rp, e, t = a.scan(h5file="",
energyN=1,energy1=400.0,energy2=401.0,
thetaN=1000,theta1=40.0,theta2=50.0)
print(rs.shape,rp.shape,e.shape,t.shape)
plot(t,rs[0],xtitle="angle [deg]",ytitle="Reflectivity",ylog=False)
#
# single point
#
a.scan(h5file="",
energyN=1,energy1=398.0,thetaN=1,theta1=45.0)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import overload, Optional, Dict
from py4j.java_gateway import JavaObject, JVMView
from pyspark.util import _parse_memory # type: ignore[attr-defined]
class ExecutorResourceRequest:
"""
An Executor resource request. This is used in conjunction with the ResourceProfile to
programmatically specify the resources needed for an RDD that will be applied at the
stage level.
This is used to specify what the resource requirements are for an Executor and how
Spark can find out specific details about those resources. Not all the parameters are
required for every resource type. Resources like GPUs are supported and have the same
limitations as the global spark configs spark.executor.resource.gpu.*. The amount,
discoveryScript, and vendor parameters for resources are the same parameters a user
would specify through the configs
spark.executor.resource.{resourceName}.{amount, discoveryScript, vendor}.
For instance, suppose a user wants to allocate an Executor with GPU resources on YARN.
The user has to specify the resource name (gpu) and the amount or number of GPUs per
Executor. The discovery script is needed so that when the Executor starts up it can
discover what GPU addresses are available for it to use, because YARN doesn't tell
Spark that. The vendor parameter would not be used here because it is specific to
Kubernetes. See the configuration and cluster-specific docs for more details.
Use :py:class:`pyspark.ExecutorResourceRequests` class as a convenience API.
.. versionadded:: 3.1.0
Parameters
----------
resourceName : str
Name of the resource
amount : int
Amount of the resource being requested
discoveryScript : str, optional
Optional script used to discover the resources. This is required on some
cluster managers that don't tell Spark the addresses of the resources
allocated. The script runs at Executor startup to discover the addresses
of the resources available.
vendor : str, optional
Vendor, required for some cluster managers
Notes
-----
This API is evolving.
"""
def __init__(
self,
resourceName: str,
amount: int,
discoveryScript: str = "",
vendor: str = "",
):
self._name = resourceName
self._amount = amount
self._discovery_script = discoveryScript
self._vendor = vendor
@property
def resourceName(self) -> str:
return self._name
@property
def amount(self) -> int:
return self._amount
@property
def discoveryScript(self) -> str:
return self._discovery_script
@property
def vendor(self) -> str:
return self._vendor
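# A minimal illustrative sketch (not part of the pyspark sources): describing
# one GPU per executor. The resource name "gpu" and the script path are
# hypothetical values a user would supply, mirroring the
# spark.executor.resource.gpu.* configs mentioned in the docstring above.
def _example_gpu_request() -> "ExecutorResourceRequest":
    return ExecutorResourceRequest(
        "gpu", 1, discoveryScript="/opt/spark/scripts/getGpus.sh"
    )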
class ExecutorResourceRequests:
"""
A set of Executor resource requests. This is used in conjunction with the
:class:`pyspark.resource.ResourceProfileBuilder` to programmatically specify the
resources needed for an RDD that will be applied at the stage level.
.. versionadded:: 3.1.0
Notes
-----
This API is evolving.
"""
_CORES = "cores"
_MEMORY = "memory"
_OVERHEAD_MEM = "memoryOverhead"
_PYSPARK_MEM = "pyspark.memory"
_OFFHEAP_MEM = "offHeap"
@overload
def __init__(self, _jvm: JVMView):
...
@overload
def __init__(
self,
_jvm: None = ...,
_requests: Optional[Dict[str, ExecutorResourceRequest]] = ...,
):
...
def __init__(
self,
_jvm: Optional[JVMView] = None,
_requests: Optional[Dict[str, ExecutorResourceRequest]] = None,
):
from pyspark import SparkContext
_jvm = _jvm or SparkContext._jvm # type: ignore[attr-defined]
if _jvm is not None:
self._java_executor_resource_requests = (
_jvm.org.apache.spark.resource.ExecutorResourceRequests()
)
if _requests is not None:
for k, v in _requests.items():
if k == self._MEMORY:
self._java_executor_resource_requests.memory(str(v.amount))
elif k == self._OVERHEAD_MEM:
self._java_executor_resource_requests.memoryOverhead(str(v.amount))
elif k == self._PYSPARK_MEM:
self._java_executor_resource_requests.pysparkMemory(str(v.amount))
elif k == self._CORES:
self._java_executor_resource_requests.cores(v.amount)
else:
self._java_executor_resource_requests.resource(
v.resourceName, v.amount, v.discoveryScript, v.vendor
)
else:
self._java_executor_resource_requests = None
self._executor_resources: Dict[str, ExecutorResourceRequest] = {}
def memory(self, amount: str) -> "ExecutorResourceRequests":
if self._java_executor_resource_requests is not None:
self._java_executor_resource_requests.memory(amount)
else:
self._executor_resources[self._MEMORY] = ExecutorResourceRequest(
self._MEMORY, _parse_memory(amount)
)
return self
def memoryOverhead(self, amount: str) -> "ExecutorResourceRequests":
if self._java_executor_resource_requests is not None:
self._java_executor_resource_requests.memoryOverhead(amount)
else:
self._executor_resources[self._OVERHEAD_MEM] = ExecutorResourceRequest(
self._OVERHEAD_MEM, _parse_memory(amount)
)
return self
def pysparkMemory(self, amount: str) -> "ExecutorResourceRequests":
if self._java_executor_resource_requests is not None:
self._java_executor_resource_requests.pysparkMemory(amount)
else:
self._executor_resources[self._PYSPARK_MEM] = ExecutorResourceRequest(
self._PYSPARK_MEM, _parse_memory(amount)
)
return self
def offheapMemory(self, amount: str) -> "ExecutorResourceRequests":
if self._java_executor_resource_requests is not None:
self._java_executor_resource_requests.offHeapMemory(amount)
else:
self._executor_resources[self._OFFHEAP_MEM] = ExecutorResourceRequest(
self._OFFHEAP_MEM, _parse_memory(amount)
)
return self
def cores(self, amount: int) -> "ExecutorResourceRequests":
if self._java_executor_resource_requests is not None:
self._java_executor_resource_requests.cores(amount)
else:
self._executor_resources[self._CORES] = ExecutorResourceRequest(self._CORES, amount)
return self
def resource(
self,
resourceName: str,
amount: int,
discoveryScript: str = "",
vendor: str = "",
) -> "ExecutorResourceRequests":
if self._java_executor_resource_requests is not None:
self._java_executor_resource_requests.resource(
resourceName, amount, discoveryScript, vendor
)
else:
self._executor_resources[resourceName] = ExecutorResourceRequest(
resourceName, amount, discoveryScript, vendor
)
return self
@property
def requests(self) -> Dict[str, ExecutorResourceRequest]:
if self._java_executor_resource_requests is not None:
result = {}
execRes = self._java_executor_resource_requests.requestsJMap()
for k, v in execRes.items():
result[k] = ExecutorResourceRequest(
v.resourceName(), v.amount(), v.discoveryScript(), v.vendor()
)
return result
else:
return self._executor_resources
class TaskResourceRequest:
"""
A task resource request. This is used in conjunction with the
:class:`pyspark.resource.ResourceProfile` to programmatically specify the resources
needed for an RDD that will be applied at the stage level. The amount is specified
as a Double to allow for saying you want more than 1 task per resource. Valid values
are less than or equal to 0.5 or whole numbers.
Use :class:`pyspark.resource.TaskResourceRequests` class as a convenience API.
Parameters
----------
resourceName : str
Name of the resource
amount : float
Amount requesting as a float to support fractional resource requests.
Valid values are less than or equal to 0.5 or whole numbers.
.. versionadded:: 3.1.0
Notes
-----
This API is evolving.
"""
def __init__(self, resourceName: str, amount: float):
self._name = resourceName
self._amount = float(amount)
@property
def resourceName(self) -> str:
return self._name
@property
def amount(self) -> float:
return self._amount
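# A minimal illustrative sketch (not part of the pyspark sources): a
# fractional amount <= 0.5 means several tasks share one resource address,
# e.g. amount=0.5 lets two tasks run concurrently on the same GPU.
def _example_shared_gpu_task() -> "TaskResourceRequest":
    return TaskResourceRequest("gpu", 0.5)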
class TaskResourceRequests:
"""
A set of task resource requests. This is used in conjunction with the
:class:`pyspark.resource.ResourceProfileBuilder` to programmatically specify the resources
needed for an RDD that will be applied at the stage level.
.. versionadded:: 3.1.0
Notes
-----
This API is evolving.
"""
_CPUS = "cpus"
@overload
def __init__(self, _jvm: JVMView):
...
@overload
def __init__(
self,
_jvm: None = ...,
_requests: Optional[Dict[str, TaskResourceRequest]] = ...,
):
...
def __init__(
self,
_jvm: Optional[JVMView] = None,
_requests: Optional[Dict[str, TaskResourceRequest]] = None,
):
from pyspark import SparkContext
_jvm = _jvm or SparkContext._jvm # type: ignore[attr-defined]
if _jvm is not None:
self._java_task_resource_requests: Optional[
JavaObject
] = _jvm.org.apache.spark.resource.TaskResourceRequests()
if _requests is not None:
for k, v in _requests.items():
if k == self._CPUS:
self._java_task_resource_requests.cpus(int(v.amount))
else:
self._java_task_resource_requests.resource(v.resourceName, v.amount)
else:
self._java_task_resource_requests = None
self._task_resources: Dict[str, TaskResourceRequest] = {}
def cpus(self, amount: int) -> "TaskResourceRequests":
if self._java_task_resource_requests is not None:
self._java_task_resource_requests.cpus(amount)
else:
self._task_resources[self._CPUS] = TaskResourceRequest(self._CPUS, amount)
return self
def resource(self, resourceName: str, amount: float) -> "TaskResourceRequests":
if self._java_task_resource_requests is not None:
self._java_task_resource_requests.resource(resourceName, float(amount))
else:
self._task_resources[resourceName] = TaskResourceRequest(resourceName, amount)
return self
@property
def requests(self) -> Dict[str, TaskResourceRequest]:
if self._java_task_resource_requests is not None:
result = {}
taskRes = self._java_task_resource_requests.requestsJMap()
for k, v in taskRes.items():
result[k] = TaskResourceRequest(v.resourceName(), v.amount())
return result
else:
return self._task_resources
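# A minimal illustrative usage sketch (not part of the pyspark sources),
# assuming no active SparkContext/JVM so the pure-Python bookkeeping path of
# the builders above is taken; all amounts are hypothetical.
def _example_resource_requests():
    execs = ExecutorResourceRequests().memory("4g").cores(2).resource("gpu", 1)
    tasks = TaskResourceRequests().cpus(1).resource("gpu", 0.25)
    # each .requests returns a dict of name -> *ResourceRequest
    return execs.requests, tasks.requests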
|
|
"""
Cause-effect models.
"""
# Author: Jose A. R. Fonollosa <jarfo@yahoo.com>
#
# License: Apache, Version 2.0
from multiprocessing import Pool
import numpy as np
from sklearn import pipeline
from sklearn.base import BaseEstimator
from sklearn.ensemble import GradientBoostingClassifier
import features as f
gbc_params = {
'loss':'deviance',
'learning_rate': 0.1,
'n_estimators': 500,
'subsample': 1.0,
'min_samples_split': 8,
'min_samples_leaf': 1,
'max_depth': 9,
'init': None,
'random_state': 1,
'max_features': None,
'verbose': 0
}
selected_features = [
'Adjusted Mutual Information[A,A type,B,B type]',
'Conditional Distribution Entropy Variance[A,A type,B,B type]',
'Conditional Distribution Entropy Variance[B,B type,A,A type]',
'Conditional Distribution Kurtosis Variance[A,A type,B,B type]',
'Conditional Distribution Kurtosis Variance[B,B type,A,A type]',
'Conditional Distribution Similarity[A,A type,B,B type]',
'Conditional Distribution Similarity[B,B type,A,A type]',
'Conditional Distribution Skewness Variance[A,A type,B,B type]',
'Conditional Distribution Skewness Variance[B,B type,A,A type]',
'Discrete Conditional Entropy[A,A type,B,B type]',
'Discrete Conditional Entropy[B,B type,A,A type]',
'Discrete Entropy[A,A type]',
'Discrete Entropy[B,B type]',
'Discrete Mutual Information[A,A type,B,B type]',
'HSIC[A,A type,B,B type]',
'IGCI[A,A type,B,B type]',
'IGCI[B,B type,A,A type]',
'Kurtosis[A,A type]',
'Kurtosis[B,B type]',
'Log[Number of Samples[A]]',
'Log[Number of Unique Samples[A]]',
'Log[Number of Unique Samples[B]]',
'Moment21[A,A type,B,B type]',
'Moment21[B,B type,A,A type]',
'Moment31[A,A type,B,B type]',
'Moment31[B,B type,A,A type]',
'Normalized Discrete Entropy[A,A type]',
'Normalized Discrete Entropy[B,B type]',
'Normalized Discrete Mutual Information[Discrete Mutual Information[A,A type,B,B type],Discrete Joint Entropy[A,A type,B,B type]]',
'Normalized Discrete Mutual Information[Discrete Mutual Information[A,A type,B,B type],Min[Discrete Entropy[A,A type],Discrete Entropy[B,B type]]]',
'Normalized Entropy[A,A type]',
'Normalized Entropy[B,B type]',
'Normalized Error Probability[A,A type,B,B type]',
'Normalized Error Probability[B,B type,A,A type]',
# 'Number of Unique Samples[A]',
# 'Number of Unique Samples[B]',
'Pearson R[A,A type,B,B type]',
'Polyfit Error[A,A type,B,B type]',
'Polyfit Error[B,B type,A,A type]',
'Polyfit[A,A type,B,B type]',
'Polyfit[B,B type,A,A type]',
'Skewness[A,A type]',
'Skewness[B,B type]',
'Uniform Divergence[A,A type]',
'Uniform Divergence[B,B type]'
]
class Pipeline(pipeline.Pipeline):
def predict(self, X):
try:
p = super(Pipeline, self).predict_proba(X)
if p.shape[1] == 2:
p = p[:,1]
elif p.shape[1] == 3:
p = p[:,2] - p[:,0]
except AttributeError:
p = super(Pipeline, self).predict(X)
return p
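# A small illustration (not part of the original model code) of the signed
# score computed above, assuming sklearn's sorted class order (-1, 0, 1):
# column 2 is P(A->B) and column 0 is P(B->A), so p[:,2] - p[:,0] is near +1
# for a confident A->B pair and near -1 for B->A. Numbers are made up.
def _example_signed_score():
    import numpy as np
    p = np.array([[0.1, 0.2, 0.7],
                  [0.6, 0.3, 0.1]])
    return p[:, 2] - p[:, 0]  # array([ 0.6, -0.5])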
def get_pipeline(features, regressor=None, params=None):
steps = [
("extract_features", f.FeatureMapper(features)),
("regressor", regressor(**params)),
]
return Pipeline(steps)
class CauseEffectEstimatorOneStep(BaseEstimator):
def __init__(self, features=None, regressor=None, params=None, symmetrize=True):
self.extractor = f.extract_features
self.classifier = get_pipeline(features, regressor, params)
self.symmetrize = symmetrize
def extract(self, features):
return self.extractor(features)
def fit(self, X, y=None):
self.classifier.fit(X, y)
return self
def fit_transform(self, X, y=None):
return self.classifier.fit_transform(X, y)
def transform(self, X):
return self.classifier.transform(X)
def predict(self, X):
predictions = self.classifier.predict(X)
if self.symmetrize:
predictions[0::2] = (predictions[0::2] - predictions[1::2])/2
predictions[1::2] = -predictions[0::2]
return predictions
class CauseEffectEstimatorSymmetric(BaseEstimator):
def __init__(self, features=None, regressor=None, params=None, symmetrize=True):
self.extractor = f.extract_features
self.classifier_left = get_pipeline(features, regressor, params)
self.classifier_right = get_pipeline(features, regressor, params)
self.symmetrize = symmetrize
def extract(self, features):
return self.extractor(features)
def fit(self, X, y=None):
target_left = np.array(y)
target_left[target_left != 1] = 0
weight_left = np.ones(len(target_left))
weight_left[target_left==0] = sum(target_left==1)/float(sum(target_left==0))
try:
self.classifier_left.fit(X, target_left, regressor__sample_weight=weight_left)
except TypeError:
self.classifier_left.fit(X, target_left)
target_right = np.array(y)
target_right[target_right != -1] = 0
target_right[target_right == -1] = 1
weight_right = np.ones(len(target_right))
weight_right[target_right==0] = sum(target_right==1)/float(sum(target_right==0))
try:
self.classifier_right.fit(X, target_right, regressor__sample_weight=weight_right)
except TypeError:
self.classifier_right.fit(X, target_right)
return self
def fit_transform(self, X, y=None):
target_left = np.array(y)
target_left[target_left != 1] = 0
X_left = self.classifier_left.fit_transform(X, target_left)
target_right = np.array(y)
target_right[target_right != -1] = 0
target_right[target_right == -1] = 1
X_right = self.classifier_right.fit_transform(X, target_right)
return X_left, X_right
def transform(self, X):
return self.classifier_left.transform(X), self.classifier_right.transform(X)
def predict(self, X):
predictions_left = self.classifier_left.predict(X)
predictions_right = self.classifier_right.predict(X)
predictions = predictions_left - predictions_right
if self.symmetrize:
predictions[0::2] = (predictions[0::2] - predictions[1::2])/2
predictions[1::2] = -predictions[0::2]
return predictions
class CauseEffectEstimatorID(BaseEstimator):
def __init__(self, features_independence=None, features_direction=None, regressor=None, params=None, symmetrize=True):
self.extractor = f.extract_features
self.classifier_independence = get_pipeline(features_independence, regressor, params)
self.classifier_direction = get_pipeline(features_direction, regressor, params)
self.symmetrize = symmetrize
def extract(self, features):
return self.extractor(features)
def fit(self, X, y=None):
#independence training pairs
train_independence = X
target_independence = np.array(y)
target_independence[target_independence != 0] = 1
weight_independence = np.ones(len(target_independence))
weight_independence[target_independence==0] = sum(target_independence==1)/float(sum(target_independence==0))
try:
self.classifier_independence.fit(train_independence, target_independence, regressor__sample_weight=weight_independence)
except TypeError:
self.classifier_independence.fit(train_independence, target_independence)
#direction training pairs
direction_filter = y != 0
train_direction = X[direction_filter]
target_direction = y[direction_filter]
weight_direction = np.ones(len(target_direction))
weight_direction[target_direction==0] = sum(target_direction==1)/float(sum(target_direction==0))
try:
self.classifier_direction.fit(train_direction, target_direction, regressor__sample_weight=weight_direction)
except TypeError:
self.classifier_direction.fit(train_direction, target_direction)
return self
def fit_transform(self, X, y=None):
#independence training pairs
train_independence = X
target_independence = np.array(y)
target_independence[target_independence != 0] = 1
X_ind = self.classifier_independence.fit_transform(train_independence, target_independence)
#direction training pairs
direction_filter = y != 0
train_direction = X[direction_filter]
target_direction = y[direction_filter]
self.classifier_direction.fit(train_direction, target_direction)
X_dir = self.classifier_direction.transform(X)
return X_ind, X_dir
def transform(self, X):
X_ind = self.classifier_independence.transform(X)
X_dir = self.classifier_direction.transform(X)
return X_ind, X_dir
def predict(self, X):
predictions_independence = self.classifier_independence.predict(X)
if self.symmetrize:
predictions_independence[0::2] = (predictions_independence[0::2] + predictions_independence[1::2])/2
predictions_independence[1::2] = predictions_independence[0::2]
assert predictions_independence.min() >= 0
predictions_direction = self.classifier_direction.predict(X)
if self.symmetrize:
predictions_direction[0::2] = (predictions_direction[0::2] - predictions_direction[1::2])/2
predictions_direction[1::2] = -predictions_direction[0::2]
return predictions_independence * predictions_direction
def calculate_method(args):
obj = args[0]
name = args[1]
margs = args[2]
method = getattr(obj, name)
return method(*margs)
def pmap(func, mlist, n_jobs):
    if n_jobs != 1:
        pool = Pool(n_jobs if n_jobs != -1 else None)
        result = pool.map(func, mlist)
        pool.close()
        pool.join()
    else:
        # list() keeps Python 3's lazy map compatible with numpy callers
        result = list(map(func, mlist))
    return result
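# A tiny usage sketch (not from the original code): pmap degrades to a plain
# sequential map when n_jobs == 1 and fans out over a process pool otherwise
# (func must then be picklable, as calculate_method above is).
def _example_pmap():
    return pmap(abs, [-3, 4, -5], n_jobs=1)  # [3, 4, 5]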
class CauseEffectSystemCombination(BaseEstimator):
def __init__(self, extractor=f.extract_features, weights=None, symmetrize=True, n_jobs=-1):
self.extractor = extractor
self.features = selected_features
self.systems = [
CauseEffectEstimatorID(
features_direction=self.features,
features_independence=self.features,
regressor=GradientBoostingClassifier,
params=gbc_params,
symmetrize=symmetrize),
CauseEffectEstimatorSymmetric(
features=self.features,
regressor=GradientBoostingClassifier,
params=gbc_params,
symmetrize=symmetrize),
CauseEffectEstimatorOneStep(
features=self.features,
regressor=GradientBoostingClassifier,
params=gbc_params,
symmetrize=symmetrize),
]
self.weights = weights
self.n_jobs = n_jobs
def extract(self, features):
return self.extractor(features, n_jobs=self.n_jobs)
def fit(self, X, y=None):
task = [(m, 'fit', (X, y)) for m in self.systems]
self.systems = pmap(calculate_method, task, self.n_jobs)
return self
def fit_transform(self, X, y=None):
task = [(m, 'fit_transform', (X, y)) for m in self.systems]
return pmap(calculate_method, task, self.n_jobs)
def transform(self, X):
task = [(m, 'transform', (X,)) for m in self.systems]
return pmap(calculate_method, task, self.n_jobs)
def predict(self, X):
task = [(m, 'predict', (X,)) for m in self.systems]
a = np.array(pmap(calculate_method, task, self.n_jobs))
if self.weights is not None:
return np.dot(self.weights, a)
else:
return a
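# A small illustration (not part of the original model code) of the weighted
# ensemble step in predict() above: with weights w over systems and a matrix
# a of shape (n_systems, n_pairs), numpy.dot(w, a) is a per-pair weighted sum.
# Numbers are illustrative only.
def _example_weighted_combination():
    import numpy as np
    a = np.array([[0.2, -0.4],
                  [0.1, -0.2],
                  [0.3, -0.6]])   # 3 systems, 2 pairs
    w = [0.5, 0.25, 0.25]
    return np.dot(w, a)           # array([ 0.2, -0.4])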
|
|
from django.shortcuts import get_object_or_404
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import NotFound, ValidationError, PermissionDenied
from api.base.exceptions import Gone
from api.base import permissions as base_permissions
from api.base.views import JSONAPIBaseView
from api.comments.permissions import (
CommentDetailPermissions,
CommentReportsPermissions,
)
from api.comments.serializers import (
CommentSerializer,
NodeCommentDetailSerializer,
RegistrationCommentDetailSerializer,
CommentReportSerializer,
CommentReportDetailSerializer,
CommentReport,
)
from framework.auth.core import Auth
from framework.auth.oauth_scopes import CoreScopes
from framework.exceptions import PermissionsError
from osf.models import AbstractNode, Comment, BaseFileNode
from addons.wiki.models import WikiPage
class CommentMixin(object):
"""Mixin with convenience methods for retrieving the current comment based on the
current URL. By default, fetches the comment based on the comment_id kwarg.
"""
serializer_class = CommentSerializer
comment_lookup_url_kwarg = 'comment_id'
def get_comment(self, check_permissions=True):
pk = self.kwargs[self.comment_lookup_url_kwarg]
comment = get_object_or_404(Comment, guids___id=pk, root_target__isnull=False, guids___id__isnull=False)
if comment.root_target is None:
raise NotFound
if check_permissions:
# May raise a permission denied
self.check_object_permissions(self.request, comment)
return comment
class CommentDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, CommentMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/comments_read).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
CommentDetailPermissions,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_COMMENTS_READ]
required_write_scopes = [CoreScopes.NODE_COMMENTS_WRITE]
serializer_class = NodeCommentDetailSerializer
view_category = 'comments'
view_name = 'comment-detail'
# overrides RetrieveAPIView
def get_object(self):
comment = self.get_comment()
comment_node = None
if isinstance(comment.target.referent, AbstractNode):
comment_node = comment.target.referent
elif isinstance(comment.target.referent, BaseFileNode):
comment_node = comment.target.referent.target
elif isinstance(comment.target.referent, WikiPage):
comment_node = comment.target.referent.node
if comment_node and comment_node.is_registration:
self.serializer_class = RegistrationCommentDetailSerializer
return comment
def perform_destroy(self, instance):
auth = Auth(self.request.user)
if instance.is_deleted:
raise ValidationError('Comment already deleted.')
else:
try:
instance.delete(auth, save=True)
except PermissionsError:
raise PermissionDenied('Not authorized to delete this comment.')
class CommentReportsList(JSONAPIBaseView, generics.ListCreateAPIView, CommentMixin):
"""List of reports made for a comment. *Writeable*.
Paginated list of reports for a comment. Each resource contains the full representation of the
report, meaning additional requests to an individual comment's report detail view are not necessary.
###Permissions
The comment reports endpoint can only be viewed by users with permission to comment on the node. Users
are only shown comment reports that they have made.
##Attributes
OSF comment report entities have the "comment_reports" `type`.
name            type        description
=====================================================================================
category        string      the type of spam, must be one of the allowed values
message         string      description of why the comment was reported
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "comment_reports", # required
"attributes": {
"category": {category}, # mandatory
"message": {text}, # optional
}
}
}
Success: 201 CREATED + comment report representation
To create a report for this comment, issue a POST request against this endpoint. The `category` field is mandatory,
and must be one of the following: "spam", "hate" or "violence" . The `message` field is optional. If the comment
report creation is successful the API will return a 201 response with the representation of the new comment report
in the body. For the new comment report's canonical URL, see the `/links/self` field of the response.
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticated,
CommentReportsPermissions,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.COMMENT_REPORTS_READ]
required_write_scopes = [CoreScopes.COMMENT_REPORTS_WRITE]
serializer_class = CommentReportSerializer
view_category = 'comments'
view_name = 'comment-reports'
ordering = ('-modified',)
def get_queryset(self):
user_id = self.request.user._id
comment = self.get_comment()
reports = comment.reports
serialized_reports = []
if user_id in reports:
report = CommentReport(user_id, reports[user_id]['category'], reports[user_id]['text'])
serialized_reports.append(report)
return serialized_reports
class CommentReportDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, CommentMixin):
"""Details about a specific comment report. *Writeable*.
###Permissions
A comment report detail can only be viewed, edited and removed by the user who created the report.
##Attributes
OSF comment report entities have the "comment_reports" `type`.
name            type        description
=====================================================================================
category        string      the type of spam, must be one of the allowed values
message         string      description of why the comment was reported
##Links
self: the canonical api endpoint of this comment report
##Actions
###Update
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "comment_reports", # required
"id": {user_id}, # required
"attributes": {
"category": {category}, # mandatory
"message": {text}, # optional
}
}
}
Success: 200 OK + comment report representation
To update a report for this comment, issue a PUT/PATCH request against this endpoint. The `category` field is
mandatory for a PUT request and must be one of the following: "spam", "hate" or "violence". The `message` field
is optional. Non-string values will be accepted and stringified, but we make no promises about the stringification
output. So don't do that.
###Delete
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 + No content
To delete a comment report, issue a DELETE request against `/links/self`. A successful delete will return a
204 No Content response.
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticated,
CommentReportsPermissions,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.COMMENT_REPORTS_READ]
required_write_scopes = [CoreScopes.COMMENT_REPORTS_WRITE]
serializer_class = CommentReportDetailSerializer
view_category = 'comments'
view_name = 'report-detail'
# overrides RetrieveUpdateDestroyAPIView
def get_object(self):
comment = self.get_comment()
reports = comment.reports
user_id = self.request.user._id
reporter_id = self.kwargs['user_id']
if reporter_id != user_id:
raise PermissionDenied('Not authorized to view this comment report.')
if reporter_id in reports:
return CommentReport(user_id, reports[user_id]['category'], reports[user_id]['text'])
else:
raise Gone(detail='The requested comment report is no longer available.')
# overrides RetrieveUpdateDestroyAPIView
def perform_destroy(self, instance):
user = self.request.user
comment = self.get_comment()
try:
comment.retract_report(user, save=True)
except ValueError as error:
raise ValidationError(str(error))
|
|
from conans.model.options import Options, PackageOptions, OptionsValues
from conans.model.requires import Requirements
from conans.model.build_info import DepsCppInfo
from conans import tools # @UnusedImport KEEP THIS! Needed for pyinstaller to copy to exe.
from conans.errors import ConanException
from conans.model.env_info import DepsEnvInfo
import os
def create_options(conanfile):
try:
package_options = PackageOptions(getattr(conanfile, "options", None))
options = Options(package_options)
default_options = getattr(conanfile, "default_options", None)
if default_options:
if isinstance(default_options, tuple):
default_values = OptionsValues.loads("\n".join(default_options))
elif isinstance(default_options, list):
default_values = OptionsValues.from_list(default_options)
elif isinstance(default_options, str):
default_values = OptionsValues.loads(default_options)
else:
raise ConanException("Please define your default_options as list or "
"multiline string")
options.values = default_values
return options
except Exception as e:
raise ConanException("Error while initializing options. %s" % str(e))
def create_requirements(conanfile):
try:
# Actual requirements of this package
if not hasattr(conanfile, "requires"):
return Requirements()
else:
if not conanfile.requires:
return Requirements()
if isinstance(conanfile.requires, tuple):
return Requirements(*conanfile.requires)
else:
return Requirements(conanfile.requires, )
except Exception as e:
raise ConanException("Error while initializing requirements. %s" % str(e))
def create_settings(conanfile, settings):
try:
defined_settings = getattr(conanfile, "settings", None)
if isinstance(defined_settings, str):
defined_settings = [defined_settings]
current = defined_settings or {}
settings.constraint(current)
return settings
except Exception as e:
raise ConanException("Error while initializing settings. %s" % str(e))
def create_exports(conanfile):
if not hasattr(conanfile, "exports"):
return None
else:
if isinstance(conanfile.exports, str):
return (conanfile.exports, )
return conanfile.exports
class ConanFile(object):
""" The base class for all conans
"""
name = None
version = None # Any str, can be "1.1" or whatever
url = None # The URL where this File is located, as github, to collaborate in package
# The license of the PACKAGE, just a shortcut, does not replace or
# change the actual license of the source code
license = None
author = None # Main maintainer/responsible for the package, any format
build_policy = None
short_paths = False
def __init__(self, output, runner, settings, conanfile_directory, user=None, channel=None):
'''
param settings: Settings
'''
# User defined generators
self.generators = self.generators if hasattr(self, "generators") else ["txt"]
if isinstance(self.generators, str):
self.generators = [self.generators]
# User defined options
self.options = create_options(self)
self.requires = create_requirements(self)
self.settings = create_settings(self, settings)
self.exports = create_exports(self)
# needed variables to pack the project
self.cpp_info = None # Will be initialized at processing time
self.deps_cpp_info = DepsCppInfo()
# environment variables declared in the package_info
self.env_info = None # Will be initialized at processing time
self.deps_env_info = DepsEnvInfo()
self.copy = None # initialized at runtime
# an output stream (writeln, info, warn, error)
self.output = output
# something that can run commands, e.g. os.system
self._runner = runner
self._conanfile_directory = conanfile_directory
self.package_folder = None # Assigned at runtime
self._scope = None
# user specified env variables
self.env = None # Assigned at runtime
self._user = user
self._channel = channel
@property
def channel(self):
if not self._channel:
self._channel = os.getenv("CONAN_CHANNEL")
if not self._channel:
raise ConanException("CONAN_CHANNEL environment variable not defined, "
"but self.channel is used in conanfile")
return self._channel
@property
def user(self):
if not self._user:
self._user = os.getenv("CONAN_USERNAME")
if not self._user:
raise ConanException("CONAN_USERNAME environment variable not defined, "
"but self.user is used in conanfile")
return self._user
def collect_libs(self, folder="lib"):
if not self.package_folder:
return []
lib_folder = os.path.join(self.package_folder, folder)
if not os.path.exists(lib_folder):
self.output.warn("Package folder doesn't exist, can't collect libraries")
return []
files = os.listdir(lib_folder)
result = []
for f in files:
name, ext = os.path.splitext(f)
if ext in (".so", ".lib", ".a", ".dylib"):
if ext != ".lib" and name.startswith("lib"):
name = name[3:]
result.append(name)
return result
@property
def scope(self):
return self._scope
@scope.setter
def scope(self, value):
self._scope = value
if value.dev:
self.requires.allow_dev = True
try:
if hasattr(self, "dev_requires"):
if isinstance(self.dev_requires, tuple):
self.requires.add_dev(*self.dev_requires)
else:
self.requires.add_dev(self.dev_requires, )
except Exception as e:
raise ConanException("Error while initializing dev_requirements. %s" % str(e))
@property
def conanfile_directory(self):
return self._conanfile_directory
@property
def build_policy_missing(self):
return self.build_policy == "missing"
@property
def build_policy_always(self):
return self.build_policy == "always"
def source(self):
pass
def requirements(self):
pass
def system_requirements(self):
""" this method can be overwritten to implement logic for system package
managers, as apt-get
You can define self.global_system_requirements = True, if you want the installation
to be for all packages (not depending on settings/options/requirements)
"""
def config_options(self):
""" modify options, probably conditioned to some settings. This call is executed
before config_settings. E.g.
if self.settings.os == "Windows":
del self.options.shared # shared/static not supported in win
"""
def configure(self):
""" modify settings, probably conditioned to some options. This call is executed
after config_options. E.g.
if self.options.header_only:
self.settings.clear()
This is also the place for conditional requirements
"""
def imports(self):
pass
def build(self):
self.output.warn("This conanfile has no build step")
def package(self):
self.output.warn("This conanfile has no package step")
def package_info(self):
""" define cpp_build_info, flags, etc
"""
def run(self, command, output=True, cwd=None):
""" runs such a command in the folder the Conan
is defined
"""
retcode = self._runner(command, output, cwd)
if retcode != 0:
raise ConanException("Error %d while executing %s" % (retcode, command))
def conan_info(self):
""" modify the conans info, typically to narrow values
eg.: conaninfo.package_references = []
"""
def test(self):
raise ConanException("You need to create a method 'test' in your test/conanfile.py")
def __repr__(self):
result = []
result.append("name: %s" % self.name)
result.append("version: %s" % self.version)
return '\n'.join(result)
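# An illustrative minimal recipe (not part of the conans sources) showing the
# hook methods above being overridden; the package name and the command run
# in build() are hypothetical.
class _ExampleRecipe(ConanFile):
    name = "example"
    version = "0.1"
    settings = "os", "compiler", "build_type", "arch"

    def config_options(self):
        # runs before configure(); a typical use is dropping OS-specific options
        if self.settings.os == "Windows":
            pass  # e.g. del self.options.fPIC

    def build(self):
        # self.run() raises ConanException on a non-zero return code
        self.run("cmake --version")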
|
|
#!/usr/bin/env python
from bs4 import BeautifulSoup
import glob
import json
#############################################
# Convert Hearthstone card data XML to JSON #
#############################################
__author__ = "Taylor Caldwell - http://github.com/rithms"
__copyright__ = "Copyright 2015, Taylor Caldwell"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Taylor Caldwell"
__email__ = "tcaldwel@nmsu.edu"
__status__ = "Production"
# EnumIds - Non-Boolean
enum_dict = {
45 : "health",
47 : "attack",
48 : "cost",
183 : "cardSet",
184 : "cardTextInHand",
185 : "cardName",
187 : "durability",
199 : "class",
200 : "race",
201 : "faction",
202 : "cardType",
203 : "rarity",
251 : "attackVisualType",
252 : "cardTextInPlay",
268 : "devState",
325 : "targetingArrowText",
330 : "enchantmentBirthVisual",
331 : "enchantmentIdleVisual",
342 : "artistName",
351 : "flavorText",
365 : "howToGetThisGoldCard",
364 : "howToGetThisCard",
#377 : "unknownHasOnDrawEffect",
#380 : "unknownBlackrockHeroes",
#389 : "unknownDuneMaulShaman",
#402 : "unknownIntenseGaze",
#401 : "unknownBroodAffliction"
}
# EnumIds - Boolean
bool_dict = {
32 : "Trigger Visual",
114 : "elite",
321 : "collectible",
189 : "Windfury",
190 : "Taunt",
191 : "Stealth",
192 : "Spell Power",
194 : "Divine Shield",
197 : "Charge",
205 : "Summoned",
208 : "Freeze",
212 : "Enrage",
215 : "Overload",
217 : "Deathrattle",
218 : "Battlecry",
219 : "Secret",
220 : "Combo",
240 : "Can't Be Damaged",
293 : "Morph",
335 : "Invisible Deathrattle",
338 : "One Turn Effect",
339 : "Silence",
340 : "Counter",
349 : "Immune To Spell Power",
350 : "Adjacent Buff",
361 : "Heal Target",
362 : "Aura",
363 : "Poisonous",
367 : "AI Must Play",
370 : "Affected By Spell Power",
388 : "Spare Part",
}
# Card Class IDs
class_dict = {
0 : "Developer",
2 : "Druid",
3 : "Hunter",
4 : "Mage",
5 : "Paladin",
6 : "Priest",
7 : "Rogue",
8 : "Shaman",
9 : "Warlock",
10 : "Warrior",
11 : "Dream"
}
# Card Set IDs
set_dict = {
2 : "Basic",
3 : "Classic",
4 : "Reward",
5 : "Missions",
7 : "System",
8 : "Debug",
11 : "Promotion",
12 : "Curse of Naxxramas",
13 : "Goblin vs Gnomes",
14 : "Blackrock Mountain",
16 : "Credits"
}
# Card Type IDs
type_dict = {
3 : "Hero",
4 : "Minion",
5 : "Spell",
6 : "Enchantment",
7 : "Weapon",
10 : "Hero Power"
}
# Card Race IDs
race_dict = {
14 : "Murloc",
15 : "Demon",
17 : "Mechanical",
20 : "Beast",
21 : "Totem",
23 : "Pirate",
24 : "Dragon"
}
# Card Faction IDs
faction_dict = {
1 : "Horde",
2 : "Alliance",
3 : "Neutral"
}
# Card Rarity IDs
rarity_dict = {
0 : "Developer",
1 : "Common",
2 : "Free",
3 : "Rare",
4 : "Epic",
5 : "Legendary"
}
# Get the name of the corresponding enum ID
def get_name(enum_id, d):
if enum_id in d:
return d[enum_id]
for f in glob.glob('cardxml0/CAB-cardxml0/TextAsset/*.txt'):
with open(f) as cardfile:
file_name = f.split('/')[-1].split('.')[0]
cardsoup = BeautifulSoup(cardfile.read(), features="xml")
cards = cardsoup.find_all('Entity')
json_dict = { 'data' : {} }
for card in cards:
card_id = card.get('CardID')
json_dict['data'][card_id] = { 'id' : card_id, 'mechanics' : [] }
tags = card.find_all('Tag')
for tag in tags:
enum_id = int(tag.get('enumID'))
if(tag.get('type') == 'String'):
enum_name = tag.text
else:
enum_name = tag.get('value')
if enum_id in enum_dict:
field = enum_dict[enum_id]
if field == 'class':
enum_name = get_name(int(enum_name), class_dict)
elif field == 'cardSet':
enum_name = get_name(int(enum_name), set_dict)
elif field == 'cardType':
enum_name = get_name(int(enum_name), type_dict)
elif field == 'race':
enum_name = get_name(int(enum_name), race_dict)
elif field == 'faction':
enum_name = get_name(int(enum_name), faction_dict)
elif field == 'rarity':
enum_name = get_name(int(enum_name), rarity_dict)
json_dict['data'][card_id][enum_dict[enum_id]] = enum_name
elif enum_id in bool_dict:
field = bool_dict[enum_id]
if field == 'collectible' or field == 'elite':
if enum_name == '1':
json_dict['data'][card_id][field] = True
elif enum_name == '0':
json_dict['data'][card_id][field] = False
else:
if enum_name == '1':
json_dict['data'][card_id]['mechanics'].append(field)
for key in bool_dict:
field = bool_dict[key]
if field == 'collectible' or field == 'elite':
if field not in json_dict['data'][card_id]:
json_dict['data'][card_id][field] = False
if not json_dict['data'][card_id]['mechanics']:
del json_dict['data'][card_id]['mechanics']
with open(file_name+'.json', 'w') as outfile:
json.dump(json_dict, outfile, sort_keys=True)
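# Shape of one emitted record, for orientation (field values hypothetical;
# numeric tags such as cost stay strings, and 'mechanics' is dropped entirely
# when empty, as handled above):
#   json_dict['data']['CS2_042'] == {
#       'id': 'CS2_042', 'cardName': '...', 'cardSet': 'Basic',
#       'cardType': 'Minion', 'cost': '2', 'collectible': True,
#       'elite': False, 'mechanics': ['Battlecry'],
#   }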
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import rest_streaming
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
import dataclasses
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import (
RegionCommitmentsTransport,
DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO,
)
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class RegionCommitmentsRestInterceptor:
"""Interceptor for RegionCommitments.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the RegionCommitmentsRestTransport.
.. code-block:: python
        class MyCustomRegionCommitmentsInterceptor(RegionCommitmentsRestInterceptor):
            def pre_aggregated_list(self, request, metadata):
                logging.info(f"Received request: {request}")
                return request, metadata
            def post_aggregated_list(self, response):
                logging.info(f"Received response: {response}")
                return response
            def pre_get(self, request, metadata):
                logging.info(f"Received request: {request}")
                return request, metadata
            def post_get(self, response):
                logging.info(f"Received response: {response}")
                return response
            def pre_insert(self, request, metadata):
                logging.info(f"Received request: {request}")
                return request, metadata
            def post_insert(self, response):
                logging.info(f"Received response: {response}")
                return response
            def pre_list(self, request, metadata):
                logging.info(f"Received request: {request}")
                return request, metadata
            def post_list(self, response):
                logging.info(f"Received response: {response}")
                return response
            def pre_update(self, request, metadata):
                logging.info(f"Received request: {request}")
                return request, metadata
            def post_update(self, response):
                logging.info(f"Received response: {response}")
                return response
transport = RegionCommitmentsRestTransport(interceptor=MyCustomRegionCommitmentsInterceptor())
client = RegionCommitmentsClient(transport=transport)
"""
def pre_aggregated_list(
self,
request: compute.AggregatedListRegionCommitmentsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
compute.AggregatedListRegionCommitmentsRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for aggregated_list
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionCommitments server.
"""
return request, metadata
def post_aggregated_list(
self, response: compute.CommitmentAggregatedList
) -> compute.CommitmentAggregatedList:
"""Post-rpc interceptor for aggregated_list
Override in a subclass to manipulate the response
after it is returned by the RegionCommitments server but before
it is returned to user code.
"""
return response
def pre_get(
self,
request: compute.GetRegionCommitmentRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.GetRegionCommitmentRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionCommitments server.
"""
return request, metadata
def post_get(self, response: compute.Commitment) -> compute.Commitment:
"""Post-rpc interceptor for get
Override in a subclass to manipulate the response
after it is returned by the RegionCommitments server but before
it is returned to user code.
"""
return response
def pre_insert(
self,
request: compute.InsertRegionCommitmentRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.InsertRegionCommitmentRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for insert
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionCommitments server.
"""
return request, metadata
def post_insert(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for insert
Override in a subclass to manipulate the response
after it is returned by the RegionCommitments server but before
it is returned to user code.
"""
return response
def pre_list(
self,
request: compute.ListRegionCommitmentsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.ListRegionCommitmentsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionCommitments server.
"""
return request, metadata
def post_list(self, response: compute.CommitmentList) -> compute.CommitmentList:
"""Post-rpc interceptor for list
Override in a subclass to manipulate the response
after it is returned by the RegionCommitments server but before
it is returned to user code.
"""
return response
def pre_update(
self,
request: compute.UpdateRegionCommitmentRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.UpdateRegionCommitmentRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for update
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionCommitments server.
"""
return request, metadata
def post_update(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for update
Override in a subclass to manipulate the response
after it is returned by the RegionCommitments server but before
it is returned to user code.
"""
return response
@dataclasses.dataclass
class RegionCommitmentsRestStub:
_session: AuthorizedSession
_host: str
_interceptor: RegionCommitmentsRestInterceptor
class RegionCommitmentsRestTransport(RegionCommitmentsTransport):
"""REST backend transport for RegionCommitments.
The RegionCommitments API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
_STUBS: Dict[str, RegionCommitmentsRestStub] = {}
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[RegionCommitmentsRestInterceptor] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or RegionCommitmentsRestInterceptor()
self._prep_wrapped_messages(client_info)
class _AggregatedList(RegionCommitmentsRestStub):
def __hash__(self):
return hash("AggregatedList")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
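        # Empty for this method; when an API method has required query
        # parameters, their proto3 default values are listed here and merged
        # into the outgoing request in __call__ below.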
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.AggregatedListRegionCommitmentsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.CommitmentAggregatedList:
r"""Call the aggregated list method over HTTP.
Args:
request (~.compute.AggregatedListRegionCommitmentsRequest):
The request object. A request message for
RegionCommitments.AggregatedList. See
the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.CommitmentAggregatedList:
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/aggregated/commitments",
},
]
request, metadata = self._interceptor.pre_aggregated_list(request, metadata)
request_kwargs = compute.AggregatedListRegionCommitmentsRequest.to_dict(
request
)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.AggregatedListRegionCommitmentsRequest.to_json(
compute.AggregatedListRegionCommitmentsRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.CommitmentAggregatedList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_aggregated_list(resp)
return resp
class _Get(RegionCommitmentsRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetRegionCommitmentRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Commitment:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetRegionCommitmentRequest):
The request object. A request message for
RegionCommitments.Get. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Commitment:
Represents a regional Commitment
resource. Creating a commitment resource
means that you are purchasing a
committed use contract with an explicit
start and end time. You can create
commitments based on vCPUs and memory
usage and receive discounted rates. For
full details, read Signing Up for
Committed Use Discounts.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/commitments/{commitment}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
request_kwargs = compute.GetRegionCommitmentRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.GetRegionCommitmentRequest.to_json(
compute.GetRegionCommitmentRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Commitment.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_get(resp)
return resp
class _Insert(RegionCommitmentsRestStub):
def __hash__(self):
return hash("Insert")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InsertRegionCommitmentRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertRegionCommitmentRequest):
The request object. A request message for
RegionCommitments.Insert. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/commitments",
"body": "commitment_resource",
},
]
request, metadata = self._interceptor.pre_insert(request, metadata)
request_kwargs = compute.InsertRegionCommitmentRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.Commitment.to_json(
compute.Commitment(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.InsertRegionCommitmentRequest.to_json(
compute.InsertRegionCommitmentRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_insert(resp)
return resp
class _List(RegionCommitmentsRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListRegionCommitmentsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.CommitmentList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListRegionCommitmentsRequest):
The request object. A request message for
RegionCommitments.List. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.CommitmentList:
Contains a list of Commitment
resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/commitments",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
request_kwargs = compute.ListRegionCommitmentsRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ListRegionCommitmentsRequest.to_json(
compute.ListRegionCommitmentsRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.CommitmentList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_list(resp)
return resp
class _Update(RegionCommitmentsRestStub):
def __hash__(self):
return hash("Update")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.UpdateRegionCommitmentRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the update method over HTTP.
Args:
request (~.compute.UpdateRegionCommitmentRequest):
The request object. A request message for
RegionCommitments.Update. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/compute/v1/projects/{project}/regions/{region}/commitments/{commitment}",
"body": "commitment_resource",
},
]
request, metadata = self._interceptor.pre_update(request, metadata)
request_kwargs = compute.UpdateRegionCommitmentRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.Commitment.to_json(
compute.Commitment(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.UpdateRegionCommitmentRequest.to_json(
compute.UpdateRegionCommitmentRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_update(resp)
return resp
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListRegionCommitmentsRequest],
compute.CommitmentAggregatedList,
]:
stub = self._STUBS.get("aggregated_list")
if not stub:
stub = self._STUBS["aggregated_list"] = self._AggregatedList(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def get(self) -> Callable[[compute.GetRegionCommitmentRequest], compute.Commitment]:
stub = self._STUBS.get("get")
if not stub:
stub = self._STUBS["get"] = self._Get(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def insert(
self,
) -> Callable[[compute.InsertRegionCommitmentRequest], compute.Operation]:
stub = self._STUBS.get("insert")
if not stub:
stub = self._STUBS["insert"] = self._Insert(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def list(
self,
) -> Callable[[compute.ListRegionCommitmentsRequest], compute.CommitmentList]:
stub = self._STUBS.get("list")
if not stub:
stub = self._STUBS["list"] = self._List(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def update(
self,
) -> Callable[[compute.UpdateRegionCommitmentRequest], compute.Operation]:
stub = self._STUBS.get("update")
if not stub:
stub = self._STUBS["update"] = self._Update(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
def close(self):
self._session.close()
__all__ = ("RegionCommitmentsRestTransport",)
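# --- Usage sketch (illustrative, not part of the generated module) ---
# Wiring a custom interceptor into the REST transport; assumes application
# default credentials and placeholder project/region names:
#
#   from google.cloud.compute_v1.services.region_commitments import (
#       RegionCommitmentsClient,
#   )
#
#   class LoggingInterceptor(RegionCommitmentsRestInterceptor):
#       def post_list(self, response):
#           print(f"got {len(response.items)} commitments")
#           return response
#
#   transport = RegionCommitmentsRestTransport(interceptor=LoggingInterceptor())
#   client = RegionCommitmentsClient(transport=transport)
#   commitments = client.list(project="my-project", region="us-central1")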
|
|
import re
from datetime import datetime
from typing import Dict, List, Optional, Tuple
from urllib.parse import urlparse
import pytest
from bs4 import BeautifulSoup
from werkzeug.test import Client
from backend.common.consts.account_permission import AccountPermission
from backend.common.consts.event_type import EventType
from backend.common.consts.suggestion_state import SuggestionState
from backend.common.models.event import Event
from backend.common.models.suggestion import Suggestion
from backend.common.models.webcast import Webcast, WebcastType
from backend.common.suggestions.suggestion_creator import (
SuggestionCreationStatus,
SuggestionCreator,
)
from backend.web.handlers.conftest import get_inputs_from_form
@pytest.fixture(autouse=True)
def create_event(ndb_stub) -> None:
event = Event(
id="2016necmp",
name="New England District Championship",
event_type_enum=EventType.OFFSEASON,
short_name="New England",
event_short="necmp",
year=2016,
end_date=datetime(2016, 3, 27),
official=False,
city="Hartford",
state_prov="CT",
country="USA",
venue="Some Venue",
venue_address="Some Venue, Hartford, CT, USA",
timezone_id="America/New_York",
start_date=datetime(2016, 3, 24),
webcast_json="",
website="http://www.firstsv.org",
)
event.put()
@pytest.fixture
def login_user_with_permission(login_user):
login_user.permissions = [AccountPermission.REVIEW_MEDIA]
return login_user
def get_suggestion_queue_and_fields(
web_client: Client, form_id: Optional[str] = None
) -> Tuple[List[str], Dict]:
response = web_client.get("/suggest/event/webcast/review")
assert response.status_code == 200
soup = BeautifulSoup(response.data, "html.parser")
review_form = soup.find(id="review_webcasts")
assert review_form is not None
suggestions = review_form.find_all(id=re.compile("review_.*"))
queue = []
for suggestion in suggestions:
queue.append(suggestion["id"].split("review_")[1])
inputs = None
if form_id:
form = soup.find(id=form_id)
assert form is not None
inputs = get_inputs_from_form(form)
return queue, (inputs or {})
def createSuggestion(logged_in_user) -> str:
status = SuggestionCreator.createEventWebcastSuggestion(
logged_in_user.account_key,
"https://twitch.tv/frcgamesense",
"",
"2016necmp",
)
assert status == SuggestionCreationStatus.SUCCESS
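    # The key appears to encode webcast_<event>_<type>_<channel>_<file>.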
return "webcast_2016necmp_twitch_frcgamesense_None"
def test_login_redirect(web_client: Client) -> None:
response = web_client.get("/suggest/event/webcast/review")
assert response.status_code == 302
assert urlparse(response.headers["Location"]).path == "/account/login"
def test_no_permissions(login_user, web_client: Client) -> None:
response = web_client.get("/suggest/event/webcast/review")
assert response.status_code == 401
def test_nothing_to_review(login_user_with_permission, web_client: Client) -> None:
queue, _ = get_suggestion_queue_and_fields(web_client)
assert queue == []
def test_reject_all(login_user_with_permission, web_client: Client, ndb_stub) -> None:
suggestion_id = createSuggestion(login_user_with_permission)
queue, reject_fields = get_suggestion_queue_and_fields(
web_client, "reject_all_2016necmp"
)
assert queue == [suggestion_id]
    assert reject_fields != {}
reject_fields["verdict"] = "reject_all"
response = web_client.post(
"/suggest/event/webcast/review",
data=reject_fields,
follow_redirects=True,
)
assert response.status_code == 200
# Make sure we mark the Suggestion as REVIEWED
suggestion = Suggestion.get_by_id(suggestion_id)
assert suggestion is not None
assert suggestion.review_state == SuggestionState.REVIEW_REJECTED
# Make sure the Event has no webcasts
event = Event.get_by_id("2016necmp")
assert event is not None
assert event.webcast == []
def test_accept_with_default_details(
login_user_with_permission,
web_client: Client,
ndb_stub,
taskqueue_stub,
) -> None:
suggestion_id = createSuggestion(login_user_with_permission)
queue, form_fields = get_suggestion_queue_and_fields(
web_client, f"review_{suggestion_id}"
)
assert queue == [suggestion_id]
    assert form_fields != {}
form_fields["verdict"] = "accept"
response = web_client.post(
"/suggest/event/webcast/review",
data=form_fields,
follow_redirects=True,
)
assert response.status_code == 200
# Make sure we mark the Suggestion as REVIEWED
suggestion = Suggestion.get_by_id(suggestion_id)
assert suggestion is not None
assert suggestion.review_state == SuggestionState.REVIEW_ACCEPTED
# Make sure the Event has a webcast
event = Event.get_by_id("2016necmp")
assert event is not None
assert event.webcast == [Webcast(type=WebcastType.TWITCH, channel="frcgamesense")]
def test_accept_with_different_details(
login_user_with_permission,
web_client: Client,
ndb_stub,
taskqueue_stub,
) -> None:
suggestion_id = createSuggestion(login_user_with_permission)
queue, form_fields = get_suggestion_queue_and_fields(
web_client, f"review_{suggestion_id}"
)
assert queue == [suggestion_id]
    assert form_fields != {}
form_fields["webcast_type"] = "youtube"
form_fields["webcast_channel"] = "foobar"
form_fields["webcast_file"] = "meow"
form_fields["verdict"] = "accept"
form_fields["verdict"] = "accept"
response = web_client.post(
"/suggest/event/webcast/review",
data=form_fields,
follow_redirects=True,
)
assert response.status_code == 200
# Make sure we mark the Suggestion as REVIEWED
suggestion = Suggestion.get_by_id(suggestion_id)
assert suggestion is not None
assert suggestion.review_state == SuggestionState.REVIEW_ACCEPTED
# Make sure the Event has a webcast
event = Event.get_by_id("2016necmp")
assert event is not None
assert event.webcast == [
Webcast(
type=WebcastType.YOUTUBE,
channel="foobar",
file="meow",
)
]
def test_reject_single_webcast(
login_user_with_permission, web_client: Client, ndb_stub
) -> None:
suggestion_id = createSuggestion(login_user_with_permission)
queue, form_fields = get_suggestion_queue_and_fields(
web_client, f"review_{suggestion_id}"
)
assert queue == [suggestion_id]
    assert form_fields != {}
form_fields["verdict"] = "reject"
response = web_client.post(
"/suggest/event/webcast/review",
data=form_fields,
follow_redirects=True,
)
assert response.status_code == 200
# Make sure we mark the Suggestion as REVIEWED
suggestion = Suggestion.get_by_id(suggestion_id)
assert suggestion is not None
assert suggestion.review_state == SuggestionState.REVIEW_REJECTED
# Make sure the Event has no webcasts
event = Event.get_by_id("2016necmp")
assert event is not None
assert event.webcast == []
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2020, imageio contributors
#
# imageio is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
# styletest: skip
"""
Release:
* Write release notes
* Increase __version__
* git tag the release (and push the tag to Github)
* Upload to PyPI: python setup.py sdist bdist_wheel upload
* Update conda recipe on conda-forge feedstock
"""
import os
import os.path as op
import sys
import shutil
from distutils.core import Command
from distutils.command.sdist import sdist
from distutils.command.build_py import build_py
from itertools import chain
try:
from setuptools import setup # Supports wheels
except ImportError:
from distutils.core import setup # Supports anything else
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
bdist_wheel = object
name = "imageio"
description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats."
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Get version and docstring
__version__ = None
__doc__ = ""
docStatus = 0 # Not started, in progress, done
initFile = os.path.join(THIS_DIR, "imageio", "__init__.py")
for line in open(initFile).readlines():
if line.startswith("__version__"):
exec(line.strip())
elif line.startswith('"""'):
if docStatus == 0:
docStatus = 1
line = line.lstrip('"')
elif docStatus == 1:
docStatus = 2
if docStatus == 1:
__doc__ += line.rstrip() + "\n"
# Template for long description. __doc__ gets inserted here
long_description = """
.. image:: https://github.com/imageio/imageio/workflows/CI/badge.svg
:target: https://github.com/imageio/imageio/actions
__doc__
Release notes: https://github.com/imageio/imageio/blob/master/CHANGELOG.md
Example:
.. code-block:: python
>>> import imageio
>>> im = imageio.imread('imageio:astronaut.png')
>>> im.shape # im is a numpy array
(512, 512, 3)
>>> imageio.imwrite('astronaut-gray.jpg', im[:, :, 0])
See the `API Reference <https://imageio.readthedocs.io/en/stable/reference/index.html>`_
or `examples <https://imageio.readthedocs.io/en/stable/examples.html>`_
for more information.
"""
# Prepare resources dir
package_data = [
"resources/shipped_resources_go_here",
"resources/*.*",
"resources/images/*.*",
"resources/freeimage/*.*",
]
def _set_crossplatform_resources(resource_dir):
import imageio
# Clear now
if op.isdir(resource_dir):
shutil.rmtree(resource_dir)
os.mkdir(resource_dir)
open(op.join(resource_dir, "shipped_resources_go_here"), "wb")
# Load images
for fname in [
"images/chelsea.png",
"images/chelsea.zip",
"images/astronaut.png",
"images/newtonscradle.gif",
"images/cockatoo.mp4",
"images/realshort.mp4",
"images/stent.npz",
]:
imageio.core.get_remote_file(fname, resource_dir, force_download=True)
def _set_platform_resources(resource_dir, platform):
import imageio
# Create file to show platform
assert platform
open(op.join(resource_dir, "platform_%s" % platform), "wb")
# Load freeimage
fname = imageio.plugins.freeimage.FNAME_PER_PLATFORM[platform]
imageio.core.get_remote_file(
"freeimage/" + fname, resource_dir, force_download=True
)
class test_command(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from imageio import testing
os.environ["IMAGEIO_NO_INTERNET"] = "1" # run tests without inet
sys.exit(testing.test_unit())
class build_with_fi(build_py):
def run(self):
# Download images and libs
import imageio
resource_dir = imageio.core.resource_dirs()[0]
_set_crossplatform_resources(resource_dir)
_set_platform_resources(resource_dir, imageio.core.get_platform())
# Build as normal
build_py.run(self)
class build_with_images(sdist):
def run(self):
# Download images
import imageio
resource_dir = imageio.core.resource_dirs()[0]
_set_crossplatform_resources(resource_dir)
# Build as normal
sdist.run(self)
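# Example invocations (illustrative; the cmdclass mapping below wires these up):
#   python setup.py build_with_fi bdist_wheel   # bundle freeimage + images
#   python setup.py sdist                       # aliased to build_with_images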
# pinned to > 8.3.2 due to security vulnerability
# See: https://github.com/advisories/GHSA-98vv-pw6r-q6q4
install_requires = ["numpy >= 1.20.0", "pillow >= 8.3.2"]
extras_require = {
"build": ["wheel"],
"linting": ["black", "flake8"],
"test": ["invoke", "pytest", "pytest-cov", "fsspec[github]"],
"docs": ["sphinx", "numpydoc", "pydata-sphinx-theme"],
"itk": ["itk"],
"bsdf": [],
"dicom": [],
"feisem": [],
"ffmpeg": ["imageio-ffmpeg", "psutil"],
"fits": ["astropy"],
"freeimage": [],
"gdal": ["gdal"],
"lytro": [],
"numpy": [],
"pillow": [],
"simpleitk": [],
"spe": [],
"swf": [],
"tifffile": ["tifffile"],
}
extras_require["full"] = sorted(set(chain.from_iterable(extras_require.values())))
extras_require["dev"] = extras_require["test"] + extras_require["linting"]
setup(
cmdclass={ # 'bdist_wheel_all': bdist_wheel_all,
# 'sdist_all': sdist_all,
"build_with_images": build_with_images,
"build_with_fi": build_with_fi,
"sdist": build_with_images,
"test": test_command,
},
name=name,
version=__version__,
author="imageio contributors",
author_email="almar.klein@gmail.com",
license="BSD-2-Clause",
url="https://github.com/imageio/imageio",
download_url="http://pypi.python.org/pypi/imageio",
keywords="image video volume imread imwrite io animation ffmpeg",
description=description,
long_description=long_description.replace("__doc__", __doc__),
platforms="any",
provides=["imageio"],
python_requires=">=3.5",
install_requires=install_requires,
extras_require=extras_require,
packages=["imageio", "imageio.core", "imageio.plugins", "imageio.config"],
package_dir={"imageio": "imageio"},
# Data in the package
package_data={"imageio": package_data},
entry_points={
"console_scripts": [
"imageio_download_bin=imageio.__main__:download_bin_main",
"imageio_remove_bin=imageio.__main__:remove_bin_main",
]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Education",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
)
|
|
#!/usr/bin/env python
import openravepy
import unittest
import numpy
from openravepy import Environment
from prpy.clone import CloneException
from prpy.planning.base import PlanningError
from prpy.planning.cbirrt import CBiRRTPlanner
from prpy.planning.ompl import OMPLPlanner, OMPLSimplifier
from prpy.planning.retimer import ParabolicRetimer
from prpy.planning.mac_smoother import MacSmoother
from numpy.testing import assert_allclose
VerifyTrajectory = openravepy.planningutils.VerifyTrajectory
# Generic test setup
#
# This class is the base class of all planner tests. It is responsible for
# setting up the environment, but does not directly implement any tests.
class BasePlannerTest(object):
"""Generic environment setup.
"""
active_dof_indices = range(7)
# Feasible start/goal pair.
config_feasible_start = numpy.array([
+2.35061574, 0.61043555, 0.85000000, 1.80684444, -0.08639935,
-0.69750474, 1.31656172
])
config_feasible_goal = numpy.array([
-0.84085883, 1.44573701, 0.20000000, 1.72620231, -0.81124757,
-1.39363597, 1.29233111
])
# This configuration is in collision with the environment, but is not in
# self-collision.
config_env_collision = numpy.array([
+3.63026273e-01, -1.54688036e+00, -1.30000000e+00,
+2.34703418e+00, 3.28152338e-01, -1.10662864e+00,
-2.07807269e-01
])
# This configuration is in self-collision, but is not in collision with the
# environment.
config_self_collision = numpy.array([
2.60643264e+00, 1.97222205e+00, -8.24298541e-16,
2.79154009e+00, 1.30899694e+00, -4.71027738e-16,
0.00000000e+00
])
# Waypoints that can be pair-wise connected by straight line paths.
waypoint1 = numpy.array([
1.12376031, 0.60576977, -0.05000000,
1.33403907, 0.44772461, -0.31481177,
1.90265540
])
waypoint2 = numpy.array([
1.53181533, 0.80270404, -0.05000000,
1.75341989, 0.21348846, -0.91026757,
1.59603932
])
waypoint3 = numpy.array([
1.50376031, 0.60576977, -0.05000000,
1.33403907, 0.44772461, -0.31481177,
1.90265540
])
def setUp(self):
self.env = Environment()
self.env.Load('data/wamtest2.env.xml')
self.robot = self.env.GetRobot('BarrettWAM')
self.manipulator = self.robot.GetManipulator('arm')
# TODO: Planning should succeed with the floor present if CO_ActiveOnly
# is set.
self.env.Remove(self.env.GetKinBody('floor'))
with self.env:
self.robot.SetActiveManipulator(self.manipulator)
self.robot.SetActiveDOFs(self.active_dof_indices)
self.planner = self.planner_factory()
def tearDown(self):
self.env.Destroy()
def _ValidatePath(self, path):
self.assertEquals(path.GetEnv(), self.env)
self.assertEquals(self.robot.GetActiveConfigurationSpecification('linear'),
path.GetConfigurationSpecification())
self.assertGreaterEqual(path.GetNumWaypoints(), 1)
def _CollisionCheckPath(self, traj):
# NOTE: This assumes that the trajectory only contains joint_values.
OpenStart = openravepy.Interval.OpenStart
params = openravepy.Planner.PlannerParameters()
params.SetRobotActiveJoints(self.robot)
# Check the first waypoint for collision. We do this outside of the
        # loop so we can run all collision checks with OpenStart. This prevents
# us from collision checking the intermediate waypoints twice.
prev_waypoint = traj.GetWaypoint(0)
check = params.CheckPathAllConstraints(prev_waypoint, prev_waypoint,
[], [], 0., OpenStart)
if check != 0:
return True
# Check the remainder of the path.
for iwaypoint in xrange(1, traj.GetNumWaypoints() - 1):
curr_waypoint = traj.GetWaypoint(iwaypoint)
check = params.CheckPathAllConstraints(
prev_waypoint, curr_waypoint, [], [], 0., OpenStart)
if check != 0:
return True
prev_waypoint = curr_waypoint
return False
def _ComputeArcLength(self, traj):
distance = 0.
for iwaypoint in xrange(1, traj.GetNumWaypoints()):
prev_waypoint = traj.GetWaypoint(iwaypoint - 1)
curr_waypoint = traj.GetWaypoint(iwaypoint)
distance += numpy.linalg.norm(curr_waypoint - prev_waypoint)
return distance
def assertTransformClose(self, actual_pose, expected_pose,
linear_tol=1e-3, angular_tol=1e-3):
rel_pose = numpy.dot(numpy.linalg.inv(actual_pose), expected_pose)
distance = numpy.linalg.norm(rel_pose[0:3, 3])
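        # rotation angle recovered from the trace: tr(R) = 1 + 2*cos(angle)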
angle = numpy.arccos((numpy.trace(rel_pose[0:3, 0:3]) - 1.) / 2.)
self.assertLessEqual(distance, linear_tol)
self.assertLessEqual(angle, angular_tol)
# Method-specific tests
#
# Methods from BasePlannerTest are also available in these classes. However,
# they should NOT inherit from BasePlannerTest.
class PlanToConfigurationTest(object):
def test_PlanToConfiguration_GoalIsFeasible_FindsSolution(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_feasible_start)
# Test
path = self.planner.PlanToConfiguration(
self.robot, self.config_feasible_goal)
# Assert.
self.assertEquals(path.GetEnv(), self.env)
self.assertEquals(self.robot.GetActiveConfigurationSpecification('linear'),
path.GetConfigurationSpecification())
self.assertGreaterEqual(path.GetNumWaypoints(), 1)
first_waypoint = path.GetWaypoint(0)
last_waypoint = path.GetWaypoint(path.GetNumWaypoints() - 1)
self._ValidatePath(path)
assert_allclose(first_waypoint, self.config_feasible_start)
assert_allclose(last_waypoint, self.config_feasible_goal)
self.assertFalse(self._CollisionCheckPath(path))
def test_PlanToConfiguration_StartInCollision_Throws(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_env_collision)
# Test/Assert
with self.assertRaises(PlanningError):
self.planner.PlanToConfiguration(
self.robot, self.config_feasible_goal)
def test_PlanToConfiguration_StartInSelfCollision_Throws(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_self_collision)
# Test/Assert
with self.assertRaises((PlanningError, CloneException)):
self.planner.PlanToConfiguration(
self.robot, self.config_feasible_goal)
def test_PlanToConfiguration_GoalInCollision_Throws(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_feasible_start)
# Test/Assert
with self.assertRaises(PlanningError):
self.planner.PlanToConfiguration(
self.robot, self.config_env_collision)
def test_PlanToConfiguration_GoalInSelfCollision_Throws(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_feasible_start)
# Test/Assert
with self.assertRaises(PlanningError):
self.planner.PlanToConfiguration(
self.robot, self.config_self_collision)
class PlanToEndEffectorPoseTest(object):
def test_PlanToEndEffectorPose_GoalIsFeasible_FindsSolution(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_feasible_goal)
goal_ik = self.manipulator.GetEndEffectorTransform()
self.robot.SetActiveDOFValues(self.config_feasible_start)
# Test
path = self.planner.PlanToEndEffectorPose(self.robot, goal_ik)
# Assert
self._ValidatePath(path)
first_waypoint = path.GetWaypoint(0)
assert_allclose(first_waypoint, self.config_feasible_start)
with self.env:
last_waypoint = path.GetWaypoint(path.GetNumWaypoints() - 1)
self.robot.SetActiveDOFValues(last_waypoint)
last_ik = self.manipulator.GetEndEffectorTransform()
self.assertTransformClose(last_ik, goal_ik)
self.assertFalse(self._CollisionCheckPath(path))
def test_PlanToEndEffectorPose_StartInCollision_Throws(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_feasible_goal)
goal_ik = self.manipulator.GetEndEffectorTransform()
self.robot.SetActiveDOFValues(self.config_env_collision)
# Test/Assert
with self.assertRaises(PlanningError):
self.planner.PlanToEndEffectorPose(self.robot, goal_ik)
def test_PlanToEndEffectorPose_StartInSelfCollision_Throws(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_feasible_goal)
goal_ik = self.manipulator.GetEndEffectorTransform()
self.robot.SetActiveDOFValues(self.config_self_collision)
# Test/Assert
with self.assertRaises((PlanningError, CloneException)):
self.planner.PlanToEndEffectorPose(self.robot, goal_ik)
def test_PlanToEndEffectorPose_GoalInCollision_Throws(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_env_collision)
goal_ik = self.manipulator.GetEndEffectorTransform()
self.robot.SetActiveDOFValues(self.config_feasible_start)
# Test/Assert
with self.assertRaises(PlanningError):
self.planner.PlanToEndEffectorPose(self.robot, goal_ik)
def test_PlanToEndEffectorPose_GoalInSelfCollision_Throws(self):
# Setup
with self.env:
self.robot.SetActiveDOFValues(self.config_self_collision)
goal_ik = self.manipulator.GetEndEffectorTransform()
self.robot.SetActiveDOFValues(self.config_feasible_start)
# Test/Assert
with self.assertRaises(PlanningError):
self.planner.PlanToEndEffectorPose(self.robot, goal_ik)
class ShortcutPathTest(object):
def setUp(self):
self.input_path = openravepy.RaveCreateTrajectory(self.env, '')
self.input_path.Init(self.robot.GetActiveConfigurationSpecification('linear'))
self.input_path.Insert(0, self.waypoint1)
self.input_path.Insert(1, self.waypoint2)
self.input_path.Insert(2, self.waypoint3)
def test_ShortcutPath_ShortcutExists_ReducesLength(self):
# Setup/Test
smoothed_path = self.planner.ShortcutPath(self.robot, self.input_path)
# Assert
self.assertEquals(smoothed_path.GetConfigurationSpecification(),
self.input_path.GetConfigurationSpecification())
self.assertGreaterEqual(smoothed_path.GetNumWaypoints(), 2)
n = smoothed_path.GetNumWaypoints()
assert_allclose(smoothed_path.GetWaypoint(0), self.waypoint1)
assert_allclose(smoothed_path.GetWaypoint(n - 1), self.waypoint3)
self.assertLess(self._ComputeArcLength(smoothed_path),
0.5 * self._ComputeArcLength(self.input_path))
# TODO: Test some of the error cases.
class SmoothTrajectoryTest(object):
def setUp(self):
cspec = self.robot.GetActiveConfigurationSpecification('linear')
self.feasible_path = openravepy.RaveCreateTrajectory(self.env, '')
self.feasible_path.Init(cspec)
self.feasible_path.Insert(0, self.waypoint1)
self.feasible_path.Insert(1, self.waypoint2)
self.feasible_path.Insert(2, self.waypoint3)
def test_SmoothTrajectory_DoesNotModifyStartPoint(self):
# Setup/Test
traj = self.planner.RetimeTrajectory(self.robot, self.feasible_path)
# Assert
cspec = self.robot.GetActiveConfigurationSpecification('linear')
self.assertGreaterEqual(traj.GetNumWaypoints(), 2)
first_waypoint = traj.GetWaypoint(0, cspec)
last_waypoint = traj.GetWaypoint(traj.GetNumWaypoints() - 1, cspec)
assert_allclose(first_waypoint, self.waypoint1)
assert_allclose(last_waypoint, self.waypoint3)
class RetimeTrajectoryTest(object):
def setUp(self):
cspec = self.robot.GetActiveConfigurationSpecification('linear')
self.feasible_path = openravepy.RaveCreateTrajectory(self.env, '')
self.feasible_path.Init(cspec)
self.feasible_path.Insert(0, self.waypoint1)
self.feasible_path.Insert(1, self.waypoint2)
self.feasible_path.Insert(2, self.waypoint3)
self.dt = 0.01
self.tolerance = 0.1 # 10% error
def test_RetimeTrajectory(self):
# Setup/Test
traj = self.planner.RetimeTrajectory(self.robot, self.feasible_path)
# Assert
position_cspec = self.feasible_path.GetConfigurationSpecification()
velocity_cspec = position_cspec.ConvertToDerivativeSpecification(1)
zero_dof_values = numpy.zeros(position_cspec.GetDOF())
# Verify that the trajectory passes through the original waypoints.
waypoints = [self.waypoint1, self.waypoint2, self.waypoint3]
waypoint_indices = [None] * len(waypoints)
for iwaypoint in xrange(traj.GetNumWaypoints()):
joint_values = traj.GetWaypoint(iwaypoint, position_cspec)
# Compare the waypoint against every input waypoint.
for icandidate, candidate_waypoint in enumerate(waypoints):
if numpy.allclose(joint_values, candidate_waypoint):
self.assertIsNone(waypoint_indices[icandidate])
waypoint_indices[icandidate] = iwaypoint
self.assertEquals(waypoint_indices[0], 0)
self.assertEquals(waypoint_indices[-1], traj.GetNumWaypoints() - 1)
for iwaypoint in waypoint_indices:
self.assertIsNotNone(iwaypoint)
# Verify that the velocity at the waypoint is zero.
joint_velocities = traj.GetWaypoint(iwaypoint, velocity_cspec)
assert_allclose(joint_velocities, zero_dof_values)
# Verify the trajectory between waypoints.
for t in numpy.arange(self.dt, traj.GetDuration(), self.dt):
iafter = traj.GetFirstWaypointIndexAfterTime(t)
ibefore = iafter - 1
joint_values = traj.Sample(t, position_cspec)
joint_values_before = traj.GetWaypoint(ibefore, position_cspec)
joint_values_after = traj.GetWaypoint(iafter, position_cspec)
distance_full = numpy.linalg.norm(
joint_values_after - joint_values_before)
distance_before = numpy.linalg.norm(
joint_values - joint_values_before)
distance_after = numpy.linalg.norm(
joint_values - joint_values_after)
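            # A sample on the straight segment between the two waypoints gives
            # distance_before + distance_after == distance_full, so the
            # deviation below measures how far the sample strays from it.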
deviation = distance_before + distance_after - distance_full
self.assertLess(deviation, self.tolerance * distance_full)
# Check joint limits and dynamic feasibility.
params = openravepy.Planner.PlannerParameters()
params.SetRobotActiveJoints(self.robot)
openravepy.planningutils.VerifyTrajectory(params, traj, self.dt)
# TODO: Test failure cases.
# Planner-specific tests
#
# Each of these classes MUST EXTEND BasePlannerTest, one or more
# method-specific test classes (e.g. PlanToConfigurationTests), and the
# unittest.TestCase class. The unittest.TestCase class MUST APPEAR LAST in the
# list of base classes.
class CBiRRTPlannerTests(BasePlannerTest,
PlanToConfigurationTest,
PlanToEndEffectorPoseTest,
unittest.TestCase):
planner_factory = CBiRRTPlanner
class OMPLPlannerTests(BasePlannerTest,
PlanToConfigurationTest,
unittest.TestCase):
planner_factory = OMPLPlanner
class OMPLSimplifierTests(BasePlannerTest,
ShortcutPathTest,
unittest.TestCase):
planner_factory = OMPLSimplifier
def setUp(self):
BasePlannerTest.setUp(self)
ShortcutPathTest.setUp(self)
class ParabolicRetimerTests(BasePlannerTest,
RetimeTrajectoryTest,
unittest.TestCase):
planner_factory = ParabolicRetimer
def setUp(self):
BasePlannerTest.setUp(self)
RetimeTrajectoryTest.setUp(self)
class MacSmootherTests(BasePlannerTest,
SmoothTrajectoryTest,
unittest.TestCase):
planner_factory = MacSmoother
def setUp(self):
BasePlannerTest.setUp(self)
SmoothTrajectoryTest.setUp(self)
if __name__ == '__main__':
openravepy.RaveInitialize(True)
openravepy.misc.InitOpenRAVELogging()
openravepy.RaveSetDebugLevel(openravepy.DebugLevel.Warn)
unittest.main()
|
|
import numpy as np
import tensorflow as tf
import awesome_gans.modules as t
tf.set_random_seed(777) # reproducibility
np.random.seed(777) # reproducibility
he_normal = tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True)
l2_reg = tf.contrib.layers.l2_regularizer
def pixel_norm(x, eps=1e-8):
    # per-pixel feature-vector normalization; tensors here are NHWC, so the
    # channel axis is the last one (the PGGAN reference code is NCHW, axis=1)
    return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=-1, keepdims=True) + eps)
def resize_nn(x, size):
return tf.image.resize_nearest_neighbor(x, size=(int(size), int(size)))
def batch_concat(x, eps=1e-8, averaging='all'):
    """Minibatch-stddev trick: appends one feature map of cross-batch statistics,
    so an input of shape [N, 4, 4, C] comes out as [N, 4, 4, C + 1].
    ref : https://github.com/zhangqianhui/progressive_growing_of_gans_tensorflow/blob/master/ops.py#L145
    """
adj_std = lambda x_, **kwargs: tf.sqrt(tf.reduce_mean((x_ - tf.reduce_mean(x_, **kwargs)) ** 2, **kwargs) + eps)
val_ = adj_std(x, axis=0, keepdims=True)
if averaging == 'all':
val_ = tf.reduce_mean(val_, keepdims=True)
val_ = tf.tile(val_, multiples=[tf.shape(x)[0], 4, 4, 1])
return tf.concat([x, val_], axis=3)
class PGGAN:
def __init__(
self,
s,
batch_size=16,
input_height=128,
input_width=128,
input_channel=3,
pg=1,
pg_t=False,
sample_num=1 * 1,
sample_size=1,
output_height=128,
output_width=128,
df_dim=64,
gf_dim=64,
z_dim=512,
lr=1e-4,
epsilon=1e-9,
):
"""
# General Settings
:param s: TF Session
:param batch_size: training batch size, default 16
:param input_height: input image height, default 128
:param input_width: input image width, default 128
:param input_channel: input image channel, default 3 (RGB)
- in case of Celeb-A, image size is 128x128x3(HWC).
# Output Settings
:param pg: size of the image for model?, default 1
:param pg_t: pg status, default False
:param sample_num: the number of output images, default 1
:param sample_size: sample image size, default 1
:param output_height: output images height, default 128
:param output_width: output images width, default 128
# For CNN model
:param df_dim: discriminator filter, default 64
:param gf_dim: generator filter, default 64
# Training Option
:param z_dim: z dimension (kinda noise), default 512
:param lr: learning rate, default 1e-4
:param epsilon: epsilon, default 1e-9
"""
self.s = s
self.batch_size = batch_size
self.input_height = input_height
self.input_width = input_width
self.input_channel = input_channel
self.image_shape = [self.batch_size, self.input_height, self.input_width, self.input_channel]
self.sample_num = sample_num
self.sample_size = sample_size
self.output_height = output_height
self.output_width = output_width
self.pg = pg
self.pg_t = pg_t
self.output_size = 4 * pow(2, self.pg - 1)
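        # e.g. pg=1 -> 4x4, pg=4 -> 32x32, pg=6 -> 128x128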
self.df_dim = df_dim
self.gf_dim = gf_dim
self.z_dim = z_dim
self.beta1 = 0.0
self.beta2 = 0.99
self.lr = lr
self.eps = epsilon
# pre-defined
self.d_real = 0.0
self.d_fake = 0.0
self.g_loss = 0.0
self.d_loss = 0.0
self.gp = 0.0
self.gp_target = 1.0
self.gp_lambda = 10.0 # slower convergence but good
self.gp_w = 1e-3
self.g = None
self.d_op = None
self.g_op = None
self.merged = None
self.writer = None
self.saver = None
self.r_saver = None
self.out_saver = None
# Placeholders
self.x = tf.placeholder(
tf.float32, shape=[None, self.output_size, self.output_size, self.input_channel], name="x-image"
)
self.z = tf.placeholder(tf.float32, shape=[None, self.z_dim], name='z-noise')
self.step_pl = tf.placeholder(tf.float32, shape=None)
self.alpha_trans = tf.Variable(initial_value=0.0, trainable=False, name='alpha_trans')
self.alpha_trans_update = None
self.build_pggan() # build PGGAN model
def discriminator(self, x, pg=1, pg_t=False, reuse=None):
def nf(n):
return min(1024 // (2 ** n), self.z_dim)
with tf.variable_scope("disc", reuse=reuse):
if pg_t:
x_out = tf.layers.average_pooling2d(x, pool_size=2, strides=2)
x_out = t.conv2d(x_out, nf(pg - 2), k=1, s=1, name='disc_out_conv2d-%d' % x_out.get_shape()[1])
x_out = tf.nn.leaky_relu(x_out)
x = t.conv2d(x, nf(pg - 1), k=1, s=1, name='disc_out_conv2d-%d' % x.get_shape()[1])
x = tf.nn.leaky_relu(x)
for i in range(pg - 1):
x = t.conv2d(x, nf(pg - 1 - i), k=1, s=1, name='disc_n_1_conv2d-%d' % x.get_shape()[1])
x = tf.nn.leaky_relu(x)
x = t.conv2d(x, nf(pg - 2 - i), k=1, s=1, name='disc_n_2_conv2d-%d' % x.get_shape()[1])
x = tf.nn.leaky_relu(x)
x = tf.layers.average_pooling2d(x, pool_size=2, strides=2)
if i == 0 and pg_t:
x = (1.0 - self.alpha_trans) * x_out + self.alpha_trans * x
            x = batch_concat(x)
x = t.conv2d(x, nf(1), k=3, s=1, name='disc_n_1_conv2d-%d' % x.get_shape()[1])
x = tf.nn.leaky_relu(x)
x = t.conv2d(x, nf(1), k=4, s=1, pad='VALID', name='disc_n_2_conv2d-%d' % x.get_shape()[1])
x = tf.nn.leaky_relu(x)
x = tf.layers.flatten(x)
x = tf.layers.dense(x, 1, name='disc_n_fc')
return x
def generator(self, z, pg=1, pg_t=False, reuse=None):
def nf(n):
return min(1024 // (2 ** n), self.z_dim)
def block(x, fs, name="0"):
x = resize_nn(x, x.get_shape()[1] * 2)
x = t.conv2d(x, fs, k=3, s=1, name='gen_n_%s_conv2d-%d' % (name, x.get_shape()[1]))
x = tf.nn.leaky_relu(x)
x = pixel_norm(x)
return x
with tf.variable_scope("gen", reuse=reuse):
x = tf.reshape(z, [-1, 1, 1, nf(1)])
x = t.conv2d(x, nf(1), k=4, s=1, name='gen_n_1_conv2d')
x = tf.nn.leaky_relu(x)
x = pixel_norm(x)
x = tf.reshape(x, [-1, 4, 4, nf(1)])
x = t.conv2d(x, nf(1), k=3, s=1, name='gen_n_2_conv2d')
x = tf.nn.leaky_relu(x)
x = pixel_norm(x)
x_out = None
for i in range(pg - 1):
if i == pg - 2 and pg_t:
x_out = t.conv2d(x, 3, k=1, s=1, name='gen_out_conv2d-%d' % x.get_shape()[1]) # to RGB images
x_out = resize_nn(x_out, x_out.get_shape()[1] * 2) # up-sampling
x = block(x, nf(i + 1), name="1")
x = block(x, nf(i + 1), name="2")
x = t.conv2d(x, 3, k=1, s=1, name='gen_out_conv2d-%d' % x.get_shape()[1]) # to RGB images
if pg == 1:
return x
if pg_t:
x = (1.0 - self.alpha_trans) * x_out + self.alpha_trans * x
return x
def build_pggan(self):
self.alpha_trans_update = self.alpha_trans.assign(self.step_pl / 32000)
# Generator
self.g = self.generator(self.z, self.pg, self.pg_t)
# Discriminator
d_real = self.discriminator(self.x, self.pg, self.pg_t)
d_fake = self.discriminator(self.g, self.pg, self.pg_t, reuse=True)
        # Loss (WGAN-GP style critic loss, plus a small drift penalty on the real logits)
d_real_loss = tf.reduce_mean(d_real)
d_fake_loss = tf.reduce_mean(d_fake)
self.d_loss = d_real_loss - d_fake_loss
self.g_loss = d_fake_loss
# Gradient Penalty
diff = self.g - self.x
alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0.0, maxval=1.0)
interp = self.x + (alpha * diff)
d_interp = self.discriminator(interp, self.pg, self.pg_t, reuse=True)
grads = tf.gradients(d_interp, [interp])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), reduction_indices=[1, 2, 3]))
self.gp = tf.reduce_mean(tf.square(slopes - self.gp_target))
self.d_loss += (self.gp_lambda / (self.gp_target ** 2)) * self.gp + self.gp_w * tf.reduce_mean(
tf.square(d_real - 0.0)
)
# Summary
tf.summary.scalar("loss/d_loss", self.d_loss)
tf.summary.scalar("loss/d_real_loss", d_real_loss)
tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
tf.summary.scalar("loss/g_loss", self.g_loss)
tf.summary.scalar("misc/gp", self.gp)
# Training Parameters
t_vars = tf.trainable_variables()
d_params = [v for v in t_vars if v.name.startswith('disc')]
g_params = [v for v in t_vars if v.name.startswith('gen')]
d_n_params = [v for v in d_params if 'disc_n' in v.name]
g_n_params = [v for v in g_params if 'gen_n' in v.name]
d_n_out_params = [v for v in d_params if 'disc_out' in v.name]
g_n_out_params = [v for v in g_params if 'gen_out' in v.name]
d_n_nwm_params = [v for v in d_n_params if '%d' % self.output_size not in v.name] # nwm : not new model
g_n_nwm_params = [v for v in g_n_params if '%d' % self.output_size not in v.name] # nwm : not new model
d_n_out_nwm_params = [v for v in d_n_out_params if '%d' % self.output_size not in v.name]
g_n_out_nwm_params = [v for v in g_n_out_params if '%d' % self.output_size not in v.name]
# Optimizer
self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1, beta2=self.beta2).minimize(
self.d_loss, var_list=d_params
)
self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1, beta2=self.beta2).minimize(
self.g_loss, var_list=g_params
)
# Merge summary
self.merged = tf.summary.merge_all()
# Model saver
mtk = 2
self.saver = tf.train.Saver(var_list=d_params + g_params, max_to_keep=mtk, name='saver')
self.r_saver = tf.train.Saver(var_list=d_n_nwm_params + g_n_nwm_params, max_to_keep=mtk, name='r_saver')
if len(d_n_out_nwm_params + g_n_out_nwm_params):
self.out_saver = tf.train.Saver(
var_list=d_n_out_nwm_params + g_n_out_nwm_params, max_to_keep=mtk, name='out_saver'
)
self.writer = tf.summary.FileWriter('./model/', self.s.graph)
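

# ---------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# file). The enclosing class name is not visible in this excerpt, so the
# helper takes the built model object as an argument; `next_batch` and
# `sample_z` are hypothetical data/noise callables.
def _example_train_step(model, global_step, next_batch, sample_z):
    """One alternating WGAN-GP update, annealing alpha_trans (a sketch)."""
    feed = {
        model.x: next_batch(model.batch_size),
        model.z: sample_z(model.batch_size, model.z_dim),
        model.step_pl: float(global_step),
    }
    # Anneal the fade-in coefficient toward 1 as training progresses.
    model.s.run(model.alpha_trans_update, feed_dict=feed)
    # Critic update, then generator update.
    _, d_loss = model.s.run([model.d_op, model.d_loss], feed_dict=feed)
    _, g_loss = model.s.run([model.g_op, model.g_loss], feed_dict=feed)
    return d_loss, g_loss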
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'FileBlob.storage_options'
db.delete_column(u'sentry_fileblob', 'storage_options')
# Deleting field 'FileBlob.storage'
db.delete_column(u'sentry_fileblob', 'storage')
# Deleting field 'File.storage_options'
db.delete_column(u'sentry_file', 'storage_options')
# Deleting field 'File.storage'
db.delete_column(u'sentry_file', 'storage')
def backwards(self, orm):
raise RuntimeError(
"Cannot reverse this migration. 'FileBlob.storage' and its values cannot be restored."
)
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2015, 12, 9, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']",
'null': 'True'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.group': {
'Meta': {
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'),)"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
|
|
# Natural Language Toolkit: API for Corpus Readers
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
API for corpus readers.
"""
from __future__ import unicode_literals
import os
import re
from collections import defaultdict
from nltk import compat
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from .util import *
@compat.python_2_unicode_compatible
class CorpusReader(object):
"""
A base class for "corpus reader" classes, each of which can be
used to read a specific corpus format. Each individual corpus
reader instance is used to read a specific corpus, consisting of
one or more files under a common root directory. Each file is
identified by its ``file identifier``, which is the relative path
to the file from the root directory.
    A separate subclass is defined for each corpus format.  These
subclasses define one or more methods that provide 'views' on the
corpus contents, such as ``words()`` (for a list of words) and
``parsed_sents()`` (for a list of parsed sentences). Called with
no arguments, these methods will return the contents of the entire
corpus. For most corpora, these methods define one or more
selection arguments, such as ``fileids`` or ``categories``, which can
be used to select which portion of the corpus should be returned.
"""
def __init__(self, root, fileids, encoding='utf8', tag_mapping_function=None):
"""
:type root: PathPointer or str
:param root: A path pointer identifying the root directory for
this corpus. If a string is specified, then it will be
converted to a ``PathPointer`` automatically.
:param fileids: A list of the files that make up this corpus.
This list can either be specified explicitly, as a list of
strings; or implicitly, as a regular expression over file
paths. The absolute path for each file will be constructed
by joining the reader's root to each file name.
:param encoding: The default unicode encoding for the files
that make up the corpus. The value of ``encoding`` can be any
of the following:
- A string: ``encoding`` is the encoding name for all files.
- A dictionary: ``encoding[file_id]`` is the encoding
name for the file whose identifier is ``file_id``. If
``file_id`` is not in ``encoding``, then the file
contents will be processed using non-unicode byte strings.
- A list: ``encoding`` should be a list of ``(regexp, encoding)``
tuples. The encoding for a file whose identifier is ``file_id``
will be the ``encoding`` value for the first tuple whose
``regexp`` matches the ``file_id``. If no tuple's ``regexp``
matches the ``file_id``, the file contents will be processed
using non-unicode byte strings.
- None: the file contents of all files will be
processed using non-unicode byte strings.
:param tag_mapping_function: A function for normalizing or
simplifying the POS tags returned by the tagged_words()
or tagged_sents() methods.
"""
# Convert the root to a path pointer, if necessary.
if isinstance(root, compat.string_types) and not isinstance(root, PathPointer):
            m = re.match(r'(.*\.zip)/?(.*)$|', root)
zipfile, zipentry = m.groups()
if zipfile:
root = ZipFilePathPointer(zipfile, zipentry)
else:
root = FileSystemPathPointer(root)
elif not isinstance(root, PathPointer):
raise TypeError('CorpusReader: expected a string or a PathPointer')
# If `fileids` is a regexp, then expand it.
if isinstance(fileids, compat.string_types):
fileids = find_corpus_fileids(root, fileids)
self._fileids = fileids
"""A list of the relative paths for the fileids that make up
this corpus."""
self._root = root
"""The root directory for this corpus."""
# If encoding was specified as a list of regexps, then convert
# it to a dictionary.
if isinstance(encoding, list):
encoding_dict = {}
for fileid in self._fileids:
for x in encoding:
(regexp, enc) = x
if re.match(regexp, fileid):
encoding_dict[fileid] = enc
break
encoding = encoding_dict
self._encoding = encoding
"""The default unicode encoding for the fileids that make up
this corpus. If ``encoding`` is None, then the file
contents are processed using byte strings."""
self._tag_mapping_function = tag_mapping_function
def __repr__(self):
if isinstance(self._root, ZipFilePathPointer):
path = '%s/%s' % (self._root.zipfile.filename, self._root.entry)
else:
path = '%s' % self._root.path
return '<%s in %r>' % (self.__class__.__name__, path)
def readme(self):
"""
Return the contents of the corpus README file, if it exists.
"""
return self.open("README").read()
def fileids(self):
"""
Return a list of file identifiers for the fileids that make up
this corpus.
"""
return self._fileids
def abspath(self, fileid):
"""
Return the absolute path for the given file.
        :type fileid: str
        :param fileid: The file identifier for the file whose path
            should be returned.
:rtype: PathPointer
"""
return self._root.join(fileid)
def abspaths(self, fileids=None, include_encoding=False,
include_fileid=False):
"""
Return a list of the absolute paths for all fileids in this corpus;
or for the given list of fileids, if specified.
:type fileids: None or str or list
:param fileids: Specifies the set of fileids for which paths should
be returned. Can be None, for all fileids; a list of
file identifiers, for a specified set of fileids; or a single
file identifier, for a single file. Note that the return
value is always a list of paths, even if ``fileids`` is a
single file identifier.
        :param include_encoding: If true, then return a list of
            ``(path_pointer, encoding)`` tuples.
        :param include_fileid: If true, then also include each file
            identifier in the returned tuples.
        :rtype: list(PathPointer)
"""
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, compat.string_types):
fileids = [fileids]
paths = [self._root.join(f) for f in fileids]
if include_encoding and include_fileid:
return zip(paths, [self.encoding(f) for f in fileids], fileids)
elif include_fileid:
return zip(paths, fileids)
elif include_encoding:
return zip(paths, [self.encoding(f) for f in fileids])
else:
return paths
def open(self, file):
"""
Return an open stream that can be used to read the given file.
If the file's encoding is not None, then the stream will
automatically decode the file's contents into unicode.
:param file: The file identifier of the file to read.
"""
encoding = self.encoding(file)
stream = self._root.join(file).open(encoding)
return stream
def encoding(self, file):
"""
Return the unicode encoding for the given corpus file, if known.
If the encoding is unknown, or if the given file should be
processed using byte strings (str), then return None.
"""
if isinstance(self._encoding, dict):
return self._encoding.get(file)
else:
return self._encoding
def _get_root(self): return self._root
root = property(_get_root, doc="""
The directory where this corpus is stored.
:type: PathPointer""")
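
# Hedged usage sketch (added for illustration; not part of the original
# NLTK module). Shows the list form of the ``encoding`` argument
# documented in CorpusReader.__init__ above. The corpus root and file
# layout are hypothetical, so this only succeeds against a real
# directory.
def _example_encoding_list(root='/tmp/mycorpus'):
    reader = CorpusReader(
        root,
        fileids=r'.*\.txt',
        encoding=[
            (r'^latin1/', 'latin-1'),  # files under latin1/ decode as Latin-1
            (r'.*', 'utf8'),           # everything else defaults to UTF-8
        ])
    # The list is folded into a per-fileid dict during construction.
    return [(f, reader.encoding(f)) for f in reader.fileids()]
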
######################################################################
#{ Corpora containing categorized items
######################################################################
class CategorizedCorpusReader(object):
"""
A mixin class used to aid in the implementation of corpus readers
for categorized corpora. This class defines the method
``categories()``, which returns a list of the categories for the
corpus or for a specified set of fileids; and overrides ``fileids()``
to take a ``categories`` argument, restricting the set of fileids to
be returned.
Subclasses are expected to:
- Call ``__init__()`` to set up the mapping.
- Override all view methods to accept a ``categories`` parameter,
which can be used *instead* of the ``fileids`` parameter, to
select which fileids should be included in the returned view.
"""
def __init__(self, kwargs):
"""
Initialize this mapping based on keyword arguments, as
follows:
- cat_pattern: A regular expression pattern used to find the
category for each file identifier. The pattern will be
applied to each file identifier, and the first matching
group will be used as the category label for that file.
- cat_map: A dictionary, mapping from file identifiers to
category labels.
- cat_file: The name of a file that contains the mapping
from file identifiers to categories. The argument
``cat_delimiter`` can be used to specify a delimiter.
The corresponding argument will be deleted from ``kwargs``. If
more than one argument is specified, an exception will be
raised.
"""
self._f2c = None #: file-to-category mapping
self._c2f = None #: category-to-file mapping
self._pattern = None #: regexp specifying the mapping
self._map = None #: dict specifying the mapping
self._file = None #: fileid of file containing the mapping
self._delimiter = None #: delimiter for ``self._file``
if 'cat_pattern' in kwargs:
self._pattern = kwargs['cat_pattern']
del kwargs['cat_pattern']
elif 'cat_map' in kwargs:
self._map = kwargs['cat_map']
del kwargs['cat_map']
elif 'cat_file' in kwargs:
self._file = kwargs['cat_file']
del kwargs['cat_file']
if 'cat_delimiter' in kwargs:
self._delimiter = kwargs['cat_delimiter']
del kwargs['cat_delimiter']
else:
raise ValueError('Expected keyword argument cat_pattern or '
'cat_map or cat_file.')
if ('cat_pattern' in kwargs or 'cat_map' in kwargs or
'cat_file' in kwargs):
raise ValueError('Specify exactly one of: cat_pattern, '
'cat_map, cat_file.')
def _init(self):
self._f2c = defaultdict(set)
self._c2f = defaultdict(set)
if self._pattern is not None:
for file_id in self._fileids:
category = re.match(self._pattern, file_id).group(1)
self._add(file_id, category)
elif self._map is not None:
for (file_id, categories) in self._map.items():
for category in categories:
self._add(file_id, category)
elif self._file is not None:
for line in self.open(self._file).readlines():
line = line.strip()
file_id, categories = line.split(self._delimiter, 1)
if file_id not in self.fileids():
raise ValueError('In category mapping file %s: %s '
'not found' % (self._file, file_id))
for category in categories.split(self._delimiter):
self._add(file_id, category)
def _add(self, file_id, category):
self._f2c[file_id].add(category)
self._c2f[category].add(file_id)
def categories(self, fileids=None):
"""
Return a list of the categories that are defined for this corpus,
        or for the given file(s), if specified.
"""
if self._f2c is None:
self._init()
if fileids is None:
return sorted(self._c2f)
if isinstance(fileids, compat.string_types):
fileids = [fileids]
return sorted(set.union(*[self._f2c[d] for d in fileids]))
def fileids(self, categories=None):
"""
Return a list of file identifiers for the files that make up
this corpus, or that make up the given category(s) if specified.
"""
if categories is None:
return super(CategorizedCorpusReader, self).fileids()
elif isinstance(categories, compat.string_types):
if self._f2c is None:
self._init()
if categories in self._c2f:
return sorted(self._c2f[categories])
else:
raise ValueError('Category %s not found' % categories)
else:
if self._f2c is None:
self._init()
return sorted(set.union(*[self._c2f[c] for c in categories]))
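
# Hedged usage sketch (added for illustration; not part of the original
# NLTK module). Concrete readers mix CategorizedCorpusReader in, so the
# reader class and corpus layout below are hypothetical. With fileids
# like 'news/001.txt', a ``cat_pattern`` whose first group captures the
# top-level directory yields one category per folder:
#
#     reader = SomeCategorizedReader('/tmp/corpus', r'.*\.txt',
#                                    cat_pattern=r'(\w+)/.*')
#     reader.categories()        # e.g. ['editorial', 'news']
#     reader.fileids('news')     # e.g. ['news/001.txt', ...]
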
######################################################################
#{ Treebank readers
######################################################################
#[xx] is it worth it to factor this out?
class SyntaxCorpusReader(CorpusReader):
"""
An abstract base class for reading corpora consisting of
syntactically parsed text. Subclasses should define:
- ``__init__``, which specifies the location of the corpus
and a method for detecting the sentence blocks in corpus files.
- ``_read_block``, which reads a block from the input stream.
- ``_word``, which takes a block and returns a list of list of words.
- ``_tag``, which takes a block and returns a list of list of tagged
words.
- ``_parse``, which takes a block and returns a list of parsed
sentences.
"""
def _parse(self, s):
raise NotImplementedError()
def _word(self, s):
raise NotImplementedError()
def _tag(self, s):
raise NotImplementedError()
def _read_block(self, stream):
raise NotImplementedError()
def raw(self, fileids=None):
if fileids is None: fileids = self._fileids
elif isinstance(fileids, compat.string_types): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def parsed_sents(self, fileids=None):
reader = self._read_parsed_sent_block
return concat([StreamBackedCorpusView(fileid, reader, encoding=enc)
for fileid, enc in self.abspaths(fileids, True)])
def tagged_sents(self, fileids=None, simplify_tags=False):
def reader(stream):
return self._read_tagged_sent_block(stream, simplify_tags)
return concat([StreamBackedCorpusView(fileid, reader, encoding=enc)
for fileid, enc in self.abspaths(fileids, True)])
def sents(self, fileids=None):
reader = self._read_sent_block
return concat([StreamBackedCorpusView(fileid, reader, encoding=enc)
for fileid, enc in self.abspaths(fileids, True)])
def tagged_words(self, fileids=None, simplify_tags=False):
def reader(stream):
return self._read_tagged_word_block(stream, simplify_tags)
return concat([StreamBackedCorpusView(fileid, reader, encoding=enc)
for fileid, enc in self.abspaths(fileids, True)])
def words(self, fileids=None):
return concat([StreamBackedCorpusView(fileid,
self._read_word_block,
encoding=enc)
for fileid, enc in self.abspaths(fileids, True)])
#------------------------------------------------------------
#{ Block Readers
def _read_word_block(self, stream):
return sum(self._read_sent_block(stream), [])
def _read_tagged_word_block(self, stream, simplify_tags=False):
return sum(self._read_tagged_sent_block(stream, simplify_tags), [])
def _read_sent_block(self, stream):
return list(filter(None, [self._word(t) for t in self._read_block(stream)]))
def _read_tagged_sent_block(self, stream, simplify_tags=False):
return list(filter(None, [self._tag(t, simplify_tags)
for t in self._read_block(stream)]))
def _read_parsed_sent_block(self, stream):
return list(filter(None, [self._parse(t) for t in self._read_block(stream)]))
#} End of Block Readers
#------------------------------------------------------------
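
# Hedged sketch (added for illustration; not part of the original
# module): a minimal SyntaxCorpusReader subclass for a hypothetical
# one-sentence-per-line format of 'word/TAG' tokens, filling in the
# methods the abstract base expects. ``_parse`` is inherited unchanged
# (and still raises NotImplementedError) because this toy format
# carries no tree structure.
class _ToyTaggedReader(SyntaxCorpusReader):
    def _read_block(self, stream):
        # One non-empty line per block; [] at end of stream.
        line = stream.readline()
        if not line:
            return []
        line = line.strip()
        return [line] if line else []

    def _word(self, s):
        # One block -> one sentence, as a list of words.
        return [tok.rsplit('/', 1)[0] for tok in s.split()]

    def _tag(self, s, simplify_tags=False):
        # One block -> one sentence, as (word, tag) pairs.
        return [tuple(tok.rsplit('/', 1)) for tok in s.split()]
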
|
|
"""Undocumented Module"""
__all__ = ['DirectGuiBase', 'DirectGuiWidget']
from panda3d.core import *
from panda3d.direct import get_config_showbase
from . import DirectGuiGlobals as DGG
from .OnscreenText import *
from .OnscreenGeom import *
from .OnscreenImage import *
from direct.directtools.DirectUtil import ROUND_TO
from direct.showbase import DirectObject
from direct.task import Task
import sys
if sys.version_info >= (3, 0):
stringType = str
else:
stringType = basestring
guiObjectCollector = PStatCollector("Client::GuiObjects")
"""
Base class for all Direct Gui items. Handles composite widgets and
command line argument parsing.
"""
"""
Code Overview:
1) Each widget defines a set of options (optiondefs) as a list of tuples
   of the form ('name', defaultValue, handler).
   'name' is the name of the option (used during construction or configure).
   handler can be: None, a method, or INITOPT. If a method is specified,
   it will be called during widget construction (via initialiseoptions);
   if the handler is specified as INITOPT, the option can
   only be set during widget construction.
2) DirectGuiBase.defineoptions is called. defineoptions creates:
self._constructorKeywords = { keyword: [value, useFlag] }
a dictionary of the keyword options specified as part of the constructor
keywords can be of the form 'component_option', where component is
the name of a widget's component, a component group or a component alias
self._dynamicGroups, a list of group names for which it is permissible
to specify options before components of that group are created.
If a widget is a derived class the order of execution would be:
foo.optiondefs = {}
foo.defineoptions()
fooParent()
fooParent.optiondefs = {}
fooParent.defineoptions()
3) addoptions is called. This combines options specified as keywords to
   the widget constructor (stored in self._constructorKeywords)
with the default options (stored in optiondefs). Results are stored in
self._optionInfo = { keyword: [default, current, handler] }
If a keyword is of the form 'component_option' it is left in the
self._constructorKeywords dictionary (for use by component constructors),
otherwise it is 'used', and deleted from self._constructorKeywords.
Notes: - constructor keywords override the defaults.
- derived class default values override parent class defaults
- derived class handler functions override parent class functions
4) Superclass initialization methods are called (resulting in nested calls
   to define options; see 2 above).
5) Widget components are created via calls to self.createcomponent.
User can specify aliases and groups for each component created.
Aliases are alternate names for components, e.g. a widget may have a
component with a name 'entryField', which itself may have a component
named 'entry', you could add an alias 'entry' for the 'entryField_entry'
These are stored in self.__componentAliases. If an alias is found,
all keyword entries which use that alias are expanded to their full
form (to avoid conversion later)
Groups allow option specifications that apply to all members of the group.
If a widget has components: 'text1', 'text2', and 'text3' which all belong
to the 'text' group, they can be all configured with keywords of the form:
'text_keyword' (e.g. text_font = 'comic.rgb'). A component's group
is stored as the fourth element of its entry in self.__componentInfo
Note: the widget constructors have access to all remaining keywords in
_constructorKeywords (those not transferred to _optionInfo by
define/addoptions). If a component defines an alias that applies to
one of the keywords, that keyword is replaced with a new keyword with
the alias expanded.
If a keyword (or substituted alias keyword) is used during creation of the
component, it is deleted from self._constructorKeywords. If a group
keyword applies to the component, that keyword is marked as used, but is
not deleted from self._constructorKeywords, in case it applies to another
component. If any constructor keywords remain at the end of component
construction (and initialisation), an error is raised.
6) initialiseoptions is called. This method calls any option handlers to
respond to any keyword/default values, then checks to see if any keywords
are left unused. If so, an error is raised.
"""
class DirectGuiBase(DirectObject.DirectObject):
def __init__(self):
        # Default id of all gui objects; subclasses should override this
self.guiId = 'guiObject'
# List of all post initialization functions
self.postInitialiseFuncList = []
# To avoid doing things redundantly during initialisation
self.fInit = 1
# Mapping from each megawidget option to a list of information
# about the option
# - default value
# - current value
# - function to call when the option is initialised in the
# call to initialiseoptions() in the constructor or
# modified via configure(). If this is INITOPT, the
# option is an initialisation option (an option that can
# be set by the call to the constructor but can not be
# used with configure).
# This mapping is not initialised here, but in the call to
# defineoptions() which precedes construction of this base class.
#
# self._optionInfo = {}
# Mapping from each component name to a tuple of information
# about the component.
# - component widget instance
# - configure function of widget instance
# - the class of the widget (Frame, EntryField, etc)
# - cget function of widget instance
# - the name of the component group of this component, if any
self.__componentInfo = {}
# Mapping from alias names to the names of components or
# sub-components.
self.__componentAliases = {}
# Contains information about the keywords provided to the
# constructor. It is a mapping from the keyword to a tuple
# containing:
# - value of keyword
# - a boolean indicating if the keyword has been used.
# A keyword is used if, during the construction of a megawidget,
# - it is defined in a call to defineoptions() or addoptions(), or
# - it references, by name, a component of the megawidget, or
# - it references, by group, at least one component
# At the end of megawidget construction, a call is made to
# initialiseoptions() which reports an error if there are
# unused options given to the constructor.
#
# self._constructorKeywords = {}
# List of dynamic component groups. If a group is included in
    # this list, then it is not an error if a keyword argument for
# the group is given to the constructor or to configure(), but
# no components with this group have been created.
# self._dynamicGroups = ()
def defineoptions(self, keywords, optionDefs, dynamicGroups = ()):
""" defineoptions(keywords, optionDefs, dynamicGroups = {}) """
# Create options, providing the default value and the method
# to call when the value is changed. If any option created by
# base classes has the same name as one in <optionDefs>, the
        # base class's value and function will be overridden.
# keywords is a dictionary of keyword/value pairs from the constructor
# optionDefs is a dictionary of default options for the widget
# dynamicGroups is a tuple of component groups for which you can
# specify options even though no components of this group have
# been created
# This should be called before the constructor of the base
# class, so that default values defined in the derived class
# override those in the base class.
if not hasattr(self, '_constructorKeywords'):
tmp = {}
for option, value in keywords.items():
tmp[option] = [value, 0]
self._constructorKeywords = tmp
self._optionInfo = {}
# Initialize dictionary of dynamic groups
if not hasattr(self, '_dynamicGroups'):
self._dynamicGroups = ()
self._dynamicGroups = self._dynamicGroups + tuple(dynamicGroups)
# Reconcile command line and default options
self.addoptions(optionDefs, keywords)
def addoptions(self, optionDefs, optionkeywords):
""" addoptions(optionDefs) - add option def to option info """
# Add additional options, providing the default value and the
# method to call when the value is changed. See
# "defineoptions" for more details
# optimisations:
optionInfo = self._optionInfo
optionInfo_has_key = optionInfo.__contains__
keywords = self._constructorKeywords
keywords_has_key = keywords.__contains__
FUNCTION = DGG._OPT_FUNCTION
for name, default, function in optionDefs:
if '_' not in name:
default = optionkeywords.get(name, default)
# The option will already exist if it has been defined
# in a derived class. In this case, do not override the
# default value of the option or the callback function
# if it is not None.
if not optionInfo_has_key(name):
if keywords_has_key(name):
# Overridden by keyword, use keyword value
value = keywords[name][0]
optionInfo[name] = [default, value, function]
# Delete it from self._constructorKeywords
del keywords[name]
else:
# Use optionDefs value
optionInfo[name] = [default, default, function]
elif optionInfo[name][FUNCTION] is None:
# Only override function if not defined by derived class
optionInfo[name][FUNCTION] = function
else:
# This option is of the form "component_option". If this is
# not already defined in self._constructorKeywords add it.
# This allows a derived class to override the default value
# of an option of a component of a base class.
if not keywords_has_key(name):
keywords[name] = [default, 0]
def initialiseoptions(self, myClass):
"""
        Call all initialisation functions to initialize widget
        options to their default or keyword values
"""
        # This is to make sure this method is only called by
        # the most specific class in the class hierarchy
if self.__class__ is myClass:
# Call the configuration callback function for every option.
FUNCTION = DGG._OPT_FUNCTION
self.fInit = 1
for info in self._optionInfo.values():
func = info[FUNCTION]
if func is not None and func is not DGG.INITOPT:
func()
self.fInit = 0
# Now check if anything is left over
unusedOptions = []
keywords = self._constructorKeywords
for name in keywords:
used = keywords[name][1]
if not used:
# This keyword argument has not been used. If it
# does not refer to a dynamic group, mark it as
# unused.
index = name.find('_')
if index < 0 or name[:index] not in self._dynamicGroups:
unusedOptions.append(name)
self._constructorKeywords = {}
if len(unusedOptions) > 0:
if len(unusedOptions) == 1:
text = 'Unknown option "'
else:
text = 'Unknown options "'
raise KeyError(text + ', '.join(unusedOptions) + \
'" for ' + myClass.__name__)
# Can now call post init func
self.postInitialiseFunc()
def postInitialiseFunc(self):
for func in self.postInitialiseFuncList:
func()
def isinitoption(self, option):
"""
        Is this option one that can only be specified at construction?
"""
return self._optionInfo[option][DGG._OPT_FUNCTION] is DGG.INITOPT
def options(self):
"""
        Return a list of available widget options.
Does not include subcomponent options.
"""
options = []
if hasattr(self, '_optionInfo'):
for option, info in self._optionInfo.items():
isinit = info[DGG._OPT_FUNCTION] is DGG.INITOPT
default = info[DGG._OPT_DEFAULT]
options.append((option, default, isinit))
options.sort()
return options
def configure(self, option=None, **kw):
"""
configure(option = None)
Query or configure the megawidget options.
"""
#
# If not empty, *kw* is a dictionary giving new
# values for some of the options of this gui item
# For options defined for this widget, set
# the value of the option to the new value and call the
# configuration callback function, if any.
#
# If *option* is None, return all gui item configuration
# options and settings. Options are returned as standard 3
# element tuples
#
# If *option* is a string, return the 3 element tuple for the
# given configuration option.
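        # Illustrative queries and sets (widget/option names hypothetical):
        #   w.configure()                     -> {opt: (opt, default, value)}
        #   w.configure('relief')             -> ('relief', default, current)
        #   w.configure(relief = DGG.RAISED)  sets the option, calls handler
        #   w.configure(text_font = font)     forwarded to the 'text'
        #                                     component or component group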
# First, deal with the option queries.
if len(kw) == 0:
# This configure call is querying the values of one or all options.
# Return 3-tuples:
# (optionName, default, value)
if option is None:
rtn = {}
for option, config in self._optionInfo.items():
rtn[option] = (option,
config[DGG._OPT_DEFAULT],
config[DGG._OPT_VALUE])
return rtn
else:
config = self._optionInfo[option]
return (option, config[DGG._OPT_DEFAULT], config[DGG._OPT_VALUE])
# optimizations:
optionInfo = self._optionInfo
optionInfo_has_key = optionInfo.__contains__
componentInfo = self.__componentInfo
componentInfo_has_key = componentInfo.__contains__
componentAliases = self.__componentAliases
componentAliases_has_key = componentAliases.__contains__
VALUE = DGG._OPT_VALUE
FUNCTION = DGG._OPT_FUNCTION
# This will contain a list of options in *kw* which
# are known to this gui item.
directOptions = []
# This will contain information about the options in
# *kw* of the form <component>_<option>, where
# <component> is a component of this megawidget. It is a
# dictionary whose keys are the configure method of each
# component and whose values are a dictionary of options and
# values for the component.
indirectOptions = {}
indirectOptions_has_key = indirectOptions.__contains__
for option, value in kw.items():
if optionInfo_has_key(option):
# This is one of the options of this gui item.
# Check it is an initialisation option.
if optionInfo[option][FUNCTION] is DGG.INITOPT:
print('Cannot configure initialisation option "' \
+ option + '" for ' + self.__class__.__name__)
break
#raise KeyError, \
# 'Cannot configure initialisation option "' \
# + option + '" for ' + self.__class__.__name__
optionInfo[option][VALUE] = value
directOptions.append(option)
else:
index = option.find('_')
if index >= 0:
# This option may be of the form <component>_<option>.
# e.g. if alias ('efEntry', 'entryField_entry')
# and option = efEntry_width
# component = efEntry, componentOption = width
component = option[:index]
componentOption = option[(index + 1):]
# Expand component alias
if componentAliases_has_key(component):
# component = entryField, subcomponent = entry
component, subComponent = componentAliases[component]
if subComponent is not None:
# componentOption becomes entry_width
componentOption = subComponent + '_' \
+ componentOption
                    # Expand option string for use in error messages
# option = entryField_entry_width
option = component + '_' + componentOption
# Does this component exist
if componentInfo_has_key(component):
# Get the configure func for the named component
# component = entryField
componentConfigFuncs = [componentInfo[component][1]]
else:
# Check if this is a group name and configure all
# components in the group.
componentConfigFuncs = []
# For each component
for info in componentInfo.values():
# Check if it is a member of this group
if info[4] == component:
# Yes, append its config func
componentConfigFuncs.append(info[1])
if len(componentConfigFuncs) == 0 and \
component not in self._dynamicGroups:
raise KeyError('Unknown option "' + option + \
'" for ' + self.__class__.__name__)
# Add the configure method(s) (may be more than
# one if this is configuring a component group)
# and option/value to dictionary.
for componentConfigFunc in componentConfigFuncs:
if not indirectOptions_has_key(componentConfigFunc):
indirectOptions[componentConfigFunc] = {}
# Create a dictionary of keyword/values keyed
# on configuration function
indirectOptions[componentConfigFunc][componentOption] \
= value
else:
raise KeyError('Unknown option "' + option + \
'" for ' + self.__class__.__name__)
# Call the configure methods for any components.
# Pass in the dictionary of keyword/values created above
for func, options in indirectOptions.items():
func(**options)
# Call the configuration callback function for each option.
for option in directOptions:
info = optionInfo[option]
func = info[DGG._OPT_FUNCTION]
if func is not None:
func()
# Allow index style references
def __setitem__(self, key, value):
self.configure(**{key: value})
def cget(self, option):
"""
Get current configuration setting for this option
"""
# Return the value of an option, for example myWidget['font'].
if option in self._optionInfo:
return self._optionInfo[option][DGG._OPT_VALUE]
else:
index = option.find('_')
if index >= 0:
component = option[:index]
componentOption = option[(index + 1):]
# Expand component alias
if component in self.__componentAliases:
component, subComponent = self.__componentAliases[
component]
if subComponent is not None:
componentOption = subComponent + '_' + componentOption
                # Expand option string for use in error messages
option = component + '_' + componentOption
if component in self.__componentInfo:
# Call cget on the component.
componentCget = self.__componentInfo[component][3]
return componentCget(componentOption)
else:
# If this is a group name, call cget for one of
# the components in the group.
for info in self.__componentInfo.values():
if info[4] == component:
componentCget = info[3]
return componentCget(componentOption)
# Option not found
raise KeyError('Unknown option "' + option + \
'" for ' + self.__class__.__name__)
    # Allow index style references
__getitem__ = cget
def createcomponent(self, componentName, componentAliases, componentGroup,
widgetClass, *widgetArgs, **kw):
"""
Create a component (during construction or later) for this widget.
"""
# Check for invalid component name
if '_' in componentName:
raise ValueError('Component name "%s" must not contain "_"' % componentName)
# Get construction keywords
if hasattr(self, '_constructorKeywords'):
keywords = self._constructorKeywords
else:
keywords = {}
for alias, component in componentAliases:
# Create aliases to the component and its sub-components.
index = component.find('_')
if index < 0:
# Just a shorter name for one of this widget's components
self.__componentAliases[alias] = (component, None)
else:
# An alias for a component of one of this widget's components
mainComponent = component[:index]
subComponent = component[(index + 1):]
self.__componentAliases[alias] = (mainComponent, subComponent)
# Remove aliases from the constructor keyword arguments by
# replacing any keyword arguments that begin with *alias*
# with corresponding keys beginning with *component*.
alias = alias + '_'
aliasLen = len(alias)
for option in keywords.copy():
if len(option) > aliasLen and option[:aliasLen] == alias:
newkey = component + '_' + option[aliasLen:]
keywords[newkey] = keywords[option]
del keywords[option]
# Find any keyword arguments for this component
componentPrefix = componentName + '_'
nameLen = len(componentPrefix)
# First, walk through the option list looking for arguments
        # that refer to this component's group.
for option in keywords:
# Check if this keyword argument refers to the group
# of this component. If so, add this to the options
# to use when constructing the widget. Mark the
# keyword argument as being used, but do not remove it
# since it may be required when creating another
# component.
index = option.find('_')
if index >= 0 and componentGroup == option[:index]:
rest = option[(index + 1):]
kw[rest] = keywords[option][0]
keywords[option][1] = 1
# Now that we've got the group arguments, walk through the
# option list again and get out the arguments that refer to
# this component specifically by name. These are more
# specific than the group arguments, above; we walk through
# the list afterwards so they will override.
for option in keywords.copy():
if len(option) > nameLen and option[:nameLen] == componentPrefix:
# The keyword argument refers to this component, so add
# this to the options to use when constructing the widget.
kw[option[nameLen:]] = keywords[option][0]
# And delete it from main construction keywords
del keywords[option]
# Return None if no widget class is specified
if widgetClass is None:
return None
# Get arguments for widget constructor
if len(widgetArgs) == 1 and type(widgetArgs[0]) == tuple:
# Arguments to the constructor can be specified as either
# multiple trailing arguments to createcomponent() or as a
# single tuple argument.
widgetArgs = widgetArgs[0]
# Create the widget
widget = widgetClass(*widgetArgs, **kw)
componentClass = widget.__class__.__name__
self.__componentInfo[componentName] = (widget, widget.configure,
componentClass, widget.cget, componentGroup)
return widget
def component(self, name):
# Return a component widget of the megawidget given the
# component's name
# This allows the user of a megawidget to access and configure
# widget components directly.
# Find the main component and any subcomponents
index = name.find('_')
if index < 0:
component = name
remainingComponents = None
else:
component = name[:index]
remainingComponents = name[(index + 1):]
# Expand component alias
# Example entry which is an alias for entryField_entry
if component in self.__componentAliases:
# component = entryField, subComponent = entry
component, subComponent = self.__componentAliases[component]
if subComponent is not None:
if remainingComponents is None:
# remainingComponents = entry
remainingComponents = subComponent
else:
remainingComponents = subComponent + '_' \
+ remainingComponents
# Get the component from __componentInfo dictionary
widget = self.__componentInfo[component][0]
if remainingComponents is None:
# Not looking for subcomponent
return widget
else:
# Recursive call on subcomponent
return widget.component(remainingComponents)
def components(self):
# Return a list of all components.
names = list(self.__componentInfo.keys())
names.sort()
return names
def hascomponent(self, component):
return component in self.__componentInfo
def destroycomponent(self, name):
# Remove a megawidget component.
# This command is for use by megawidget designers to destroy a
# megawidget component.
self.__componentInfo[name][0].destroy()
del self.__componentInfo[name]
def destroy(self):
# Clean out any hooks
self.ignoreAll()
del self._optionInfo
del self.__componentInfo
del self.postInitialiseFuncList
def bind(self, event, command, extraArgs = []):
"""
Bind the command (which should expect one arg) to the specified
event (such as ENTER, EXIT, B1PRESS, B1CLICK, etc.)
See DirectGuiGlobals for possible events
"""
# Need to tack on gui item specific id
gEvent = event + self.guiId
if get_config_showbase().GetBool('debug-directgui-msgs', False):
from direct.showbase.PythonUtil import StackTrace
print(gEvent)
print(StackTrace())
self.accept(gEvent, command, extraArgs = extraArgs)
def unbind(self, event):
"""
Unbind the specified event
"""
# Need to tack on gui item specific id
gEvent = event + self.guiId
self.ignore(gEvent)
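    # Illustrative use (handler name hypothetical):
    #   w.bind(DGG.B1PRESS, handleClick, extraArgs = [w])
    #   w.unbind(DGG.B1PRESS)   # stop listening again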
def toggleGuiGridSnap():
DirectGuiWidget.snapToGrid = 1 - DirectGuiWidget.snapToGrid
def setGuiGridSpacing(spacing):
DirectGuiWidget.gridSpacing = spacing
class DirectGuiWidget(DirectGuiBase, NodePath):
    # Toggle if you wish widgets to snap to grid when dragging
snapToGrid = 0
gridSpacing = 0.05
# Determine the default initial state for inactive (or
# unclickable) components. If we are in edit mode, these are
# actually clickable by default.
guiEdit = get_config_showbase().GetBool('direct-gui-edit', 0)
if guiEdit:
inactiveInitState = DGG.NORMAL
else:
inactiveInitState = DGG.DISABLED
guiDict = {}
def __init__(self, parent = None, **kw):
# Direct gui widgets are node paths
# Direct gui widgets have:
# - stateNodePaths (to hold visible representation of widget)
# State node paths can have:
# - a frame of type (None, FLAT, RAISED, GROOVE, RIDGE)
# - arbitrary geometry for each state
# They inherit from DirectGuiWidget
# - Can create components (with aliases and groups)
# - Can bind to mouse events
# They inherit from NodePath
# - Can position/scale them
optiondefs = (
# Widget's constructor
('pgFunc', PGItem, None),
('numStates', 1, None),
('invertedFrames', (), None),
('sortOrder', 0, None),
# Widget's initial state
('state', DGG.NORMAL, self.setState),
# Widget's frame characteristics
('relief', DGG.FLAT, self.setRelief),
('borderWidth', (.1, .1), self.setBorderWidth),
('borderUvWidth', (.1, .1), self.setBorderUvWidth),
('frameSize', None, self.setFrameSize),
('frameColor', (.8, .8, .8, 1), self.setFrameColor),
('frameTexture', None, self.setFrameTexture),
('frameVisibleScale', (1, 1), self.setFrameVisibleScale),
('pad', (0, 0), self.resetFrameSize),
# Override button id (beware! your name may not be unique!)
('guiId', None, DGG.INITOPT),
# Initial pos/scale of the widget
('pos', None, DGG.INITOPT),
('hpr', None, DGG.INITOPT),
('scale', None, DGG.INITOPT),
('color', None, DGG.INITOPT),
# Do events pass through this widget?
('suppressMouse', 1, DGG.INITOPT),
('suppressKeys', 0, DGG.INITOPT),
('enableEdit', 1, DGG.INITOPT),
)
# Merge keyword options with default options
self.defineoptions(kw, optiondefs)
# Initialize the base classes (after defining the options).
DirectGuiBase.__init__(self)
NodePath.__init__(self)
# Create a button
self.guiItem = self['pgFunc']('')
# Override automatically generated guiId
if self['guiId']:
self.guiItem.setId(self['guiId'])
self.guiId = self.guiItem.getId()
if __dev__:
guiObjectCollector.addLevel(1)
guiObjectCollector.flushLevel()
# track gui items by guiId for tracking down leaks
if hasattr(base, 'guiItems'):
if self.guiId in base.guiItems:
base.notify.warning('duplicate guiId: %s (%s stomping %s)' %
(self.guiId, self,
base.guiItems[self.guiId]))
base.guiItems[self.guiId] = self
if hasattr(base, 'printGuiCreates'):
printStack()
# Attach button to parent and make that self
if (parent == None):
parent = aspect2d
self.assign(parent.attachNewNode(self.guiItem, self['sortOrder']))
# Update pose to initial values
if self['pos']:
self.setPos(self['pos'])
if self['hpr']:
self.setHpr(self['hpr'])
if self['scale']:
self.setScale(self['scale'])
if self['color']:
self.setColor(self['color'])
# Initialize names
# Putting the class name in helps with debugging.
self.setName("%s-%s" % (self.__class__.__name__, self.guiId))
# Create
self.stateNodePath = []
for i in range(self['numStates']):
self.stateNodePath.append(NodePath(self.guiItem.getStateDef(i)))
# Initialize frame style
self.frameStyle = []
for i in range(self['numStates']):
self.frameStyle.append(PGFrameStyle())
# For holding bounds info
self.ll = Point3(0)
self.ur = Point3(0)
# Is drag and drop enabled?
if self['enableEdit'] and self.guiEdit:
self.enableEdit()
# Set up event handling
suppressFlags = 0
if self['suppressMouse']:
suppressFlags |= MouseWatcherRegion.SFMouseButton
suppressFlags |= MouseWatcherRegion.SFMousePosition
if self['suppressKeys']:
suppressFlags |= MouseWatcherRegion.SFOtherButton
self.guiItem.setSuppressFlags(suppressFlags)
# Bind destroy hook
self.guiDict[self.guiId] = self
# self.bind(DGG.DESTROY, self.destroy)
# Update frame when everything has been initialized
self.postInitialiseFuncList.append(self.frameInitialiseFunc)
# Call option initialization functions
self.initialiseoptions(DirectGuiWidget)
def frameInitialiseFunc(self):
# Now allow changes to take effect
self.updateFrameStyle()
if not self['frameSize']:
self.resetFrameSize()
def enableEdit(self):
self.bind(DGG.B2PRESS, self.editStart)
self.bind(DGG.B2RELEASE, self.editStop)
self.bind(DGG.PRINT, self.printConfig)
# Can we move this to showbase
# Certainly we don't need to do this for every button!
#mb = base.mouseWatcherNode.getModifierButtons()
#mb.addButton(KeyboardButton.control())
#base.mouseWatcherNode.setModifierButtons(mb)
def disableEdit(self):
self.unbind(DGG.B2PRESS)
self.unbind(DGG.B2RELEASE)
self.unbind(DGG.PRINT)
#mb = base.mouseWatcherNode.getModifierButtons()
#mb.removeButton(KeyboardButton.control())
#base.mouseWatcherNode.setModifierButtons(mb)
def editStart(self, event):
taskMgr.remove('guiEditTask')
vWidget2render2d = self.getPos(render2d)
vMouse2render2d = Point3(event.getMouse()[0], 0, event.getMouse()[1])
editVec = Vec3(vWidget2render2d - vMouse2render2d)
if base.mouseWatcherNode.getModifierButtons().isDown(
KeyboardButton.control()):
t = taskMgr.add(self.guiScaleTask, 'guiEditTask')
t.refPos = vWidget2render2d
t.editVecLen = editVec.length()
t.initScale = self.getScale()
else:
t = taskMgr.add(self.guiDragTask, 'guiEditTask')
t.editVec = editVec
def guiScaleTask(self, state):
mwn = base.mouseWatcherNode
if mwn.hasMouse():
vMouse2render2d = Point3(mwn.getMouse()[0], 0, mwn.getMouse()[1])
newEditVecLen = Vec3(state.refPos - vMouse2render2d).length()
self.setScale(state.initScale * (newEditVecLen/state.editVecLen))
return Task.cont
def guiDragTask(self, state):
mwn = base.mouseWatcherNode
if mwn.hasMouse():
vMouse2render2d = Point3(mwn.getMouse()[0], 0, mwn.getMouse()[1])
newPos = vMouse2render2d + state.editVec
self.setPos(render2d, newPos)
if DirectGuiWidget.snapToGrid:
newPos = self.getPos()
newPos.set(
ROUND_TO(newPos[0], DirectGuiWidget.gridSpacing),
ROUND_TO(newPos[1], DirectGuiWidget.gridSpacing),
ROUND_TO(newPos[2], DirectGuiWidget.gridSpacing))
self.setPos(newPos)
return Task.cont
def editStop(self, event):
taskMgr.remove('guiEditTask')
def setState(self):
if type(self['state']) == type(0):
self.guiItem.setActive(self['state'])
elif (self['state'] == DGG.NORMAL) or (self['state'] == 'normal'):
self.guiItem.setActive(1)
else:
self.guiItem.setActive(0)
def resetFrameSize(self):
if not self.fInit:
self.setFrameSize(fClearFrame = 1)
def setFrameSize(self, fClearFrame = 0):
# Use ready state to determine frame Type
frameType = self.getFrameType()
if self['frameSize']:
# Use user specified bounds
self.bounds = self['frameSize']
#print "%s bounds = %s" % (self.getName(), self.bounds)
bw = (0, 0)
else:
if fClearFrame and (frameType != PGFrameStyle.TNone):
self.frameStyle[0].setType(PGFrameStyle.TNone)
self.guiItem.setFrameStyle(0, self.frameStyle[0])
# To force an update of the button
self.guiItem.getStateDef(0)
# Clear out frame before computing bounds
self.getBounds()
# Restore frame style if necessary
if (frameType != PGFrameStyle.TNone):
self.frameStyle[0].setType(frameType)
self.guiItem.setFrameStyle(0, self.frameStyle[0])
if ((frameType != PGFrameStyle.TNone) and
(frameType != PGFrameStyle.TFlat)):
bw = self['borderWidth']
else:
bw = (0, 0)
# Set frame to new dimensions
self.guiItem.setFrame(
self.bounds[0] - bw[0],
self.bounds[1] + bw[0],
self.bounds[2] - bw[1],
self.bounds[3] + bw[1])
def getBounds(self, state = 0):
self.stateNodePath[state].calcTightBounds(self.ll, self.ur)
# Scale bounds to give a pad around graphics
vec_right = Vec3.right()
vec_up = Vec3.up()
left = (vec_right[0] * self.ll[0]
+ vec_right[1] * self.ll[1]
+ vec_right[2] * self.ll[2])
right = (vec_right[0] * self.ur[0]
+ vec_right[1] * self.ur[1]
+ vec_right[2] * self.ur[2])
bottom = (vec_up[0] * self.ll[0]
+ vec_up[1] * self.ll[1]
+ vec_up[2] * self.ll[2])
top = (vec_up[0] * self.ur[0]
+ vec_up[1] * self.ur[1]
+ vec_up[2] * self.ur[2])
self.ll = Point3(left, 0.0, bottom)
self.ur = Point3(right, 0.0, top)
self.bounds = [self.ll[0] - self['pad'][0],
self.ur[0] + self['pad'][0],
self.ll[2] - self['pad'][1],
self.ur[2] + self['pad'][1]]
return self.bounds
def getWidth(self):
return self.bounds[1] - self.bounds[0]
def getHeight(self):
return self.bounds[3] - self.bounds[2]
def getCenter(self):
x = self.bounds[0] + (self.bounds[1] - self.bounds[0])/2.0
y = self.bounds[2] + (self.bounds[3] - self.bounds[2])/2.0
return (x, y)
def getFrameType(self, state = 0):
return self.frameStyle[state].getType()
def updateFrameStyle(self):
if not self.fInit:
for i in range(self['numStates']):
self.guiItem.setFrameStyle(i, self.frameStyle[i])
def setRelief(self, fSetStyle = 1):
relief = self['relief']
# Convert None, and string arguments
if relief == None:
relief = PGFrameStyle.TNone
elif isinstance(relief, stringType):
# Convert string to frame style int
relief = DGG.FrameStyleDict[relief]
# Set style
if relief == DGG.RAISED:
for i in range(self['numStates']):
if i in self['invertedFrames']:
                    self.frameStyle[i].setType(DGG.SUNKEN)
else:
self.frameStyle[i].setType(DGG.RAISED)
elif relief == DGG.SUNKEN:
for i in range(self['numStates']):
if i in self['invertedFrames']:
                    self.frameStyle[i].setType(DGG.RAISED)
else:
self.frameStyle[i].setType(DGG.SUNKEN)
else:
for i in range(self['numStates']):
self.frameStyle[i].setType(relief)
# Apply styles
self.updateFrameStyle()
def setFrameColor(self):
# this might be a single color or a list of colors
colors = self['frameColor']
if type(colors[0]) == int or \
type(colors[0]) == float:
colors = (colors,)
for i in range(self['numStates']):
if i >= len(colors):
color = colors[-1]
else:
color = colors[i]
self.frameStyle[i].setColor(color[0], color[1], color[2], color[3])
self.updateFrameStyle()
def setFrameTexture(self):
# this might be a single texture or a list of textures
textures = self['frameTexture']
if textures == None or \
isinstance(textures, Texture) or \
isinstance(textures, stringType):
textures = (textures,) * self['numStates']
for i in range(self['numStates']):
if i >= len(textures):
texture = textures[-1]
else:
texture = textures[i]
if isinstance(texture, stringType):
texture = loader.loadTexture(texture)
if texture:
self.frameStyle[i].setTexture(texture)
else:
self.frameStyle[i].clearTexture()
self.updateFrameStyle()
def setFrameVisibleScale(self):
scale = self['frameVisibleScale']
for i in range(self['numStates']):
self.frameStyle[i].setVisibleScale(scale[0], scale[1])
self.updateFrameStyle()
def setBorderWidth(self):
width = self['borderWidth']
for i in range(self['numStates']):
self.frameStyle[i].setWidth(width[0], width[1])
self.updateFrameStyle()
def setBorderUvWidth(self):
uvWidth = self['borderUvWidth']
for i in range(self['numStates']):
self.frameStyle[i].setUvWidth(uvWidth[0], uvWidth[1])
self.updateFrameStyle()
def destroy(self):
if hasattr(self, "frameStyle"):
if __dev__:
guiObjectCollector.subLevel(1)
guiObjectCollector.flushLevel()
if hasattr(base, 'guiItems'):
if self.guiId in base.guiItems:
del base.guiItems[self.guiId]
else:
base.notify.warning(
'DirectGuiWidget.destroy(): '
'gui item %s not in base.guiItems' %
self.guiId)
# Destroy children
for child in self.getChildren():
childGui = self.guiDict.get(child.getName())
if childGui:
childGui.destroy()
else:
# RAU since we added the class to the name, try
# it with the original name
parts = child.getName().split('-')
simpleChildGui = self.guiDict.get(parts[-1])
if simpleChildGui:
simpleChildGui.destroy()
# messenger.send(DESTROY + child.getName())
del self.guiDict[self.guiId]
del self.frameStyle
# Get rid of node path
self.removeNode()
for nodePath in self.stateNodePath:
nodePath.removeNode()
del self.stateNodePath
del self.guiItem
# Call superclass destruction method (clears out hooks)
DirectGuiBase.destroy(self)
def printConfig(self, indent = 0):
space = ' ' * indent
print('%s%s - %s' % (space, self.guiId, self.__class__.__name__))
print('%sPos: %s' % (space, tuple(self.getPos())))
print('%sScale: %s' % (space, tuple(self.getScale())))
# Print out children info
for child in self.getChildren():
messenger.send(DGG.PRINT + child.getName(), [indent + 2])
def copyOptions(self, other):
"""
Copy other's options into our self so we look and feel like other
"""
for key, value in other._optionInfo.items():
self[key] = value[1]
def taskName(self, idString):
return (idString + "-" + str(self.guiId))
def uniqueName(self, idString):
return (idString + "-" + str(self.guiId))
def setProp(self, propString, value):
"""
Allows you to set a property like frame['text'] = 'Joe' in
        a function call instead of an assignment.
        This is useful for setting properties inside function intervals,
        where you must supply a function and extraArgs, not an assignment.
"""
self[propString] = value
|
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test apps for running tests using xcodebuild."""
import os
import plistlib
import struct
import subprocess
import time
import shard_util
import test_runner
import test_runner_errors
import xcode_util
OUTPUT_DISABLED_TESTS_TEST_ARG = '--write-compiled-tests-json-to-writable-path'
def get_gtest_filter(tests, invert=False):
"""Returns the GTest filter to filter the given test cases.
Args:
tests: List of test cases to filter.
invert: Whether to invert the filter or not. Inverted, the filter will match
everything except the given test cases.
Returns:
A string which can be supplied to --gtest_filter.
"""
  # A colon-separated list of test cases.
# e.g. a:b:c matches a, b, c.
# e.g. -a:b:c matches everything except a, b, c.
test_filter = ':'.join(test for test in tests)
if invert:
return '-%s' % test_filter
return test_filter
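# Illustrative use:
#   get_gtest_filter(['A/t1', 'B/t2'])              -> 'A/t1:B/t2'
#   get_gtest_filter(['A/t1', 'B/t2'], invert=True) -> '-A/t1:B/t2'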
def get_bundle_id(app_path):
"""Get bundle identifier for app.
Args:
app_path: (str) A path to app.
"""
return subprocess.check_output([
'/usr/libexec/PlistBuddy',
'-c',
'Print:CFBundleIdentifier',
os.path.join(app_path, 'Info.plist'),
]).rstrip().decode("utf-8")
def is_running_rosetta():
"""Returns whether Python is being translated by Rosetta.
Returns:
True if the Python interpreter is being run as an x86_64 binary on an arm64
macOS machine. False if it is running as an arm64 binary, or if it is
running on an Intel machine.
"""
translated = subprocess.check_output(
['sysctl', '-i', '-b', 'sysctl.proc_translated'])
# "sysctl -b" is expected to return a 4-byte integer response. 1 means the
# current process is running under Rosetta, 0 means it is not. On x86_64
# machines, this variable does not exist at all, so "-i" is used to return a
# 0-byte response instead of throwing an error.
if len(translated) != 4:
return False
return struct.unpack('i', translated)[0] > 0
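# Illustrative use (mirrors GTestsApp.command below): prefix a command with
# 'arch -arch arm64' so it runs natively when Python itself is translated.
#   cmd = []
#   if is_running_rosetta():
#     cmd.extend(['arch', '-arch', 'arm64'])
#   cmd.extend(['xcodebuild', ...])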
class GTestsApp(object):
"""Gtests app to run.
Stores data about egtests:
test_app: full path to an app.
"""
def __init__(self, test_app, **kwargs):
"""Initialize Egtests.
Args:
test_app: (str) full path to egtests app.
(Following are potential args in **kwargs)
included_tests: (list) Specific tests to run
E.g.
[ 'TestCaseClass1/testMethod1', 'TestCaseClass2/testMethod2']
excluded_tests: (list) Specific tests not to run
E.g.
[ 'TestCaseClass1', 'TestCaseClass2/testMethod2']
test_args: List of strings to pass as arguments to the test when
launching.
env_vars: List of environment variables to pass to the test itself.
release: (bool) Whether the app is release build.
repeat_count: (int) Number of times to run each test case.
inserted_libs: List of libraries to insert when running the test.
Raises:
AppNotFoundError: If the given app does not exist
"""
if not os.path.exists(test_app):
raise test_runner.AppNotFoundError(test_app)
self.test_app_path = test_app
self.project_path = os.path.dirname(self.test_app_path)
self.test_args = kwargs.get('test_args') or []
self.env_vars = {}
for env_var in kwargs.get('env_vars') or []:
env_var = env_var.split('=', 1)
self.env_vars[env_var[0]] = None if len(env_var) == 1 else env_var[1]
    # Keep the initial included tests from target creation time. Do not modify.
self.initial_included_tests = kwargs.get('included_tests') or []
# This may be modified between test launches.
self.included_tests = kwargs.get('included_tests') or []
# This may be modified between test launches.
self.excluded_tests = kwargs.get('excluded_tests') or []
self.disabled_tests = []
self.module_name = os.path.splitext(os.path.basename(test_app))[0]
self.release = kwargs.get('release')
self.repeat_count = kwargs.get('repeat_count') or 1
self.host_app_path = kwargs.get('host_app_path')
self.inserted_libs = kwargs.get('inserted_libs') or []
def fill_xctest_run(self, out_dir):
"""Fills xctestrun file by egtests.
Args:
      out_dir: (str) A path where the xctestrun file will be stored.
Returns:
A path to xctestrun file.
"""
folder = os.path.abspath(os.path.join(out_dir, os.pardir))
if not os.path.exists(folder):
os.makedirs(folder)
xctestrun = os.path.join(folder, 'run_%d.xctestrun' % int(time.time()))
if not os.path.exists(xctestrun):
with open(xctestrun, 'w'):
pass
# Creates a dict with data about egtests to run - fill all required fields:
# egtests_module, egtest_app_path, egtests_xctest_path and
# filtered tests if filter is specified.
# Write data in temp xctest run file.
plistlib.writePlist(self.fill_xctestrun_node(), xctestrun)
return xctestrun
def fill_xctestrun_node(self):
"""Fills only required nodes for egtests in xctestrun file.
Returns:
A node with filled required fields about egtests.
"""
module = self.module_name + '_module'
# If --run-with-custom-webkit is passed as a test arg, set up
# DYLD_FRAMEWORK_PATH and DYLD_LIBRARY_PATH to load the custom webkit
# modules.
dyld_path = self.project_path
if '--run-with-custom-webkit' in self.test_args:
if self.host_app_path:
webkit_path = os.path.join(self.host_app_path, 'WebKitFrameworks')
else:
webkit_path = os.path.join(self.test_app_path, 'WebKitFrameworks')
dyld_path = dyld_path + ':' + webkit_path
module_data = {
'TestBundlePath': self.test_app_path,
'TestHostPath': self.test_app_path,
'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),
'TestingEnvironmentVariables': {
'DYLD_LIBRARY_PATH':
'%s:__PLATFORMS__/iPhoneSimulator.platform/Developer/Library' %
dyld_path,
'DYLD_FRAMEWORK_PATH':
'%s:__PLATFORMS__/iPhoneSimulator.platform/'
'Developer/Library/Frameworks' % dyld_path,
}
}
if self.inserted_libs:
module_data['TestingEnvironmentVariables'][
'DYLD_INSERT_LIBRARIES'] = ':'.join(self.inserted_libs)
xctestrun_data = {module: module_data}
gtest_filter = []
if self.included_tests:
gtest_filter = get_gtest_filter(self.included_tests, invert=False)
elif self.excluded_tests:
gtest_filter = get_gtest_filter(self.excluded_tests, invert=True)
if gtest_filter:
      # Remove any previous gtest_filter if it exists.
self.test_args = [el for el in self.test_args
if not el.startswith('--gtest_filter=')]
self.test_args.append('--gtest_filter=%s' % gtest_filter)
if self.repeat_count > 1:
self.test_args.append('--gtest_repeat=%s' % self.repeat_count)
if self.env_vars:
xctestrun_data[module].update({'EnvironmentVariables': self.env_vars})
if self.test_args:
xctestrun_data[module].update({'CommandLineArguments': self.test_args})
if self.excluded_tests:
xctestrun_data[module].update({
'SkipTestIdentifiers': self.excluded_tests
})
if self.included_tests:
xctestrun_data[module].update({
'OnlyTestIdentifiers': self.included_tests
})
return xctestrun_data
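  # Illustrative shape of the node returned by fill_xctestrun_node above
  # (paths, module name and bundle id abbreviated):
  #   {'<module_name>_module': {
  #       'TestBundlePath': '/path/to/test.app',
  #       'TestHostPath': '/path/to/test.app',
  #       'TestHostBundleIdentifier': '<bundle id>',
  #       'TestingEnvironmentVariables': {...},
  #       'CommandLineArguments': ['--gtest_filter=...'],
  #   }}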
def command(self, out_dir, destination, shards):
"""Returns the command that launches tests using xcodebuild.
Format of command:
xcodebuild test-without-building -xctestrun file.xctestrun \
-parallel-testing-enabled YES -parallel-testing-worker-count %d% \
[-destination "destination"] -resultBundlePath %output_path%
Args:
out_dir: (str) An output directory.
destination: (str) A destination of running simulator.
shards: (int) A number of shards.
Returns:
A list of strings forming the command to launch the test.
"""
cmd = []
if is_running_rosetta():
cmd.extend(['arch', '-arch', 'arm64'])
cmd.extend([
'xcodebuild', 'test-without-building', '-xctestrun',
self.fill_xctest_run(out_dir), '-destination', destination,
'-resultBundlePath', out_dir
])
if shards > 1:
cmd.extend([
'-parallel-testing-enabled', 'YES', '-parallel-testing-worker-count',
str(shards)
])
return cmd
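  # Illustrative result (destination value hypothetical):
  #   command('out', 'id=ABCD-1234', 2) ->
  #     ['xcodebuild', 'test-without-building',
  #      '-xctestrun', '<out>/../run_<timestamp>.xctestrun',
  #      '-destination', 'id=ABCD-1234', '-resultBundlePath', 'out',
  #      '-parallel-testing-enabled', 'YES',
  #      '-parallel-testing-worker-count', '2']
  #   prefixed with ['arch', '-arch', 'arm64'] when running under Rosetta.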
def get_all_tests(self):
"""Gets all tests to run in this object."""
    # Method names that start with test* and are in *TestCase classes
    # but are not test methods.
# TODO(crbug.com/982435): Rename not test methods with test-suffix.
non_test_prefixes = [
'ChromeTestCase/testServer', 'FindInPageTestCase/testURL',
'setUpForTestCase'
]
# TODO(crbug.com/1123681): Move all_tests to class var. Set all_tests,
# disabled_tests values in initialization to avoid multiple calls to otool.
all_tests = []
    # Only store disabled tests when the test arg is present.
store_disabled_tests = OUTPUT_DISABLED_TESTS_TEST_ARG in self.test_args
self.disabled_tests = []
for test_class, test_method in shard_util.fetch_test_names(
self.test_app_path,
self.host_app_path,
self.release,
enabled_tests_only=False):
test_name = '%s/%s' % (test_class, test_method)
if any(test_name.startswith(prefix) for prefix in non_test_prefixes):
continue
# |self.initial_included_tests| contains the tests to execute, which
# may be a subset of all tests b/c of the iOS test sharding logic in
# run.py. Filter by |self.initial_included_tests| if specified.
# |self.initial_included_tests| might store test class or full name.
included = self.initial_included_tests
if not included or test_name in included or test_class in included:
if test_method.startswith('test'):
all_tests.append(test_name)
elif store_disabled_tests:
self.disabled_tests.append(test_name)
return all_tests
class EgtestsApp(GTestsApp):
"""Egtests to run.
Stores data about egtests:
egtests_app: full path to egtests app.
project_path: root project folder.
module_name: egtests module name.
included_tests: List of tests to run.
excluded_tests: List of tests not to run.
"""
def __init__(self, egtests_app, **kwargs):
"""Initialize Egtests.
Args:
egtests_app: (str) full path to egtests app.
(Following are potential args in **kwargs)
included_tests: (list) Specific tests to run
E.g.
[ 'TestCaseClass1/testMethod1', 'TestCaseClass2/testMethod2']
excluded_tests: (list) Specific tests not to run
E.g.
[ 'TestCaseClass1', 'TestCaseClass2/testMethod2']
test_args: List of strings to pass as arguments to the test when
launching.
env_vars: List of environment variables to pass to the test itself.
host_app_path: (str) full path to host app.
inserted_libs: List of libraries to insert when running the test.
repeat_count: (int) Number of times to run each test case.
Raises:
AppNotFoundError: If the given app does not exist
"""
inserted_libs = list(kwargs.get('inserted_libs') or [])
inserted_libs.append('__PLATFORMS__/iPhoneSimulator.platform/Developer/'
'usr/lib/libXCTestBundleInject.dylib')
kwargs['inserted_libs'] = inserted_libs
super(EgtestsApp, self).__init__(egtests_app, **kwargs)
def _xctest_path(self):
"""Gets xctest-file from egtests/PlugIns folder.
Returns:
A path for xctest in the format of /PlugIns/file.xctest
Raises:
PlugInsNotFoundError: If no PlugIns folder found in egtests.app.
XCTestPlugInNotFoundError: If no xctest-file found in PlugIns.
"""
plugins_dir = os.path.join(self.test_app_path, 'PlugIns')
if not os.path.exists(plugins_dir):
raise test_runner.PlugInsNotFoundError(plugins_dir)
plugin_xctest = None
if os.path.exists(plugins_dir):
for plugin in os.listdir(plugins_dir):
if plugin.endswith('.xctest'):
plugin_xctest = os.path.join(plugins_dir, plugin)
if not plugin_xctest:
raise test_runner.XCTestPlugInNotFoundError(plugin_xctest)
return plugin_xctest.replace(self.test_app_path, '')
def command(self, out_dir, destination, shards):
"""Returns the command that launches tests for EG Tests.
See details in parent class method docstring. This method appends the
command line switch if test repeat is required.
"""
cmd = super(EgtestsApp, self).command(out_dir, destination, shards)
if self.repeat_count > 1:
if xcode_util.using_xcode_13_or_higher():
cmd += ['-test-iterations', str(self.repeat_count)]
else:
raise test_runner_errors.XcodeUnsupportedFeatureError(
'Test repeat is only supported in Xcode 13 or higher!')
return cmd
def fill_xctestrun_node(self):
"""Fills only required nodes for egtests in xctestrun file.
Returns:
A node with filled required fields about egtests.
"""
xctestrun_data = super(EgtestsApp, self).fill_xctestrun_node()
module_data = xctestrun_data[self.module_name + '_module']
module_data['TestBundlePath'] = '__TESTHOST__%s' % self._xctest_path()
module_data['TestingEnvironmentVariables'][
'XCInjectBundleInto'] = '__TESTHOST__/%s' % self.module_name
if self.host_app_path:
# Module data specific to EG2 tests
module_data['IsUITestBundle'] = True
module_data['IsXCTRunnerHostedTestBundle'] = True
module_data['UITargetAppPath'] = '%s' % self.host_app_path
# Special handling for Xcode10.2
dependent_products = [
module_data['UITargetAppPath'],
module_data['TestBundlePath'],
module_data['TestHostPath']
]
module_data['DependentProductPaths'] = dependent_products
# Module data specific to EG1 tests
else:
module_data['IsAppHostedTestBundle'] = True
return xctestrun_data
class DeviceXCTestUnitTestsApp(GTestsApp):
"""XCTest hosted unit tests to run on devices.
This is for the XCTest framework hosted unit tests running on devices.
Stores data about tests:
tests_app: full path to tests app.
project_path: root project folder.
    module_name: tests module name.
included_tests: List of tests to run.
excluded_tests: List of tests not to run.
"""
def __init__(self, tests_app, **kwargs):
"""Initialize the class.
Args:
tests_app: (str) full path to tests app.
(Following are potential args in **kwargs)
included_tests: (list) Specific tests to run
E.g.
[ 'TestCaseClass1/testMethod1', 'TestCaseClass2/testMethod2']
excluded_tests: (list) Specific tests not to run
E.g.
[ 'TestCaseClass1', 'TestCaseClass2/testMethod2']
test_args: List of strings to pass as arguments to the test when
launching. Test arg to run as XCTest based unit test will be appended.
env_vars: List of environment variables to pass to the test itself.
repeat_count: (int) Number of times to run each test case.
Raises:
AppNotFoundError: If the given app does not exist
"""
test_args = list(kwargs.get('test_args') or [])
test_args.append('--enable-run-ios-unittests-with-xctest')
kwargs['test_args'] = test_args
super(DeviceXCTestUnitTestsApp, self).__init__(tests_app, **kwargs)
# TODO(crbug.com/1077277): Refactor class structure and remove duplicate code.
def _xctest_path(self):
"""Gets xctest-file from egtests/PlugIns folder.
Returns:
A path for xctest in the format of /PlugIns/file.xctest
Raises:
PlugInsNotFoundError: If no PlugIns folder found in egtests.app.
XCTestPlugInNotFoundError: If no xctest-file found in PlugIns.
"""
plugins_dir = os.path.join(self.test_app_path, 'PlugIns')
if not os.path.exists(plugins_dir):
raise test_runner.PlugInsNotFoundError(plugins_dir)
plugin_xctest = None
if os.path.exists(plugins_dir):
for plugin in os.listdir(plugins_dir):
if plugin.endswith('.xctest'):
plugin_xctest = os.path.join(plugins_dir, plugin)
if not plugin_xctest:
raise test_runner.XCTestPlugInNotFoundError(plugin_xctest)
return plugin_xctest.replace(self.test_app_path, '')
def fill_xctestrun_node(self):
"""Fills only required nodes for XCTest hosted unit tests in xctestrun file.
Returns:
A node with filled required fields about tests.
"""
xctestrun_data = {
'TestTargetName': {
'IsAppHostedTestBundle': True,
'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),
'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),
'TestHostPath': '%s' % self.test_app_path,
'TestingEnvironmentVariables': {
'DYLD_INSERT_LIBRARIES':
'__TESTHOST__/Frameworks/libXCTestBundleInject.dylib',
'DYLD_LIBRARY_PATH':
'__PLATFORMS__/iPhoneOS.platform/Developer/Library',
'DYLD_FRAMEWORK_PATH':
'__PLATFORMS__/iPhoneOS.platform/Developer/'
'Library/Frameworks',
'XCInjectBundleInto':
'__TESTHOST__/%s' % self.module_name
}
}
}
if self.env_vars:
      xctestrun_data['TestTargetName'].update(
          {'EnvironmentVariables': self.env_vars})
gtest_filter = []
if self.included_tests:
gtest_filter = get_gtest_filter(self.included_tests, invert=False)
elif self.excluded_tests:
gtest_filter = get_gtest_filter(self.excluded_tests, invert=True)
if gtest_filter:
      # Remove any previous gtest_filter if it exists.
self.test_args = [
el for el in self.test_args if not el.startswith('--gtest_filter=')
]
self.test_args.append('--gtest_filter=%s' % gtest_filter)
self.test_args.append('--gmock_verbose=error')
xctestrun_data['TestTargetName'].update(
{'CommandLineArguments': self.test_args})
return xctestrun_data
class SimulatorXCTestUnitTestsApp(GTestsApp):
"""XCTest hosted unit tests to run on simulators.
This is for the XCTest framework hosted unit tests running on simulators.
Stores data about tests:
tests_app: full path to tests app.
project_path: root project folder.
    module_name: tests module name.
included_tests: List of tests to run.
excluded_tests: List of tests not to run.
"""
def __init__(self, tests_app, **kwargs):
"""Initialize the class.
Args:
tests_app: (str) full path to tests app.
(Following are potential args in **kwargs)
included_tests: (list) Specific tests to run
E.g.
[ 'TestCaseClass1/testMethod1', 'TestCaseClass2/testMethod2']
excluded_tests: (list) Specific tests not to run
E.g.
[ 'TestCaseClass1', 'TestCaseClass2/testMethod2']
test_args: List of strings to pass as arguments to the test when
launching. Test arg to run as XCTest based unit test will be appended.
env_vars: List of environment variables to pass to the test itself.
repeat_count: (int) Number of times to run each test case.
Raises:
AppNotFoundError: If the given app does not exist
"""
test_args = list(kwargs.get('test_args') or [])
test_args.append('--enable-run-ios-unittests-with-xctest')
kwargs['test_args'] = test_args
super(SimulatorXCTestUnitTestsApp, self).__init__(tests_app, **kwargs)
# TODO(crbug.com/1077277): Refactor class structure and remove duplicate code.
def _xctest_path(self):
"""Gets xctest-file from egtests/PlugIns folder.
Returns:
A path for xctest in the format of /PlugIns/file.xctest
Raises:
PlugInsNotFoundError: If no PlugIns folder found in egtests.app.
XCTestPlugInNotFoundError: If no xctest-file found in PlugIns.
"""
plugins_dir = os.path.join(self.test_app_path, 'PlugIns')
if not os.path.exists(plugins_dir):
raise test_runner.PlugInsNotFoundError(plugins_dir)
plugin_xctest = None
if os.path.exists(plugins_dir):
for plugin in os.listdir(plugins_dir):
if plugin.endswith('.xctest'):
plugin_xctest = os.path.join(plugins_dir, plugin)
if not plugin_xctest:
raise test_runner.XCTestPlugInNotFoundError(plugin_xctest)
return plugin_xctest.replace(self.test_app_path, '')
def fill_xctestrun_node(self):
"""Fills only required nodes for XCTest hosted unit tests in xctestrun file.
Returns:
A node with filled required fields about tests.
"""
xctestrun_data = {
'TestTargetName': {
'IsAppHostedTestBundle': True,
'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),
'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),
'TestHostPath': '%s' % self.test_app_path,
'TestingEnvironmentVariables': {
'DYLD_INSERT_LIBRARIES':
'__PLATFORMS__/iPhoneSimulator.platform/Developer/usr/lib/'
'libXCTestBundleInject.dylib',
'DYLD_LIBRARY_PATH':
'__PLATFORMS__/iPhoneSimulator.platform/Developer/Library',
'DYLD_FRAMEWORK_PATH':
'__PLATFORMS__/iPhoneSimulator.platform/Developer/'
'Library/Frameworks',
'XCInjectBundleInto':
'__TESTHOST__/%s' % self.module_name
}
}
}
if self.env_vars:
      xctestrun_data['TestTargetName'].update(
          {'EnvironmentVariables': self.env_vars})
gtest_filter = []
if self.included_tests:
gtest_filter = get_gtest_filter(self.included_tests, invert=False)
elif self.excluded_tests:
gtest_filter = get_gtest_filter(self.excluded_tests, invert=True)
if gtest_filter:
      # Remove any previous gtest_filter if it exists.
self.test_args = [
el for el in self.test_args if not el.startswith('--gtest_filter=')
]
self.test_args.append('--gtest_filter=%s' % gtest_filter)
self.test_args.append('--gmock_verbose=error')
xctestrun_data['TestTargetName'].update(
{'CommandLineArguments': self.test_args})
return xctestrun_data
|
|
import functools
import hashlib
import json
import random
import uuid
from operator import attrgetter
from django import http
from django.conf import settings
from django.db.models import Q
from django.shortcuts import (get_list_or_404, get_object_or_404, redirect,
render)
from django.utils.translation import trans_real as translation
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.vary import vary_on_headers
import caching.base as caching
import jinja2
import commonware.log
import session_csrf
from tower import ugettext as _, ugettext_lazy as _lazy
import waffle
from mobility.decorators import mobilized, mobile_template
import amo
from amo import messages
from amo.decorators import post_required
from amo.forms import AbuseForm
from amo.utils import randslice, sorted_groupby
from amo.models import manual_order
from amo import urlresolvers
from amo.urlresolvers import reverse
from abuse.models import send_abuse_report
from bandwagon.models import Collection, CollectionFeature, CollectionPromo
import paypal
from reviews.forms import ReviewForm
from reviews.models import Review, GroupedRating
from session_csrf import anonymous_csrf_exempt
from sharing.views import share as share_redirect
from stats.models import Contribution
from translations.query import order_by_translation
from versions.models import Version
from .forms import ContributionForm
from .models import Addon, Persona, FrozenAddon
from .decorators import addon_view_factory
log = commonware.log.getLogger('z.addons')
paypal_log = commonware.log.getLogger('z.paypal')
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed)
addon_valid_disabled_pending_view = addon_view_factory(
qs=Addon.objects.valid_and_disabled_and_pending)
def author_addon_clicked(f):
"""Decorator redirecting clicks on "Other add-ons by author"."""
@functools.wraps(f)
def decorated(request, *args, **kwargs):
redirect_id = request.GET.get('addons-author-addons-select', None)
if not redirect_id:
return f(request, *args, **kwargs)
try:
target_id = int(redirect_id)
return http.HttpResponsePermanentRedirect(reverse(
'addons.detail', args=[target_id]))
except ValueError:
return http.HttpResponseBadRequest('Invalid add-on ID.')
return decorated
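# Illustrative use (view name hypothetical):
#   @author_addon_clicked
#   def detail(request, addon_id):
#       ...
# A request carrying ?addons-author-addons-select=123 is permanently
# redirected to the detail page for add-on 123; a non-integer value gets
# a 400 response.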
@addon_valid_disabled_pending_view
def addon_detail(request, addon):
"""Add-ons details page dispatcher."""
if addon.is_deleted or (addon.is_pending() and not addon.is_persona()):
# Allow pending themes to be listed.
raise http.Http404
if addon.is_disabled:
return render(request, 'addons/impala/disabled.html',
{'addon': addon}, status=404)
# addon needs to have a version and be valid for this app.
if addon.type in request.APP.types:
if addon.type == amo.ADDON_PERSONA:
return persona_detail(request, addon)
else:
if not addon.current_version:
raise http.Http404
return extension_detail(request, addon)
else:
# Redirect to an app that supports this type.
try:
new_app = [a for a in amo.APP_USAGE if addon.type
in a.types][0]
except IndexError:
raise http.Http404
else:
prefixer = urlresolvers.get_url_prefix()
prefixer.app = new_app.short
return http.HttpResponsePermanentRedirect(reverse(
'addons.detail', args=[addon.slug]))
@vary_on_headers('X-Requested-With')
def extension_detail(request, addon):
"""Extensions details page."""
# If current version is incompatible with this app, redirect.
comp_apps = addon.compatible_apps
if comp_apps and request.APP not in comp_apps:
prefixer = urlresolvers.get_url_prefix()
prefixer.app = comp_apps.keys()[0].short
return redirect('addons.detail', addon.slug, permanent=True)
# Addon recommendations.
recommended = Addon.objects.listed(request.APP).filter(
recommended_for__addon=addon)[:6]
# Popular collections this addon is part of.
collections = Collection.objects.listed().filter(
addons=addon, application__id=request.APP.id)
ctx = {
'addon': addon,
'src': request.GET.get('src', 'dp-btn-primary'),
'version_src': request.GET.get('src', 'dp-btn-version'),
'tags': addon.tags.not_blacklisted(),
'grouped_ratings': GroupedRating.get(addon.id),
'recommendations': recommended,
'review_form': ReviewForm(),
'reviews': Review.objects.valid().filter(addon=addon, is_latest=True),
'get_replies': Review.get_replies,
'collections': collections.order_by('-subscribers')[:3],
'abuse_form': AbuseForm(request=request),
}
# details.html just returns the top half of the page for speed. The bottom
# does a lot more queries we don't want on the initial page load.
if request.is_ajax():
# Other add-ons/apps from the same author(s).
ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6]
return render(request, 'addons/impala/details-more.html', ctx)
else:
return render(request, 'addons/impala/details.html', ctx)
@mobilized(extension_detail)
def extension_detail(request, addon):
return render(request, 'addons/mobile/details.html', {'addon': addon})
def _category_personas(qs, limit):
f = lambda: randslice(qs, limit=limit)
key = 'cat-personas:' + qs.query_key()
return caching.cached(f, key)
@mobile_template('addons/{mobile/}persona_detail.html')
def persona_detail(request, addon, template=None):
"""Details page for Personas."""
if not (addon.is_public() or addon.is_pending()):
raise http.Http404
persona = addon.persona
# This persona's categories.
categories = addon.categories.all()
category_personas = None
if categories.exists():
qs = Addon.objects.public().filter(categories=categories[0])
category_personas = _category_personas(qs, limit=6)
data = {
'addon': addon,
'persona': persona,
'categories': categories,
'author_personas': persona.authors_other_addons()[:3],
'category_personas': category_personas,
}
try:
author = addon.authors.all()[0]
except IndexError:
author = None
else:
author = author.get_url_path(src='addon-detail')
data['author_gallery'] = author
if not request.MOBILE:
# tags
dev_tags, user_tags = addon.tags_partitioned_by_developer
data.update({
'dev_tags': dev_tags,
'user_tags': user_tags,
'review_form': ReviewForm(),
'reviews': Review.objects.valid().filter(addon=addon,
is_latest=True),
'get_replies': Review.get_replies,
'search_cat': 'themes',
'abuse_form': AbuseForm(request=request),
})
return render(request, template, data)
class BaseFilter(object):
"""
Filters help generate querysets for add-on listings.
You have to define ``opts`` on the subclass as a sequence of (key, title)
pairs. The key is used in GET parameters and the title can be used in the
view.
The chosen filter field is combined with the ``base`` queryset using
the ``key`` found in request.GET. ``default`` should be a key in ``opts``
that's used if nothing good is found in request.GET.
"""
def __init__(self, request, base, key, default, model=Addon):
self.opts_dict = dict(self.opts)
self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}
self.request = request
self.base_queryset = base
self.key = key
self.model = model
self.field, self.title = self.options(self.request, key, default)
self.qs = self.filter(self.field)
def options(self, request, key, default):
"""Get the (option, title) pair we want according to the request."""
if key in request.GET and (request.GET[key] in self.opts_dict or
request.GET[key] in self.extras_dict):
opt = request.GET[key]
else:
opt = default
if opt in self.opts_dict:
title = self.opts_dict[opt]
else:
title = self.extras_dict[opt]
return opt, title
def all(self):
"""Get a full mapping of {option: queryset}."""
return dict((field, self.filter(field)) for field in dict(self.opts))
def filter(self, field):
"""Get the queryset for the given field."""
filter = self._filter(field) & self.base_queryset
order = getattr(self, 'order_%s' % field, None)
if order:
return order(filter)
return filter
def _filter(self, field):
return getattr(self, 'filter_%s' % field)()
def filter_featured(self):
ids = self.model.featured_random(self.request.APP, self.request.LANG)
return manual_order(self.model.objects, ids, 'addons.id')
def filter_free(self):
if self.model == Addon:
return self.model.objects.top_free(self.request.APP, listed=False)
else:
return self.model.objects.top_free(listed=False)
def filter_paid(self):
if self.model == Addon:
return self.model.objects.top_paid(self.request.APP, listed=False)
else:
return self.model.objects.top_paid(listed=False)
def filter_popular(self):
return (self.model.objects.order_by('-weekly_downloads')
.with_index(addons='downloads_type_idx'))
def filter_downloads(self):
return self.filter_popular()
def filter_users(self):
return (self.model.objects.order_by('-average_daily_users')
.with_index(addons='adus_type_idx'))
def filter_created(self):
return (self.model.objects.order_by('-created')
.with_index(addons='created_type_idx'))
def filter_updated(self):
return (self.model.objects.order_by('-last_updated')
.with_index(addons='last_updated_type_idx'))
def filter_rating(self):
return (self.model.objects.order_by('-bayesian_rating')
.with_index(addons='rating_type_idx'))
def filter_hotness(self):
return self.model.objects.order_by('-hotness')
def filter_name(self):
return order_by_translation(self.model.objects.all(), 'name')
class ESBaseFilter(BaseFilter):
"""BaseFilter that uses elasticsearch."""
def __init__(self, request, base, key, default):
super(ESBaseFilter, self).__init__(request, base, key, default)
def filter(self, field):
sorts = {'name': 'name_sort',
'created': '-created',
'updated': '-last_updated',
'popular': '-weekly_downloads',
'users': '-average_daily_users',
'rating': '-bayesian_rating'}
return self.base_queryset.order_by(sorts[field])
class HomepageFilter(BaseFilter):
opts = (('featured', _lazy(u'Featured')),
('popular', _lazy(u'Popular')),
('new', _lazy(u'Recently Added')),
('updated', _lazy(u'Recently Updated')))
filter_new = BaseFilter.filter_created
def home(request):
# Add-ons.
base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION)
# This is lame for performance. Kill it with ES.
frozen = list(FrozenAddon.objects.values_list('addon', flat=True))
# Collections.
collections = Collection.objects.filter(listed=True,
application=request.APP.id,
type=amo.COLLECTION_FEATURED)
featured = Addon.objects.featured(request.APP, request.LANG,
amo.ADDON_EXTENSION)[:18]
popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10]
hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18]
personas = Addon.objects.featured(request.APP, request.LANG,
amo.ADDON_PERSONA)[:18]
return render(request, 'addons/home.html',
{'popular': popular, 'featured': featured,
'hotness': hotness, 'personas': personas,
'src': 'homepage', 'collections': collections})
@mobilized(home)
def home(request):
# Shuffle the list and get 3 items.
rand = lambda xs: random.shuffle(xs) or xs[:3]
# Get some featured add-ons with randomness.
featured = Addon.featured_random(request.APP, request.LANG)[:3]
# Get 10 popular add-ons, then pick 3 at random.
qs = list(Addon.objects.listed(request.APP)
.filter(type=amo.ADDON_EXTENSION)
.order_by('-average_daily_users')
.values_list('id', flat=True)[:10])
popular = rand(qs)
# Do one query and split up the add-ons.
addons = (Addon.objects.filter(id__in=featured + popular)
.filter(type=amo.ADDON_EXTENSION))
featured = [a for a in addons if a.id in featured]
popular = sorted([a for a in addons if a.id in popular],
key=attrgetter('average_daily_users'), reverse=True)
return render(request, 'addons/mobile/home.html',
{'featured': featured, 'popular': popular})
def homepage_promos(request):
from discovery.views import promos
version, platform = request.GET.get('version'), request.GET.get('platform')
if not (platform or version):
raise http.Http404
return promos(request, 'home', version, platform)
class CollectionPromoBox(object):
def __init__(self, request):
self.request = request
def features(self):
return CollectionFeature.objects.all()
def collections(self):
features = self.features()
lang = translation.to_language(translation.get_language())
locale = Q(locale='') | Q(locale=lang)
promos = (CollectionPromo.objects.filter(locale)
.filter(collection_feature__in=features)
.transform(CollectionPromo.transformer))
groups = sorted_groupby(promos, 'collection_feature_id')
# We key by feature_id and locale, so we can favor locale specific
# promos.
promo_dict = {}
for feature_id, v in groups:
promo = v.next()
key = (feature_id, translation.to_language(promo.locale))
promo_dict[key] = promo
rv = {}
# If we can, we favor locale specific collections.
for feature in features:
key = (feature.id, lang)
if key not in promo_dict:
key = (feature.id, '')
if key not in promo_dict:
continue
# We only want to see public add-ons on the front page.
c = promo_dict[key].collection
c.public_addons = c.addons.all() & Addon.objects.public()
rv[feature] = c
return rv
def __nonzero__(self):
return self.request.APP == amo.FIREFOX
@addon_view
def eula(request, addon, file_id=None):
if not addon.eula:
return http.HttpResponseRedirect(addon.get_url_path())
if file_id:
version = get_object_or_404(addon.versions, files__id=file_id)
else:
version = addon.current_version
return render(request, 'addons/eula.html',
{'addon': addon, 'version': version})
@addon_view
def privacy(request, addon):
if not addon.privacy_policy:
return http.HttpResponseRedirect(addon.get_url_path())
return render(request, 'addons/privacy.html', {'addon': addon})
@addon_view
def developers(request, addon, page):
if addon.is_persona():
raise http.Http404()
if 'version' in request.GET:
qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)
version = get_list_or_404(qs, version=request.GET['version'])[0]
else:
version = addon.current_version
if 'src' in request.GET:
contribution_src = src = request.GET['src']
else:
page_srcs = {
'developers': ('developers', 'meet-developers'),
'installed': ('meet-the-developer-post-install', 'post-download'),
'roadblock': ('meetthedeveloper_roadblock', 'roadblock'),
}
# Download src and contribution_src are different.
src, contribution_src = page_srcs.get(page)
return render(request, 'addons/impala/developers.html',
{'addon': addon, 'page': page, 'src': src,
'contribution_src': contribution_src,
'version': version})
@addon_view
@anonymous_csrf_exempt
@post_required
def contribute(request, addon):
contrib_type = request.POST.get('type', 'suggested')
is_suggested = contrib_type == 'suggested'
source = request.POST.get('source', '')
comment = request.POST.get('comment', '')
amount = {
'suggested': addon.suggested_amount,
'onetime': request.POST.get('onetime-amount', '')
}.get(contrib_type, '')
if not amount:
amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION
# This is all going to get shoved into solitude. Temporary.
form = ContributionForm({'amount': amount})
if not form.is_valid():
return http.HttpResponse(json.dumps({'error': 'Invalid data.',
'status': '', 'url': '',
'paykey': ''}),
content_type='application/json')
contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest()
if addon.charity:
# TODO(andym): Figure out how to get this in the addon authors
# locale, rather than the contributors locale.
name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name),
addon.charity.paypal)
else:
name, paypal_id = addon.name, addon.paypal_id
# l10n: {0} is the addon name
contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name))
paykey, error, status = '', '', ''
try:
paykey, status = paypal.get_paykey(
dict(amount=amount,
email=paypal_id,
ip=request.META.get('REMOTE_ADDR'),
memo=contrib_for,
pattern='addons.paypal',
slug=addon.slug,
uuid=contribution_uuid))
except paypal.PaypalError as error:
paypal.paypal_log_cef(request, addon, contribution_uuid,
'PayKey Failure', 'PAYKEYFAIL',
'There was an error getting the paykey')
log.error('Error getting paykey, contribution for addon: %s'
% addon.pk, exc_info=True)
if paykey:
contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id,
amount=amount, source=source,
source_locale=request.LANG,
annoying=addon.annoying,
uuid=str(contribution_uuid),
is_suggested=is_suggested,
suggested_amount=addon.suggested_amount,
comment=comment, paykey=paykey)
contrib.save()
url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)
if request.GET.get('result_type') == 'json' or request.is_ajax():
# If there was an error getting the paykey, then JSON will
# not have a paykey and the JS can cope appropriately.
return http.HttpResponse(json.dumps({'url': url,
'paykey': paykey,
'error': str(error),
'status': status}),
content_type='application/json')
return http.HttpResponseRedirect(url)
@csrf_exempt
@addon_view
def paypal_result(request, addon, status):
uuid = request.GET.get('uuid')
if not uuid:
raise http.Http404()
if status == 'cancel':
log.info('User cancelled contribution: %s' % uuid)
else:
log.info('User completed contribution: %s' % uuid)
response = render(request, 'addons/paypal_result.html',
{'addon': addon, 'status': status})
response['x-frame-options'] = 'allow'
return response
@addon_view
def share(request, addon):
"""Add-on sharing"""
return share_redirect(request, addon, addon.name, addon.summary)
@addon_view
def license(request, addon, version=None):
if version is not None:
qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)
version = get_list_or_404(qs, version=version)[0]
else:
version = addon.current_version
if not (version and version.license):
raise http.Http404
return render(request, 'addons/impala/license.html',
dict(addon=addon, version=version))
def license_redirect(request, version):
version = get_object_or_404(Version, pk=version)
return redirect(version.license_url(), permanent=True)
@session_csrf.anonymous_csrf_exempt
@addon_view
def report_abuse(request, addon):
form = AbuseForm(request.POST or None, request=request)
if request.method == "POST" and form.is_valid():
send_abuse_report(request, addon, form.cleaned_data['text'])
messages.success(request, _('Abuse reported.'))
return http.HttpResponseRedirect(addon.get_url_path())
else:
return render(request, 'addons/report_abuse_full.html',
{'addon': addon, 'abuse_form': form})
@cache_control(max_age=60 * 60 * 24)
def persona_redirect(request, persona_id):
    if str(persona_id) == '0':
        # Newer themes have persona_id == 0, which doesn't map to anything;
        # the URL kwarg arrives as a string, so compare against '0'.
        return http.HttpResponseNotFound()
persona = get_object_or_404(Persona, persona_id=persona_id)
try:
to = reverse('addons.detail', args=[persona.addon.slug])
except Addon.DoesNotExist:
# Would otherwise throw 500. Something funky happened during GP
# migration which caused some Personas to be without Addons (problem
# with cascading deletes?). Tell GoogleBot these are dead with a 404.
return http.HttpResponseNotFound()
return http.HttpResponsePermanentRedirect(to)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the filesystem backend store"""
import __builtin__
import errno
import hashlib
import json
import mock
import os
import StringIO
import uuid
import fixtures
import six
from glance.store._drivers.filesystem import ChunkedFile
from glance.store._drivers.filesystem import Store
from glance.store import exceptions
from glance.store.location import get_location_from_uri
from glance.store.openstack.common import units
from glance.store.tests import base
KB = 1024
class TestStore(base.StoreBaseTest):
def setUp(self):
"""Establish a clean test environment."""
super(TestStore, self).setUp()
self.orig_chunksize = ChunkedFile.CHUNKSIZE
ChunkedFile.CHUNKSIZE = 10
self.store = Store(self.conf)
self.config(filesystem_store_datadir=self.test_dir,
group="glance_store")
self.store.configure()
def tearDown(self):
"""Clear the test environment."""
super(TestStore, self).tearDown()
ChunkedFile.CHUNKSIZE = self.orig_chunksize
def test_get(self):
"""Test a "normal" retrieval of an image in chunks."""
# First add an image...
image_id = str(uuid.uuid4())
file_contents = "chunk00000remainder"
image_file = StringIO.StringIO(file_contents)
location, size, checksum, _ = self.store.add(image_id,
image_file,
len(file_contents))
# Now read it back...
uri = "file:///%s/%s" % (self.test_dir, image_id)
loc = get_location_from_uri(uri)
(image_file, image_size) = self.store.get(loc)
expected_data = "chunk00000remainder"
expected_num_chunks = 2
data = ""
num_chunks = 0
for chunk in image_file:
num_chunks += 1
data += chunk
self.assertEqual(expected_data, data)
self.assertEqual(expected_num_chunks, num_chunks)
def test_get_non_existing(self):
"""
Test that trying to retrieve a file that doesn't exist
raises an error
"""
loc = get_location_from_uri("file:///%s/non-existing" % self.test_dir)
self.assertRaises(exceptions.NotFound,
self.store.get,
loc)
def test_add(self):
"""Test that we can add an image via the filesystem backend"""
ChunkedFile.CHUNKSIZE = 1024
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * KB # 5K
expected_file_contents = "*" * expected_file_size
expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (self.test_dir,
expected_image_id)
image_file = StringIO.StringIO(expected_file_contents)
location, size, checksum, _ = self.store.add(expected_image_id,
image_file,
expected_file_size)
self.assertEqual(expected_location, location)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
uri = "file:///%s/%s" % (self.test_dir, expected_image_id)
loc = get_location_from_uri(uri)
(new_image_file, new_image_size) = self.store.get(loc)
new_image_contents = ""
new_image_file_size = 0
for chunk in new_image_file:
new_image_file_size += len(chunk)
new_image_contents += chunk
self.assertEqual(expected_file_contents, new_image_contents)
self.assertEqual(expected_file_size, new_image_file_size)
def test_add_check_metadata_success(self):
expected_image_id = str(uuid.uuid4())
in_metadata = {'akey': u'some value', 'list': [u'1', u'2', u'3']}
jsonfilename = os.path.join(self.test_dir,
"storage_metadata.%s" % expected_image_id)
self.config(filesystem_store_metadata_file=jsonfilename,
group="glance_store")
with open(jsonfilename, 'w') as fptr:
json.dump(in_metadata, fptr)
expected_file_size = 10
expected_file_contents = "*" * expected_file_size
image_file = StringIO.StringIO(expected_file_contents)
location, size, checksum, metadata = self.store.add(expected_image_id,
image_file,
expected_file_size)
self.assertEqual(metadata, in_metadata)
def test_add_check_metadata_bad_data(self):
expected_image_id = str(uuid.uuid4())
in_metadata = {'akey': 10} # only unicode is allowed
jsonfilename = os.path.join(self.test_dir,
"storage_metadata.%s" % expected_image_id)
self.config(filesystem_store_metadata_file=jsonfilename,
group="glance_store")
with open(jsonfilename, 'w') as fptr:
json.dump(in_metadata, fptr)
expected_file_size = 10
expected_file_contents = "*" * expected_file_size
image_file = StringIO.StringIO(expected_file_contents)
location, size, checksum, metadata = self.store.add(expected_image_id,
image_file,
expected_file_size)
self.assertEqual(metadata, {})
def test_add_check_metadata_bad_nosuch_file(self):
expected_image_id = str(uuid.uuid4())
jsonfilename = os.path.join(self.test_dir,
"storage_metadata.%s" % expected_image_id)
self.config(filesystem_store_metadata_file=jsonfilename,
group="glance_store")
expected_file_size = 10
expected_file_contents = "*" * expected_file_size
image_file = StringIO.StringIO(expected_file_contents)
location, size, checksum, metadata = self.store.add(expected_image_id,
image_file,
expected_file_size)
self.assertEqual(metadata, {})
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
ChunkedFile.CHUNKSIZE = 1024
image_id = str(uuid.uuid4())
file_size = 5 * KB # 5K
file_contents = "*" * file_size
image_file = StringIO.StringIO(file_contents)
location, size, checksum, _ = self.store.add(image_id,
image_file,
file_size)
image_file = StringIO.StringIO("nevergonnamakeit")
self.assertRaises(exceptions.Duplicate,
self.store.add,
image_id, image_file, 0)
def _do_test_add_write_failure(self, errno, exception):
ChunkedFile.CHUNKSIZE = 1024
image_id = str(uuid.uuid4())
file_size = 5 * KB # 5K
file_contents = "*" * file_size
path = os.path.join(self.test_dir, image_id)
image_file = StringIO.StringIO(file_contents)
with mock.patch.object(__builtin__, 'open') as popen:
e = IOError()
e.errno = errno
popen.side_effect = e
self.assertRaises(exception,
self.store.add,
image_id, image_file, 0)
self.assertFalse(os.path.exists(path))
def test_add_storage_full(self):
"""
Tests that adding an image without enough space on disk
raises an appropriate exception
"""
self._do_test_add_write_failure(errno.ENOSPC, exceptions.StorageFull)
def test_add_file_too_big(self):
"""
Tests that adding an excessively large image file
raises an appropriate exception
"""
self._do_test_add_write_failure(errno.EFBIG, exceptions.StorageFull)
def test_add_storage_write_denied(self):
"""
Tests that adding an image with insufficient filestore permissions
raises an appropriate exception
"""
self._do_test_add_write_failure(errno.EACCES,
exceptions.StorageWriteDenied)
def test_add_other_failure(self):
"""
Tests that a non-space-related IOError does not raise a
        StorageFull exception.
"""
self._do_test_add_write_failure(errno.ENOTDIR, IOError)
def test_add_cleanup_on_read_failure(self):
"""
        Tests that the partial image file is cleaned up after a read
failure.
"""
ChunkedFile.CHUNKSIZE = 1024
image_id = str(uuid.uuid4())
file_size = 5 * KB # 5K
file_contents = "*" * file_size
path = os.path.join(self.test_dir, image_id)
image_file = StringIO.StringIO(file_contents)
def fake_Error(size):
raise AttributeError()
with mock.patch.object(image_file, 'read') as mock_read:
mock_read.side_effect = fake_Error
self.assertRaises(AttributeError,
self.store.add,
image_id, image_file, 0)
self.assertFalse(os.path.exists(path))
def test_delete(self):
"""
        Test that we can delete an existing image in the filesystem store
"""
# First add an image
image_id = str(uuid.uuid4())
file_size = 5 * KB # 5K
file_contents = "*" * file_size
image_file = StringIO.StringIO(file_contents)
location, size, checksum, _ = self.store.add(image_id,
image_file,
file_size)
# Now check that we can delete it
uri = "file:///%s/%s" % (self.test_dir, image_id)
loc = get_location_from_uri(uri)
self.store.delete(loc)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
Test that trying to delete a file that doesn't exist
raises an error
"""
loc = get_location_from_uri("file:///tmp/glance-tests/non-existing")
self.assertRaises(exceptions.NotFound,
self.store.delete,
loc)
def test_configure_add_with_multi_datadirs(self):
"""
        Tests that multiple filesystem directories specified by
        filesystem_store_datadirs are parsed correctly.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.clear_override('filesystem_store_datadir',
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200"],
group='glance_store')
self.store.configure_add()
expected_priority_map = {100: [store_map[0]], 200: [store_map[1]]}
expected_priority_list = [200, 100]
self.assertEqual(self.store.priority_data_map, expected_priority_map)
self.assertEqual(self.store.priority_list, expected_priority_list)
def test_configure_add_same_dir_multiple_times(self):
"""
        Tests that a BadStoreConfiguration exception is raised if the same
        directory is specified multiple times in filesystem_store_datadirs.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.clear_override('filesystem_store_datadir',
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200",
store_map[0] + ":300"],
group='glance_store')
self.assertRaises(exceptions.BadStoreConfiguration,
self.store.configure_add)
def test_add_with_multiple_dirs(self):
"""Test adding multiple filesystem directories."""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.clear_override('filesystem_store_datadir',
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200"],
group='glance_store')
self.store.configure_add()
"""Test that we can add an image via the filesystem backend"""
ChunkedFile.CHUNKSIZE = 1024
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = "*" * expected_file_size
expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
expected_location = "file://%s/%s" % (store_map[1],
expected_image_id)
image_file = six.StringIO(expected_file_contents)
location, size, checksum, _ = self.store.add(expected_image_id,
image_file,
expected_file_size)
self.assertEqual(expected_location, location)
self.assertEqual(expected_file_size, size)
self.assertEqual(expected_checksum, checksum)
loc = get_location_from_uri(expected_location)
(new_image_file, new_image_size) = self.store.get(loc)
new_image_contents = ""
new_image_file_size = 0
for chunk in new_image_file:
new_image_file_size += len(chunk)
new_image_contents += chunk
self.assertEqual(expected_file_contents, new_image_contents)
self.assertEqual(expected_file_size, new_image_file_size)
def test_add_with_multiple_dirs_storage_full(self):
"""
        Tests that a StorageFull exception is raised if no filesystem
        directory is found that can store an image.
"""
store_map = [self.useFixture(fixtures.TempDir()).path,
self.useFixture(fixtures.TempDir()).path]
self.conf.clear_override('filesystem_store_datadir',
group='glance_store')
self.conf.set_override('filesystem_store_datadirs',
[store_map[0] + ":100",
store_map[1] + ":200"],
group='glance_store')
self.store.configure_add()
with mock.patch.object(self.store, '_get_capacity_info') as capacity:
capacity.return_value = 0
ChunkedFile.CHUNKSIZE = 1024
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
expected_file_contents = "*" * expected_file_size
image_file = six.StringIO(expected_file_contents)
self.assertRaises(exceptions.StorageFull, self.store.add,
expected_image_id, image_file,
expected_file_size)
|
|
#! /usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The script for family tree or general graphs experiments."""
import copy
import collections
import functools
import os
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import jacinle.random as random
import jacinle.io as io
import jactorch.nn as jacnn
from difflogic.cli import format_args
from difflogic.dataset.graph import GraphOutDegreeDataset, \
GraphConnectivityDataset, GraphAdjacentDataset, FamilyTreeDataset
from difflogic.nn.baselines import MemoryNet
from difflogic.nn.neural_logic import LogicMachine, LogicInference, LogitsInference
from difflogic.nn.neural_logic.modules._utils import meshgrid_exclude_self
from difflogic.nn.rl.reinforce import REINFORCELoss
from difflogic.thutils import binary_accuracy
from difflogic.train import TrainerBase
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger, set_output_file
from jacinle.utils.container import GView
from jacinle.utils.meter import GroupMeters
from jactorch.data.dataloader import JacDataLoader
from jactorch.optim.accum_grad import AccumGrad
from jactorch.optim.quickaccess import get_optimizer
from jactorch.train.env import TrainerEnv
from jactorch.utils.meta import as_cuda, as_numpy, as_tensor
TASKS = [
'outdegree', 'connectivity', 'adjacent', 'adjacent-mnist', 'has-father',
'has-sister', 'grandparents', 'uncle', 'maternal-great-uncle'
]
parser = JacArgumentParser()
parser.add_argument(
'--model',
default='nlm',
choices=['nlm', 'memnet'],
help='model choices, nlm: Neural Logic Machine, memnet: Memory Networks')
# NLM parameters, works when model is 'nlm'
nlm_group = parser.add_argument_group('Neural Logic Machines')
LogicMachine.make_nlm_parser(
nlm_group, {
'depth': 4,
'breadth': 3,
'exclude_self': True,
'logic_hidden_dim': []
},
prefix='nlm')
nlm_group.add_argument(
'--nlm-attributes',
type=int,
default=8,
metavar='N',
help='number of output attributes in each group of each layer of the LogicMachine'
)
# MemNN parameters, works when model is 'memnet'
memnet_group = parser.add_argument_group('Memory Networks')
MemoryNet.make_memnet_parser(memnet_group, {}, prefix='memnet')
# task related
task_group = parser.add_argument_group('Task')
task_group.add_argument(
    '--task', required=True, choices=TASKS, help='task choice')
task_group.add_argument(
'--train-number',
type=int,
default=10,
metavar='N',
    help='size (number of graph nodes or family members) of training instances')
task_group.add_argument(
'--adjacent-pred-colors', type=int, default=4, metavar='N')
task_group.add_argument('--outdegree-n', type=int, default=2, metavar='N')
task_group.add_argument(
'--connectivity-dist-limit', type=int, default=4, metavar='N')
data_gen_group = parser.add_argument_group('Data Generation')
data_gen_group.add_argument(
'--gen-graph-method',
default='edge',
choices=['dnc', 'edge'],
    help='method used to generate random graphs')
data_gen_group.add_argument(
'--gen-graph-pmin',
type=float,
default=0.0,
metavar='F',
    help='lower bound of the control parameter p reflecting graph sparsity')
data_gen_group.add_argument(
'--gen-graph-pmax',
type=float,
default=0.3,
metavar='F',
    help='upper bound of the control parameter p reflecting graph sparsity')
data_gen_group.add_argument(
'--gen-graph-colors',
type=int,
default=4,
metavar='N',
help='number of colors in adjacent task')
data_gen_group.add_argument(
'--gen-directed', action='store_true', help='directed graph')
train_group = parser.add_argument_group('Train')
train_group.add_argument(
'--seed',
type=int,
default=None,
metavar='SEED',
help='seed of jacinle.random')
train_group.add_argument(
'--use-gpu', action='store_true', help='use GPU or not')
train_group.add_argument(
'--optimizer',
default='AdamW',
choices=['SGD', 'Adam', 'AdamW'],
help='optimizer choices')
train_group.add_argument(
'--lr',
type=float,
default=0.005,
metavar='F',
help='initial learning rate')
train_group.add_argument(
'--lr-decay',
type=float,
default=1.0,
metavar='F',
help='exponential decay of learning rate per lesson')
train_group.add_argument(
'--accum-grad',
type=int,
default=1,
metavar='N',
help='accumulated gradient for batches (default: 1)')
train_group.add_argument(
'--ohem-size',
type=int,
default=0,
metavar='N',
help='size of online hard negative mining')
train_group.add_argument(
'--batch-size',
type=int,
default=4,
metavar='N',
help='batch size for training')
train_group.add_argument(
'--test-batch-size',
type=int,
default=4,
metavar='N',
help='batch size for testing')
train_group.add_argument(
'--early-stop-loss-thresh',
type=float,
default=1e-5,
metavar='F',
help='threshold of loss for early stop')
# Note that nr_examples_per_epoch = epoch_size * batch_size
TrainerBase.make_trainer_parser(
parser, {
'epochs': 50,
'epoch_size': 250,
'test_epoch_size': 250,
'test_number_begin': 10,
'test_number_step': 10,
'test_number_end': 50,
})
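# For example, with the epoch_size default above (250) and the default
# --batch-size of 4, one training epoch consumes 250 * 4 = 1000 instances.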
io_group = parser.add_argument_group('Input/Output')
io_group.add_argument(
'--dump-dir', type=str, default=None, metavar='DIR', help='dump dir')
io_group.add_argument(
'--load-checkpoint',
type=str,
default=None,
metavar='FILE',
help='load parameters from checkpoint')
schedule_group = parser.add_argument_group('Schedule')
schedule_group.add_argument(
'--runs', type=int, default=1, metavar='N', help='number of runs')
schedule_group.add_argument(
'--save-interval',
type=int,
default=10,
metavar='N',
    help='the interval (number of epochs) at which to save checkpoints')
schedule_group.add_argument(
'--test-interval',
type=int,
default=None,
metavar='N',
    help='the interval (number of epochs) at which to run tests')
schedule_group.add_argument(
'--test-only', action='store_true', help='test-only mode')
logger = get_logger(__file__)
args = parser.parse_args()
args.use_gpu = args.use_gpu and torch.cuda.is_available()
if args.dump_dir is not None:
io.mkdir(args.dump_dir)
args.log_file = os.path.join(args.dump_dir, 'log.log')
set_output_file(args.log_file)
else:
args.checkpoints_dir = None
args.summary_file = None
if args.seed is not None:
random.reset_global_seed(args.seed)
args.task_is_outdegree = args.task in ['outdegree']
args.task_is_connectivity = args.task in ['connectivity']
args.task_is_adjacent = args.task in ['adjacent', 'adjacent-mnist']
args.task_is_family_tree = args.task in [
'has-father', 'has-sister', 'grandparents', 'uncle', 'maternal-great-uncle'
]
args.task_is_mnist_input = args.task in ['adjacent-mnist']
args.task_is_1d_output = args.task in [
'outdegree', 'adjacent', 'adjacent-mnist', 'has-father', 'has-sister'
]
class LeNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = jacnn.Conv2dLayer(
1, 10, kernel_size=5, batch_norm=True, activation='relu')
self.conv2 = jacnn.Conv2dLayer(
10,
20,
kernel_size=5,
batch_norm=True,
dropout=False,
activation='relu')
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.max_pool2d(self.conv1(x), 2)
x = F.max_pool2d(self.conv2(x), 2)
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
class Model(nn.Module):
"""The model for family tree or general graphs path tasks."""
def __init__(self):
super().__init__()
# inputs
input_dim = 4 if args.task_is_family_tree else 1
self.feature_axis = 1 if args.task_is_1d_output else 2
# features
if args.model == 'nlm':
input_dims = [0 for _ in range(args.nlm_breadth + 1)]
if args.task_is_adjacent:
input_dims[1] = args.gen_graph_colors
if args.task_is_mnist_input:
self.lenet = LeNet()
input_dims[2] = input_dim
self.features = LogicMachine.from_args(
input_dims, args.nlm_attributes, args, prefix='nlm')
output_dim = self.features.output_dims[self.feature_axis]
elif args.model == 'memnet':
if args.task_is_adjacent:
input_dim += args.gen_graph_colors
self.feature = MemoryNet.from_args(
input_dim, self.feature_axis, args, prefix='memnet')
output_dim = self.feature.get_output_dim()
# target
target_dim = args.adjacent_pred_colors if args.task_is_adjacent else 1
self.pred = LogicInference(output_dim, target_dim, [])
# losses
if args.ohem_size > 0:
from jactorch.nn.losses import BinaryCrossEntropyLossWithProbs as BCELoss
self.loss = BCELoss(average='none')
else:
self.loss = nn.BCELoss()
def forward(self, feed_dict):
feed_dict = GView(feed_dict)
# properties
if args.task_is_adjacent:
states = feed_dict.states.float()
else:
states = None
# relations
relations = feed_dict.relations.float()
batch_size, nr = relations.size()[:2]
if args.model == 'nlm':
if args.task_is_adjacent and args.task_is_mnist_input:
states_shape = states.size()
states = states.view((-1,) + states_shape[2:])
states = self.lenet(states)
states = states.view(states_shape[:2] + (-1,))
states = F.sigmoid(states)
inp = [None for _ in range(args.nlm_breadth + 1)]
inp[1] = states
inp[2] = relations
depth = None
if args.nlm_recursion:
depth = 1
while 2**depth + 1 < nr:
depth += 1
depth = depth * 2 + 1
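        # Worked example: for nr = 10 nodes, depth grows 1 -> 4
        # (since 2**4 + 1 = 17 >= 10), giving an unrolled depth of
        # 4 * 2 + 1 = 9.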
feature = self.features(inp, depth=depth)[self.feature_axis]
elif args.model == 'memnet':
feature = self.feature(relations, states)
if args.task_is_adjacent and args.task_is_mnist_input:
raise NotImplementedError()
pred = self.pred(feature)
if not args.task_is_adjacent:
pred = pred.squeeze(-1)
if args.task_is_connectivity:
pred = meshgrid_exclude_self(pred) # exclude self-cycle
if self.training:
monitors = dict()
target = feed_dict.target.float()
if args.task_is_adjacent:
target = target[:, :, :args.adjacent_pred_colors]
monitors.update(binary_accuracy(target, pred, return_float=False))
loss = self.loss(pred, target)
# ohem loss is unused.
if args.ohem_size > 0:
loss = loss.view(-1).topk(args.ohem_size)[0].mean()
return loss, monitors, dict(pred=pred)
else:
return dict(pred=pred)
def make_dataset(n, epoch_size, is_train):
pmin, pmax = args.gen_graph_pmin, args.gen_graph_pmax
if args.task_is_outdegree:
return GraphOutDegreeDataset(
args.outdegree_n,
epoch_size,
n,
pmin=pmin,
pmax=pmax,
directed=args.gen_directed,
gen_method=args.gen_graph_method)
elif args.task_is_connectivity:
nmin, nmax = n, n
if is_train and args.nlm_recursion:
nmin = 2
return GraphConnectivityDataset(
args.connectivity_dist_limit,
epoch_size,
nmin,
pmin,
nmax,
pmax,
directed=args.gen_directed,
gen_method=args.gen_graph_method)
elif args.task_is_adjacent:
return GraphAdjacentDataset(
args.gen_graph_colors,
epoch_size,
n,
pmin=pmin,
pmax=pmax,
directed=args.gen_directed,
gen_method=args.gen_graph_method,
is_train=is_train,
is_mnist_colors=args.task_is_mnist_input)
else:
return FamilyTreeDataset(args.task, epoch_size, n, p_marriage=1.0)
class MyTrainer(TrainerBase):
def save_checkpoint(self, name):
if args.checkpoints_dir is not None:
checkpoint_file = os.path.join(args.checkpoints_dir,
'checkpoint_{}.pth'.format(name))
super().save_checkpoint(checkpoint_file)
def _dump_meters(self, meters, mode):
if args.summary_file is not None:
meters_kv = meters._canonize_values('avg')
meters_kv['mode'] = mode
meters_kv['epoch'] = self.current_epoch
with open(args.summary_file, 'a') as f:
f.write(io.dumps_json(meters_kv))
f.write('\n')
data_iterator = {}
def _prepare_dataset(self, epoch_size, mode):
assert mode in ['train', 'test']
if mode == 'train':
batch_size = args.batch_size
number = args.train_number
else:
batch_size = args.test_batch_size
number = self.test_number
# The actual number of instances in an epoch is epoch_size * batch_size.
dataset = make_dataset(number, epoch_size * batch_size, mode == 'train')
dataloader = JacDataLoader(
dataset,
shuffle=True,
batch_size=batch_size,
num_workers=min(epoch_size, 4))
self.data_iterator[mode] = dataloader.__iter__()
def _get_data(self, index, meters, mode):
    feed_dict = next(self.data_iterator[mode])
meters.update(number=feed_dict['n'].data.numpy().mean())
if args.use_gpu:
feed_dict = as_cuda(feed_dict)
return feed_dict
def _get_result(self, index, meters, mode):
feed_dict = self._get_data(index, meters, mode)
output_dict = self.model(feed_dict)
target = feed_dict['target']
if args.task_is_adjacent:
target = target[:, :, :args.adjacent_pred_colors]
result = binary_accuracy(target, output_dict['pred'])
succ = result['accuracy'] == 1.0
meters.update(succ=succ)
meters.update(result, n=target.size(0))
    message = ('> {} iter={iter}, accuracy={accuracy:.4f}, '
               'balance_acc={balanced_accuracy:.4f}').format(
                   mode, iter=index, **meters.val)
return message, dict(succ=succ, feed_dict=feed_dict)
def _get_train_data(self, index, meters):
return self._get_data(index, meters, mode='train')
def _train_epoch(self, epoch_size):
meters = super()._train_epoch(epoch_size)
i = self.current_epoch
if args.save_interval is not None and i % args.save_interval == 0:
self.save_checkpoint(str(i))
if args.test_interval is not None and i % args.test_interval == 0:
self.test()
return meters
def _early_stop(self, meters):
return meters.avg['loss'] < args.early_stop_loss_thresh
def main(run_id):
if args.dump_dir is not None:
if args.runs > 1:
args.current_dump_dir = os.path.join(args.dump_dir,
'run_{}'.format(run_id))
io.mkdir(args.current_dump_dir)
else:
args.current_dump_dir = args.dump_dir
args.summary_file = os.path.join(args.current_dump_dir, 'summary.json')
args.checkpoints_dir = os.path.join(args.current_dump_dir, 'checkpoints')
io.mkdir(args.checkpoints_dir)
logger.info(format_args(args))
model = Model()
if args.use_gpu:
model.cuda()
optimizer = get_optimizer(args.optimizer, model, args.lr)
if args.accum_grad > 1:
optimizer = AccumGrad(optimizer, args.accum_grad)
trainer = MyTrainer.from_args(model, optimizer, args)
if args.load_checkpoint is not None:
trainer.load_checkpoint(args.load_checkpoint)
if args.test_only:
return None, trainer.test()
final_meters = trainer.train()
trainer.save_checkpoint('last')
return trainer.early_stopped, trainer.test()
if __name__ == '__main__':
stats = []
nr_graduated = 0
for i in range(args.runs):
graduated, test_meters = main(i)
logger.info('run {}'.format(i + 1))
if test_meters is not None:
for j, meters in enumerate(test_meters):
if len(stats) <= j:
stats.append(GroupMeters())
stats[j].update(
number=meters.avg['number'], test_acc=meters.avg['accuracy'])
for meters in stats:
logger.info('number {}, test_acc {}'.format(meters.avg['number'],
meters.avg['test_acc']))
if not args.test_only:
nr_graduated += int(graduated)
logger.info('graduate_ratio {}'.format(nr_graduated / (i + 1)))
if graduated:
for j, meters in enumerate(test_meters):
stats[j].update(grad_test_acc=meters.avg['accuracy'])
if nr_graduated > 0:
for meters in stats:
logger.info('number {}, grad_test_acc {}'.format(
meters.avg['number'], meters.avg['grad_test_acc']))
|
|
"""Support for Bluesound devices."""
import asyncio
from asyncio import CancelledError
from datetime import timedelta
import logging
from urllib import parse
import aiohttp
from aiohttp.client_exceptions import ClientError
from aiohttp.hdrs import CONNECTION, KEEP_ALIVE
import async_timeout
import voluptuous as vol
import xmltodict
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE,
DOMAIN,
MEDIA_TYPE_MUSIC,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_HOSTS,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_BLUESOUND_GROUP = "bluesound_group"
ATTR_MASTER = "master"
DATA_BLUESOUND = "bluesound"
DEFAULT_PORT = 11000
NODE_OFFLINE_CHECK_TIMEOUT = 180
NODE_RETRY_INITIATION = timedelta(minutes=3)
SERVICE_CLEAR_TIMER = "bluesound_clear_sleep_timer"
SERVICE_JOIN = "bluesound_join"
SERVICE_SET_TIMER = "bluesound_set_sleep_timer"
SERVICE_UNJOIN = "bluesound_unjoin"
STATE_GROUPED = "grouped"
SYNC_STATUS_INTERVAL = timedelta(minutes=5)
UPDATE_CAPTURE_INTERVAL = timedelta(minutes=30)
UPDATE_PRESETS_INTERVAL = timedelta(minutes=30)
UPDATE_SERVICES_INTERVAL = timedelta(minutes=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOSTS): vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
],
)
}
)
BS_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
BS_JOIN_SCHEMA = BS_SCHEMA.extend({vol.Required(ATTR_MASTER): cv.entity_id})
SERVICE_TO_METHOD = {
SERVICE_JOIN: {"method": "async_join", "schema": BS_JOIN_SCHEMA},
SERVICE_UNJOIN: {"method": "async_unjoin", "schema": BS_SCHEMA},
SERVICE_SET_TIMER: {"method": "async_increase_timer", "schema": BS_SCHEMA},
SERVICE_CLEAR_TIMER: {"method": "async_clear_timer", "schema": BS_SCHEMA},
}
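# A sample service call joining two players into a group (entity ids are
# hypothetical); the schemas above validate the payload:
#
#   service: media_player.bluesound_join
#   data:
#     master: media_player.bluesound_living_room
#     entity_id: media_player.bluesound_kitchen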
def _add_player(hass, async_add_entities, host, port=None, name=None):
"""Add Bluesound players."""
if host in [x.host for x in hass.data[DATA_BLUESOUND]]:
return
@callback
def _init_player(event=None):
"""Start polling."""
hass.async_create_task(player.async_init())
@callback
def _start_polling(event=None):
"""Start polling."""
player.start_polling()
@callback
    def _stop_polling(event=None):
"""Stop polling."""
player.stop_polling()
@callback
def _add_player_cb():
"""Add player after first sync fetch."""
async_add_entities([player])
_LOGGER.info("Added device with name: %s", player.name)
if hass.is_running:
_start_polling()
else:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _start_polling)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_polling)
player = BluesoundPlayer(hass, host, port, name, _add_player_cb)
hass.data[DATA_BLUESOUND].append(player)
if hass.is_running:
_init_player()
else:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _init_player)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Bluesound platforms."""
if DATA_BLUESOUND not in hass.data:
hass.data[DATA_BLUESOUND] = []
if discovery_info:
_add_player(
hass,
async_add_entities,
discovery_info.get(CONF_HOST),
discovery_info.get(CONF_PORT, None),
)
return
hosts = config.get(CONF_HOSTS, None)
if hosts:
for host in hosts:
_add_player(
hass,
async_add_entities,
host.get(CONF_HOST),
host.get(CONF_PORT),
host.get(CONF_NAME),
)
async def async_service_handler(service):
"""Map services to method of Bluesound devices."""
method = SERVICE_TO_METHOD.get(service.service)
if not method:
return
params = {
key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_players = [
player
for player in hass.data[DATA_BLUESOUND]
if player.entity_id in entity_ids
]
else:
target_players = hass.data[DATA_BLUESOUND]
for player in target_players:
await getattr(player, method["method"])(**params)
for service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[service]["schema"]
hass.services.async_register(
DOMAIN, service, async_service_handler, schema=schema
)
class BluesoundPlayer(MediaPlayerDevice):
"""Representation of a Bluesound Player."""
def __init__(self, hass, host, port=None, name=None, init_callback=None):
"""Initialize the media player."""
self.host = host
self._hass = hass
self.port = port
self._polling_session = async_get_clientsession(hass)
self._polling_task = None # The actual polling task.
self._name = name
self._icon = None
self._capture_items = []
self._services_items = []
self._preset_items = []
self._sync_status = {}
self._status = None
self._last_status_update = None
self._is_online = False
self._retry_remove = None
self._lastvol = None
self._master = None
self._is_master = False
self._group_name = None
self._group_list = []
self._bluesound_device_name = None
self._init_callback = init_callback
if self.port is None:
self.port = DEFAULT_PORT
class _TimeoutException(Exception):
pass
@staticmethod
def _try_get_index(string, search_string):
"""Get the index."""
try:
return string.index(search_string)
except ValueError:
return -1
async def force_update_sync_status(self, on_updated_cb=None, raise_timeout=False):
"""Update the internal status."""
resp = await self.send_bluesound_command(
"SyncStatus", raise_timeout, raise_timeout
)
if not resp:
return None
self._sync_status = resp["SyncStatus"].copy()
if not self._name:
self._name = self._sync_status.get("@name", self.host)
if not self._bluesound_device_name:
self._bluesound_device_name = self._sync_status.get("@name", self.host)
if not self._icon:
self._icon = self._sync_status.get("@icon", self.host)
master = self._sync_status.get("master", None)
if master is not None:
self._is_master = False
master_host = master.get("#text")
master_device = [
device
for device in self._hass.data[DATA_BLUESOUND]
if device.host == master_host
]
if master_device and master_host != self.host:
self._master = master_device[0]
else:
self._master = None
_LOGGER.error("Master not found %s", master_host)
else:
if self._master is not None:
self._master = None
slaves = self._sync_status.get("slave", None)
self._is_master = slaves is not None
if on_updated_cb:
on_updated_cb()
return True
async def _start_poll_command(self):
"""Loop which polls the status of the player."""
try:
while True:
await self.async_update_status()
except (asyncio.TimeoutError, ClientError, BluesoundPlayer._TimeoutException):
_LOGGER.info("Node %s is offline, retrying later", self._name)
await asyncio.sleep(NODE_OFFLINE_CHECK_TIMEOUT)
self.start_polling()
except CancelledError:
_LOGGER.debug("Stopping the polling of node %s", self._name)
except Exception:
_LOGGER.exception("Unexpected error in %s", self._name)
raise
def start_polling(self):
"""Start the polling task."""
self._polling_task = self._hass.async_create_task(self._start_poll_command())
def stop_polling(self):
"""Stop the polling task."""
self._polling_task.cancel()
async def async_init(self, triggered=None):
"""Initialize the player async."""
try:
if self._retry_remove is not None:
self._retry_remove()
self._retry_remove = None
await self.force_update_sync_status(self._init_callback, True)
except (asyncio.TimeoutError, ClientError):
_LOGGER.info("Node %s is offline, retrying later", self.host)
self._retry_remove = async_track_time_interval(
self._hass, self.async_init, NODE_RETRY_INITIATION
)
except Exception:
_LOGGER.exception("Unexpected when initiating error in %s", self.host)
raise
async def async_update(self):
"""Update internal status of the entity."""
if not self._is_online:
return
await self.async_update_sync_status()
await self.async_update_presets()
await self.async_update_captures()
await self.async_update_services()
async def send_bluesound_command(
self, method, raise_timeout=False, allow_offline=False
):
"""Send command to the player."""
if not self._is_online and not allow_offline:
return
if method[0] == "/":
method = method[1:]
url = f"http://{self.host}:{self.port}/{method}"
_LOGGER.debug("Calling URL: %s", url)
response = None
try:
websession = async_get_clientsession(self._hass)
with async_timeout.timeout(10):
response = await websession.get(url)
if response.status == 200:
result = await response.text()
if result:
data = xmltodict.parse(result)
else:
data = None
elif response.status == 595:
_LOGGER.info("Status 595 returned, treating as timeout")
raise BluesoundPlayer._TimeoutException()
else:
_LOGGER.error("Error %s on %s", response.status, url)
return None
except (asyncio.TimeoutError, aiohttp.ClientError):
if raise_timeout:
_LOGGER.info("Timeout: %s", self.host)
raise
_LOGGER.debug("Failed communicating: %s", self.host)
return None
return data
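    # For example (hypothetical call), ``await
    # self.send_bluesound_command("Presets")`` performs a GET against
    # http://<host>:<port>/Presets and returns the XML body parsed into a
    # dict by xmltodict, or None on timeout/error.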
async def async_update_status(self):
"""Use the poll session to always get the status of the player."""
response = None
url = "Status"
etag = ""
if self._status is not None:
etag = self._status.get("@etag", "")
if etag != "":
url = f"Status?etag={etag}&timeout=120.0"
url = f"http://{self.host}:{self.port}/{url}"
_LOGGER.debug("Calling URL: %s", url)
try:
with async_timeout.timeout(125):
response = await self._polling_session.get(
url, headers={CONNECTION: KEEP_ALIVE}
)
if response.status == 200:
result = await response.text()
self._is_online = True
self._last_status_update = dt_util.utcnow()
self._status = xmltodict.parse(result)["status"].copy()
group_name = self._status.get("groupName", None)
if group_name != self._group_name:
_LOGGER.debug("Group name change detected on device: %s", self.host)
self._group_name = group_name
# rebuild ordered list of entity_ids that are in the group, master is first
self._group_list = self.rebuild_bluesound_group()
                        # the sleep is needed to make sure that the
                        # devices are synced
await asyncio.sleep(1)
await self.async_trigger_sync_on_all()
elif self.is_grouped:
                        # when the player is grouped we need to fetch the
                        # volume from sync_status, so force an update while
                        # grouped. This isn't foolproof; a better solution
                        # would be to fetch sync_status more often while the
                        # device is playing, which would solve a lot of
                        # problems. That change will be done when the
                        # communication is moved to a separate library.
await self.force_update_sync_status()
self.async_schedule_update_ha_state()
elif response.status == 595:
_LOGGER.info("Status 595 returned, treating as timeout")
raise BluesoundPlayer._TimeoutException()
else:
_LOGGER.error(
"Error %s on %s. Trying one more time", response.status, url
)
except (asyncio.TimeoutError, ClientError):
self._is_online = False
self._last_status_update = None
self._status = None
self.async_schedule_update_ha_state()
_LOGGER.info("Client connection error, marking %s as offline", self._name)
raise
async def async_trigger_sync_on_all(self):
"""Trigger sync status update on all devices."""
_LOGGER.debug("Trigger sync status on all devices")
for player in self._hass.data[DATA_BLUESOUND]:
await player.force_update_sync_status()
@Throttle(SYNC_STATUS_INTERVAL)
async def async_update_sync_status(self, on_updated_cb=None, raise_timeout=False):
"""Update sync status."""
        await self.force_update_sync_status(on_updated_cb, raise_timeout=raise_timeout)
@Throttle(UPDATE_CAPTURE_INTERVAL)
async def async_update_captures(self):
"""Update Capture sources."""
resp = await self.send_bluesound_command("RadioBrowse?service=Capture")
if not resp:
return
self._capture_items = []
def _create_capture_item(item):
self._capture_items.append(
{
"title": item.get("@text", ""),
"name": item.get("@text", ""),
"type": item.get("@serviceType", "Capture"),
"image": item.get("@image", ""),
"url": item.get("@URL", ""),
}
)
if "radiotime" in resp and "item" in resp["radiotime"]:
if isinstance(resp["radiotime"]["item"], list):
for item in resp["radiotime"]["item"]:
_create_capture_item(item)
else:
_create_capture_item(resp["radiotime"]["item"])
return self._capture_items
@Throttle(UPDATE_PRESETS_INTERVAL)
async def async_update_presets(self):
"""Update Presets."""
resp = await self.send_bluesound_command("Presets")
if not resp:
return
self._preset_items = []
def _create_preset_item(item):
self._preset_items.append(
{
"title": item.get("@name", ""),
"name": item.get("@name", ""),
"type": "preset",
"image": item.get("@image", ""),
"is_raw_url": True,
"url2": item.get("@url", ""),
"url": "Preset?id={}".format(item.get("@id", "")),
}
)
if "presets" in resp and "preset" in resp["presets"]:
if isinstance(resp["presets"]["preset"], list):
for item in resp["presets"]["preset"]:
_create_preset_item(item)
else:
_create_preset_item(resp["presets"]["preset"])
return self._preset_items
@Throttle(UPDATE_SERVICES_INTERVAL)
async def async_update_services(self):
"""Update Services."""
resp = await self.send_bluesound_command("Services")
if not resp:
return
self._services_items = []
def _create_service_item(item):
self._services_items.append(
{
"title": item.get("@displayname", ""),
"name": item.get("@name", ""),
"type": item.get("@type", ""),
"image": item.get("@icon", ""),
"url": item.get("@name", ""),
}
)
if "services" in resp and "service" in resp["services"]:
if isinstance(resp["services"]["service"], list):
for item in resp["services"]["service"]:
_create_service_item(item)
else:
_create_service_item(resp["services"]["service"])
return self._services_items
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def state(self):
"""Return the state of the device."""
if self._status is None:
return STATE_OFF
if self.is_grouped and not self.is_master:
return STATE_GROUPED
status = self._status.get("state", None)
if status in ("pause", "stop"):
return STATE_PAUSED
if status in ("stream", "play"):
return STATE_PLAYING
return STATE_IDLE
@property
def media_title(self):
"""Title of current playing media."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
return self._status.get("title1", None)
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
if self._status is None:
return None
if self.is_grouped and not self.is_master:
return self._group_name
artist = self._status.get("artist", None)
if not artist:
artist = self._status.get("title2", None)
return artist
@property
def media_album_name(self):
"""Artist of current playing media (Music track only)."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
album = self._status.get("album", None)
if not album:
album = self._status.get("title3", None)
return album
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
url = self._status.get("image", None)
if not url:
return
if url[0] == "/":
url = f"http://{self.host}:{self.port}{url}"
return url
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
mediastate = self.state
if self._last_status_update is None or mediastate == STATE_IDLE:
return None
position = self._status.get("secs", None)
if position is None:
return None
position = float(position)
if mediastate == STATE_PLAYING:
position += (dt_util.utcnow() - self._last_status_update).total_seconds()
return position
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
duration = self._status.get("totlen", None)
if duration is None:
return None
return float(duration)
@property
def media_position_updated_at(self):
"""Last time status was updated."""
return self._last_status_update
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
        if self._status is None:
            return None
        volume = self._status.get("volume", None)
if self.is_grouped:
volume = self._sync_status.get("@volume", None)
if volume is not None:
return int(volume) / 100
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
volume = self.volume_level
        if volume is None:
return None
return 0 <= volume < 0.001
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def bluesound_device_name(self):
"""Return the device name as returned by the device."""
return self._bluesound_device_name
@property
def icon(self):
"""Return the icon of the device."""
return self._icon
@property
def source_list(self):
"""List of available input sources."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
sources = []
for source in self._preset_items:
sources.append(source["title"])
for source in [
x
for x in self._services_items
if x["type"] == "LocalMusic" or x["type"] == "RadioService"
]:
sources.append(source["title"])
for source in self._capture_items:
sources.append(source["title"])
return sources
@property
def source(self):
"""Name of the current input source."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
current_service = self._status.get("service", "")
if current_service == "":
return ""
stream_url = self._status.get("streamUrl", "")
if self._status.get("is_preset", "") == "1" and stream_url != "":
            # This check doesn't work with all presets (for example
            # playlists), but it works for radio presets; the service_items
            # lookup below will catch playlists.
items = [
x
for x in self._preset_items
if "url2" in x and parse.unquote(x["url2"]) == stream_url
]
if items:
return items[0]["title"]
        # This is hard to detect reliably: Bluetooth sources can be named
        # many different things, and there is no direct way to match entries
        # in the capture list to what is currently playing. It is guesswork
        # that will need tweaking over time.
title = self._status.get("title1", "").lower()
if title == "bluetooth" or stream_url == "Capture:hw:2,0/44100/16/2":
items = [
x
for x in self._capture_items
if x["url"] == "Capture%3Abluez%3Abluetooth"
]
if items:
return items[0]["title"]
items = [x for x in self._capture_items if x["url"] == stream_url]
if items:
return items[0]["title"]
if stream_url[:8] == "Capture:":
stream_url = stream_url[8:]
idx = BluesoundPlayer._try_get_index(stream_url, ":")
if idx > 0:
stream_url = stream_url[:idx]
for item in self._capture_items:
url = parse.unquote(item["url"])
if url[:8] == "Capture:":
url = url[8:]
idx = BluesoundPlayer._try_get_index(url, ":")
if idx > 0:
url = url[:idx]
if url.lower() == stream_url.lower():
return item["title"]
items = [x for x in self._capture_items if x["name"] == current_service]
if items:
return items[0]["title"]
items = [x for x in self._services_items if x["name"] == current_service]
if items:
return items[0]["title"]
if self._status.get("streamUrl", "") != "":
_LOGGER.debug(
"Couldn't find source of stream URL: %s",
self._status.get("streamUrl", ""),
)
return None
@property
def supported_features(self):
"""Flag of media commands that are supported."""
if self._status is None:
return None
if self.is_grouped and not self.is_master:
return SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE
supported = SUPPORT_CLEAR_PLAYLIST
if self._status.get("indexing", "0") == "0":
supported = (
supported
| SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_PLAY
| SUPPORT_SELECT_SOURCE
| SUPPORT_SHUFFLE_SET
)
current_vol = self.volume_level
if current_vol is not None and current_vol >= 0:
supported = (
supported
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
)
if self._status.get("canSeek", "") == "1":
supported = supported | SUPPORT_SEEK
return supported
@property
def is_master(self):
"""Return true if player is a coordinator."""
return self._is_master
@property
def is_grouped(self):
"""Return true if player is a coordinator."""
return self._master is not None or self._is_master
@property
def shuffle(self):
"""Return true if shuffle is active."""
return self._status.get("shuffle", "0") == "1"
async def async_join(self, master):
"""Join the player to a group."""
master_device = [
device
for device in self.hass.data[DATA_BLUESOUND]
if device.entity_id == master
]
if master_device:
_LOGGER.debug(
"Trying to join player: %s to master: %s",
self.host,
master_device[0].host,
)
await master_device[0].async_add_slave(self)
else:
_LOGGER.error("Master not found %s", master_device)
@property
def device_state_attributes(self):
"""List members in group."""
attributes = {}
if self._group_list:
attributes = {ATTR_BLUESOUND_GROUP: self._group_list}
attributes[ATTR_MASTER] = self._is_master
return attributes
def rebuild_bluesound_group(self):
"""Rebuild the list of entities in speaker group."""
if self._group_name is None:
return None
        device_group = self._group_name.split("+")
sorted_entities = sorted(
self._hass.data[DATA_BLUESOUND],
key=lambda entity: entity.is_master,
reverse=True,
)
bluesound_group = [
entity.name
for entity in sorted_entities
if entity.bluesound_device_name in device_group
]
return bluesound_group
async def async_unjoin(self):
"""Unjoin the player from a group."""
if self._master is None:
return
_LOGGER.debug("Trying to unjoin player: %s", self.host)
await self._master.async_remove_slave(self)
async def async_add_slave(self, slave_device):
"""Add slave to master."""
return await self.send_bluesound_command(
f"/AddSlave?slave={slave_device.host}&port={slave_device.port}"
)
async def async_remove_slave(self, slave_device):
"""Remove slave to master."""
return await self.send_bluesound_command(
f"/RemoveSlave?slave={slave_device.host}&port={slave_device.port}"
)
async def async_increase_timer(self):
"""Increase sleep time on player."""
sleep_time = await self.send_bluesound_command("/Sleep")
if sleep_time is None:
_LOGGER.error("Error while increasing sleep time on player: %s", self.host)
return 0
return int(sleep_time.get("sleep", "0"))
async def async_clear_timer(self):
"""Clear sleep timer on player."""
sleep = 1
while sleep > 0:
sleep = await self.async_increase_timer()
async def async_set_shuffle(self, shuffle):
"""Enable or disable shuffle mode."""
value = "1" if shuffle else "0"
return await self.send_bluesound_command(f"/Shuffle?state={value}")
async def async_select_source(self, source):
"""Select input source."""
if self.is_grouped and not self.is_master:
return
items = [x for x in self._preset_items if x["title"] == source]
if not items:
items = [x for x in self._services_items if x["title"] == source]
if not items:
items = [x for x in self._capture_items if x["title"] == source]
if not items:
return
selected_source = items[0]
url = "Play?url={}&preset_id&image={}".format(
selected_source["url"], selected_source["image"]
)
if "is_raw_url" in selected_source and selected_source["is_raw_url"]:
url = selected_source["url"]
return await self.send_bluesound_command(url)
async def async_clear_playlist(self):
"""Clear players playlist."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command("Clear")
async def async_media_next_track(self):
"""Send media_next command to media player."""
if self.is_grouped and not self.is_master:
return
cmd = "Skip"
if self._status and "actions" in self._status:
for action in self._status["actions"]["action"]:
if "@name" in action and "@url" in action and action["@name"] == "skip":
cmd = action["@url"]
return await self.send_bluesound_command(cmd)
async def async_media_previous_track(self):
"""Send media_previous command to media player."""
if self.is_grouped and not self.is_master:
return
cmd = "Back"
if self._status and "actions" in self._status:
for action in self._status["actions"]["action"]:
if "@name" in action and "@url" in action and action["@name"] == "back":
cmd = action["@url"]
return await self.send_bluesound_command(cmd)
async def async_media_play(self):
"""Send media_play command to media player."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command("Play")
async def async_media_pause(self):
"""Send media_pause command to media player."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command("Pause")
async def async_media_stop(self):
"""Send stop command."""
if self.is_grouped and not self.is_master:
return
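        # Stop is implemented by pausing the player.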
return await self.send_bluesound_command("Pause")
async def async_media_seek(self, position):
"""Send media_seek command to media player."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command("Play?seek={}".format(float(position)))
    async def async_play_media(self, media_type, media_id, **kwargs):
        """
        Send the play_media command to the media player.

        ATTR_MEDIA_ENQUEUE is accepted for compatibility, but the device
        command issued is the same Play request either way.
        """
        if self.is_grouped and not self.is_master:
            return
        return await self.send_bluesound_command(f"Play?url={media_id}")
async def async_volume_up(self):
"""Volume up the media player."""
        current_vol = self.volume_level
        if current_vol is None:
            return
        return await self.async_set_volume_level(((current_vol * 100) + 1) / 100)
async def async_volume_down(self):
"""Volume down the media player."""
        current_vol = self.volume_level
        if current_vol is None:
            return
        return await self.async_set_volume_level(((current_vol * 100) - 1) / 100)
async def async_set_volume_level(self, volume):
"""Send volume_up command to media player."""
if volume < 0:
volume = 0
elif volume > 1:
volume = 1
return await self.send_bluesound_command(
"Volume?level=" + str(float(volume) * 100)
)
async def async_mute_volume(self, mute):
"""Send mute command to media player."""
        if mute:
            volume = self.volume_level
            if volume is not None and volume > 0:
                self._lastvol = volume
            return await self.send_bluesound_command("Volume?level=0")
return await self.send_bluesound_command(
"Volume?level=" + str(float(self._lastvol) * 100)
)
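

# Illustrative sketch: the commands above all funnel through
# send_bluesound_command(), whose definition is not shown here. Assuming
# the player exposes its plain HTTP/XML control API on the configured port
# and that replies are parsed with xmltodict (the "@volume"-style attribute
# keys above point that way), a minimal stand-in could look like this. It
# is a sketch only; the real helper also deals with timeouts, long polling,
# and connection errors.

async def _example_send_command(host, port, command):
    """Fetch an XML endpoint such as 'Volume?level=50' and parse the reply."""
    import aiohttp
    import xmltodict

    url = f"http://{host}:{port}/{command.lstrip('/')}"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            if response.status != 200:
                return None
            # xmltodict turns XML attributes into "@name"-prefixed dict
            # keys, matching the lookups used by the component above.
            return xmltodict.parse(await response.text())
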
import pytest
from rpyre.interface.lua import compile_re
from rpyre.matching import find
from rpyre.nfa import (
StateMatch, StateChar, StateSplit, StateDot, StateCharRange,
)
class TestLuaPatternsFind(object):
def test_simple_or(self):
expr = compile_re('(aa|bb)')
result = find(expr, 'xyzabbaab', 0)
assert list(result) == [(5, 6), (7, 8)]
def test_grouped_or_between_chars(self):
expr = compile_re('x(aa|bb)x')
result = find(expr, 'axaaxaxbxbbxa', 0)
assert list(result) == [(2, 5), (9, 12)]
def test_chained_grouped_or_match(self):
expr = compile_re('x(aa|bb)(cc|dd)x')
result = find(expr, 'axaaddaxbbccxxaacx', 0)
assert list(result) == [(8, 13)]
def test_chained_grouped_or_no_match(self):
expr = compile_re('x(aa|bb)(cc|dd)x')
result = find(expr, 'xaaccddxxaaddddxxaacc', 0)
assert list(result) == [(-1, -1)]
def test_grouped_star(self):
expr = compile_re('(ab)*')
result = find(expr, 'ababababab', 0)
assert list(result) == [(1, 10)]
def test_grouped_star_between_chars_match(self):
expr = compile_re('x(ab)*x')
result = find(expr, 'ababxababxabab', 0)
assert list(result) == [(5, 10)]
def test_grouped_star_between_chars_no_match(self):
expr = compile_re('x(ab)*x')
result = find(expr, 'ababxabababab', 0)
assert list(result) == [(-1, -1)]
def test_grouped_star_and_or_match(self):
expr = compile_re('x((aa)*|(bb)*)x')
result = find(expr, 'xaaaaaaxxx', 0)
assert list(result) == [(1, 8), (9, 10)]
def test_grouped_star_and_or_no_match(self):
expr = compile_re('x((aa)*|(bb)*)x')
result = find(expr, 'xaaaaaxbxabxbbbb', 0)
assert list(result) == [(-1, -1)]
def test_simple_plus(self):
expr = compile_re('a+')
result = find(expr, 'bxaaaabak', 0)
assert list(result) == [(3, 6), (8, 8)]
def test_grouped_plus(self):
expr = compile_re('(a|b)+c')
result = find(expr, 'xxcaababcvbc', 0)
assert list(result) == [(4, 9), (11, 12)]
def test_or_repetition(self):
expr = compile_re('(aa|bb){2}')
result = find(expr, 'xabbxaaaaxjkbbajbbaal', 0)
assert list(result) == [(6, 9), (17, 20)]
def test_match_evil(self):
expr = compile_re('(a|b)*a(a|b){5}a(a|b)*')
result = find(expr, 'aaaababababba', 0)
assert list(result) == [(1, 13)]
def test_match_evil_no_match(self):
expr = compile_re('(a|b)*a(a|b){5}a(a|b)*')
result = find(expr, 'aaaaaaxbbbbaaaaabbbbbbb', 0)
assert list(result) == [(-1, -1)]
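
# Usage sketch: the tests above imply that find() yields 1-based, inclusive
# (start, stop) pairs and a single (-1, -1) pair when nothing matches.
# Under that assumption, a thin wrapper that extracts the matched
# substrings could look like this; findall_strings is a hypothetical
# helper, not part of rpyre's API.
def findall_strings(pattern, text):
    """Return the matched substring for every match of pattern in text."""
    expr = compile_re(pattern)
    return [
        text[start - 1:stop]
        for start, stop in find(expr, text, 0)
        if (start, stop) != (-1, -1)
    ]
# e.g. findall_strings('(aa|bb)', 'xyzabbaab') == ['bb', 'aa']
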
class TestLuaCompile(object):
def test_single_char_build_expr(self):
expr = compile_re('a')
assert isinstance(expr, StateChar)
assert expr.start == ord('a')
assert expr.stop == ord('a')
def test_two_chars_build_expr(self):
expr = compile_re('ab')
assert isinstance(expr, StateChar)
assert expr.start == ord('a')
assert isinstance(expr.out, StateChar)
assert expr.out.start == ord('b')
def test_three_chars_build_expr(self):
expr = compile_re('abc')
assert isinstance(expr, StateChar)
assert expr.start == ord('a')
assert isinstance(expr.out, StateChar)
assert expr.out.start == ord('b')
assert isinstance(expr.out.out, StateChar)
assert expr.out.out.start == ord('c')
def test_chars_and_dots_build_expr(self):
expr = compile_re('a.c.', False)
assert isinstance(expr, StateChar)
assert expr.start == ord('a')
assert isinstance(expr.out, StateDot)
assert isinstance(expr.out.out, StateChar)
assert expr.out.out.start == ord('c')
assert isinstance(expr.out.out.out, StateDot)
assert isinstance(expr.out.out.out.out, StateMatch)
def test_chars_and_special_a_build_expr(self):
expr = compile_re('%aa%a', False)
assert isinstance(expr, StateCharRange)
assert expr.start == ord('A')
assert expr.stop == ord('z')
assert isinstance(expr.out, StateChar)
assert expr.out.stop == ord('a')
assert isinstance(expr.out.out, StateCharRange)
assert expr.out.out.start == ord('A')
assert expr.out.out.stop == ord('z')
assert isinstance(expr.out.out.out, StateMatch)
def test_escape_percent_build_expr(self):
expr = compile_re('%%', False)
assert isinstance(expr, StateChar)
assert expr.start == ord('%')
assert isinstance(expr.out, StateMatch)
def test_build_expr_pattern_with_star(self):
expr = compile_re('a*', False)
assert isinstance(expr, StateSplit)
assert isinstance(expr.out, StateChar)
assert expr.out.out == expr
assert expr.out.start == ord('a')
assert isinstance(expr.out2, StateMatch)
def test_build_expr_pattern_with_star_2(self):
expr = compile_re('a*b*', False)
assert isinstance(expr, StateSplit)
assert isinstance(expr.out, StateChar)
assert expr.out.out == expr
assert expr.out.start == ord('a')
assert isinstance(expr.out2, StateSplit)
assert isinstance(expr.out2.out, StateChar)
assert expr.out2.out.out == expr.out2
assert expr.out2.out.start == ord('b')
assert isinstance(expr.out2.out2, StateMatch)
def test_build_expr_pattern_with_star_3(self):
expr = compile_re('a*cb*', False)
assert isinstance(expr, StateSplit)
assert isinstance(expr.out, StateChar)
assert expr.out.out == expr
assert expr.out.start == ord('a')
assert isinstance(expr.out2, StateChar)
assert expr.out2.start == ord('c')
assert isinstance(expr.out2.out, StateSplit)
assert isinstance(expr.out2.out.out, StateChar)
assert expr.out2.out.out.out == expr.out2.out
assert expr.out2.out.out.start == ord('b')
assert isinstance(expr.out2.out.out2, StateMatch)
def test_build_expr_pattern_with_star_4(self):
expr = compile_re('a.*c%a*', False)
# a
assert isinstance(expr, StateChar)
assert expr.start == ord('a')
# .*
node = expr.out
assert isinstance(node, StateSplit)
assert isinstance(node.out, StateDot)
assert node.out.out == node
# c
node = node.out2
assert isinstance(node, StateChar)
assert node.start == ord('c')
# %a*
node = node.out
assert isinstance(node, StateSplit)
assert isinstance(node.out, StateCharRange)
assert node.out.start == ord('A')
assert node.out.stop == ord('z')
assert node.out.out == node
# match
node = node.out2
assert isinstance(node, StateMatch)
def test_build_expr_simple_or(self):
expr = compile_re('a|b', False)
# |
assert isinstance(expr, StateSplit)
# a
node = expr.out
assert isinstance(node, StateChar)
assert node.stop == ord('a')
assert isinstance(node.out, StateMatch)
# b
node = expr.out2
assert isinstance(node, StateChar)
assert node.stop == ord('b')
assert isinstance(node.out, StateMatch)
def test_build_group_star(self):
expr = compile_re('(ab)*', False)
# *
assert isinstance(expr, StateSplit)
# a
node = expr.out
assert isinstance(node, StateChar)
assert node.stop == ord('a')
# b
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('b')
assert node.out == expr
# match
assert isinstance(expr.out2, StateMatch)
def test_build_group_star_chained(self):
expr = compile_re('(ab)*ab', False)
# *
assert isinstance(expr, StateSplit)
# a
node = expr.out
assert isinstance(node, StateChar)
assert node.stop == ord('a')
# b
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('b')
assert node.out == expr
# ab
node = expr.out2
assert isinstance(node, StateChar)
assert node.stop == ord('a')
# b
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('b')
# match
assert isinstance(node.out, StateMatch)
def test_build_group_or(self):
expr = compile_re('(aa|bb)', False)
# |
assert isinstance(expr, StateSplit)
# aa
node = expr.out
assert isinstance(node, StateChar)
assert node.stop == ord('a')
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('a')
assert isinstance(node.out, StateMatch)
# bb
node = expr.out2
assert isinstance(node, StateChar)
assert node.stop == ord('b')
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('b')
assert isinstance(node.out, StateMatch)
def test_build_group_or_between_chars(self):
expr = compile_re('x(aa|bb)x')
# xaax
assert isinstance(expr, StateChar)
assert expr.start == ord('x')
# |
node = expr.out
assert isinstance(node, StateSplit)
# aax
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('a')
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('a')
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('x')
assert isinstance(node.out, StateMatch)
# bbx
node = expr.out.out2
assert isinstance(node, StateChar)
assert node.stop == ord('b')
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('b')
node = node.out
assert isinstance(node, StateChar)
assert node.stop == ord('x')
assert isinstance(node.out, StateMatch)
    def test_build_expr_with_repetition(self):
expr = compile_re('a{3}')
assert isinstance(expr, StateChar)
assert isinstance(expr.out, StateChar)
assert isinstance(expr.out.out, StateChar)
assert isinstance(expr.out.out.out, StateMatch)
def test_build_expr_misplaced_star(self):
with pytest.raises(RuntimeError):
compile_re('*')
def test_build_expr_invalid_special_char(self):
with pytest.raises(RuntimeError):
compile_re('%,')
def test_build_expr_misplaced_percent_1(self):
with pytest.raises(RuntimeError):
compile_re('%')
def test_build_expr_misplaced_percent_2(self):
with pytest.raises(RuntimeError):
compile_re('a%%%')
def test_build_expr_misplaced_percent_3(self):
with pytest.raises(RuntimeError):
compile_re('a%')
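
# Sketch: the structural assertions above describe a classic Thompson NFA.
# StateChar and StateCharRange consume one character whose code point lies
# in [start, stop], StateDot consumes any character, StateSplit forks into
# out/out2 without consuming input, and StateMatch accepts. A minimal
# simulation of such a graph -- independent of rpyre's actual matcher --
# could look like this:
def _example_simulate(state, text):
    """Return True if the NFA rooted at `state` matches all of `text`."""

    def add(state, states):
        # Collect a state, eagerly following epsilon (split) transitions
        # and guarding against epsilon cycles.
        if state in states:
            return
        states.add(state)
        if isinstance(state, StateSplit):
            add(state.out, states)
            add(state.out2, states)

    current = set()
    add(state, current)
    for char in text:
        nxt = set()
        for s in current:
            if isinstance(s, StateDot) or (
                isinstance(s, (StateChar, StateCharRange))
                and s.start <= ord(char) <= s.stop
            ):
                add(s.out, nxt)
        current = nxt
    return any(isinstance(s, StateMatch) for s in current)
# e.g. _example_simulate(compile_re('ab', False), 'ab') should hold if
# compile_re builds the graph shape asserted in the tests above.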