# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
"""
Proposal Operator: transforms anchor coordinates into ROI coordinates using the predicted
classification probabilities and bounding-box regression results, together with the image
size and scale information.
"""
import mxnet as mx
import numpy as np
import numpy.random as npr
from distutils.util import strtobool
from bbox.bbox_transform import bbox_pred, clip_boxes
from rpn.generate_anchor import generate_anchors
from nms.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
DEBUG = False
class ProposalOperator(mx.operator.CustomOp):
def __init__(self, feat_stride, scales, ratios, output_score,
rpn_pre_nms_top_n, rpn_post_nms_top_n, threshold, rpn_min_size):
super(ProposalOperator, self).__init__()
self._feat_stride = feat_stride
self._scales = np.fromstring(scales[1:-1], dtype=float, sep=',')
self._ratios = np.fromstring(ratios[1:-1], dtype=float, sep=',')
self._anchors = generate_anchors(base_size=self._feat_stride, scales=self._scales, ratios=self._ratios)
self._num_anchors = self._anchors.shape[0]
self._output_score = output_score
self._rpn_pre_nms_top_n = rpn_pre_nms_top_n
self._rpn_post_nms_top_n = rpn_post_nms_top_n
self._threshold = threshold
self._rpn_min_size = rpn_min_size
if DEBUG:
print 'feat_stride: {}'.format(self._feat_stride)
print 'anchors:'
print self._anchors
def forward(self, is_train, req, in_data, out_data, aux):
nms = gpu_nms_wrapper(self._threshold, in_data[0].context.device_id)
batch_size = in_data[0].shape[0]
if batch_size > 1:
            raise ValueError("multiple images per device is not implemented")
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
pre_nms_topN = self._rpn_pre_nms_top_n
post_nms_topN = self._rpn_post_nms_top_n
min_size = self._rpn_min_size
# the first set of anchors are background probabilities
# keep the second part
scores = in_data[0].asnumpy()[:, self._num_anchors:, :, :]
bbox_deltas = in_data[1].asnumpy()
im_info = in_data[2].asnumpy()[0, :]
if DEBUG:
print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
print 'scale: {}'.format(im_info[2])
# 1. Generate proposals from bbox_deltas and shifted anchors
# use real image size instead of padded feature map sizes
height, width = int(im_info[0] / self._feat_stride), int(im_info[1] / self._feat_stride)
if DEBUG:
print 'score map size: {}'.format(scores.shape)
print "resudial: {}".format((scores.shape[2] - height, scores.shape[3] - width))
# Enumerate all shifts
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = self._num_anchors
K = shifts.shape[0]
anchors = self._anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
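        # Illustrative shape check (comment only, nothing is executed): with a
        # 38x50 score map and A = 9 anchors per location, K = 38 * 50 = 1900,
        # so the broadcast (1, A, 4) + (K, 1, 4) gives (K, A, 4) and the final
        # `anchors` array has shape (17100, 4): one box per (location, anchor) pair.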
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
#
# bbox deltas will be (1, 4 * A, H, W) format
# transpose to (1, H, W, 4 * A)
# reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
# in slowest to fastest order
bbox_deltas = self._clip_pad(bbox_deltas, (height, width))
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))
# Same story for the scores:
#
# scores are (1, A, H, W) format
# transpose to (1, H, W, A)
# reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
scores = self._clip_pad(scores, (height, width))
scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
# Convert anchors into proposals via bbox transformations
proposals = bbox_pred(anchors, bbox_deltas)
# 2. clip predicted boxes to image
proposals = clip_boxes(proposals, im_info[:2])
# 3. remove predicted boxes with either height or width < threshold
# (NOTE: convert min_size to input image scale stored in im_info[2])
keep = self._filter_boxes(proposals, min_size * im_info[2])
proposals = proposals[keep, :]
scores = scores[keep]
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
det = np.hstack((proposals, scores)).astype(np.float32)
keep = nms(det)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
# pad to ensure output size remains unchanged
if len(keep) < post_nms_topN:
pad = npr.choice(keep, size=post_nms_topN - len(keep))
keep = np.hstack((keep, pad))
proposals = proposals[keep, :]
scores = scores[keep]
# Output rois array
# Our RPN implementation only supports a single input image, so all
# batch inds are 0
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
self.assign(out_data[0], req[0], blob)
if self._output_score:
self.assign(out_data[1], req[1], scores.astype(np.float32, copy=False))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 0)
self.assign(in_grad[1], req[1], 0)
self.assign(in_grad[2], req[2], 0)
@staticmethod
def _filter_boxes(boxes, min_size):
""" Remove all boxes with any side smaller than min_size """
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
@staticmethod
def _clip_pad(tensor, pad_shape):
"""
Clip boxes of the pad area.
:param tensor: [n, c, H, W]
:param pad_shape: [h, w]
:return: [n, c, h, w]
"""
H, W = tensor.shape[2:]
h, w = pad_shape
if h < H or w < W:
tensor = tensor[:, :, :h, :w].copy()
return tensor
@mx.operator.register("proposal")
class ProposalProp(mx.operator.CustomOpProp):
def __init__(self, feat_stride='16', scales='(8, 16, 32)', ratios='(0.5, 1, 2)', output_score='False',
rpn_pre_nms_top_n='6000', rpn_post_nms_top_n='300', threshold='0.3', rpn_min_size='16'):
super(ProposalProp, self).__init__(need_top_grad=False)
self._feat_stride = int(feat_stride)
self._scales = scales
self._ratios = ratios
self._output_score = strtobool(output_score)
self._rpn_pre_nms_top_n = int(rpn_pre_nms_top_n)
self._rpn_post_nms_top_n = int(rpn_post_nms_top_n)
self._threshold = float(threshold)
self._rpn_min_size = int(rpn_min_size)
def list_arguments(self):
return ['cls_prob', 'bbox_pred', 'im_info']
def list_outputs(self):
if self._output_score:
return ['output', 'score']
else:
return ['output']
def infer_shape(self, in_shape):
cls_prob_shape = in_shape[0]
bbox_pred_shape = in_shape[1]
        assert cls_prob_shape[0] == bbox_pred_shape[0], 'cls_prob and bbox_pred batch sizes do not match'
batch_size = cls_prob_shape[0]
im_info_shape = (batch_size, 3)
output_shape = (self._rpn_post_nms_top_n, 5)
score_shape = (self._rpn_post_nms_top_n, 1)
if self._output_score:
return [cls_prob_shape, bbox_pred_shape, im_info_shape], [output_shape, score_shape]
else:
return [cls_prob_shape, bbox_pred_shape, im_info_shape], [output_shape]
def create_operator(self, ctx, shapes, dtypes):
return ProposalOperator(self._feat_stride, self._scales, self._ratios, self._output_score,
self._rpn_pre_nms_top_n, self._rpn_post_nms_top_n, self._threshold, self._rpn_min_size)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
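# --------------------------------------------------------
# Usage sketch (illustrative only, not part of the original file): the custom
# op registered above is typically attached to the RPN outputs with
# mx.sym.Custom; the symbol names below (rpn_cls_prob, rpn_bbox_pred, im_info)
# are assumptions.
#
#   rois = mx.sym.Custom(
#       cls_prob=rpn_cls_prob, bbox_pred=rpn_bbox_pred, im_info=im_info,
#       op_type='proposal', feat_stride='16', scales='(8, 16, 32)',
#       ratios='(0.5, 1, 2)', rpn_pre_nms_top_n='6000',
#       rpn_post_nms_top_n='300', threshold='0.7', rpn_min_size='16')
# --------------------------------------------------------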
import asyncio, discord, re, random
from operator import itemgetter
from discord.ext import commands
from Cogs import Utils, DisplayName, Message, PickList
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(BotAdmin(bot, settings))
class BotAdmin(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.dregex = re.compile(r"(?i)(discord(\.gg|app\.com)\/)(?!attachments)([^\s]+)")
self.mention_re = re.compile(r"[0-9]{17,21}")
self.removal = re.compile(r"(?i)-?r(em(ove|oval)?)?=\d+")
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
async def message(self, message):
# Check for discord invite links and remove them if found - per server settings
if not self.dregex.search(message.content): return None # No invite in the passed message - nothing to do
# Got an invite - let's see if we care
if not self.settings.getServerStat(message.guild,"RemoveInviteLinks",False): return None # We don't care
# We *do* care, let's see if the author is admin/bot-admin as they'd have power to post invites
ctx = await self.bot.get_context(message)
if Utils.is_bot_admin(ctx): return None # We are immune!
# At this point - we need to delete the message
return { 'Ignore' : True, 'Delete' : True}
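    # Quick sanity check of the invite regex above (illustrative comment only,
    # not used by the cog):
    #   >>> pat = re.compile(r"(?i)(discord(\.gg|app\.com)\/)(?!attachments)([^\s]+)")
    #   >>> bool(pat.search("join discord.gg/SomeInvite"))
    #   True
    #   >>> bool(pat.search("https://discordapp.com/attachments/1/2/image.png"))
    #   False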
@commands.command()
async def removeinvitelinks(self, ctx, *, yes_no = None):
"""Enables/Disables auto-deleting discord invite links in chat (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Remove discord invite links","RemoveInviteLinks",yes_no))
@commands.command()
async def setuserparts(self, ctx, member : discord.Member = None, *, parts : str = None):
"""Set another user's parts list (owner only)."""
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
msg = 'I have not been claimed, *yet*.'
return await ctx.send(msg)
elif isOwner == False:
msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
return await ctx.send(msg)
if member == None:
msg = 'Usage: `{}setuserparts [member] "[parts text]"`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
try:
member = discord.utils.get(ctx.guild.members, name=member)
except:
return await ctx.send("That member does not exist")
if not parts:
parts = ""
self.settings.setGlobalUserStat(member, "Parts", parts)
msg = '*{}\'s* parts have been set to:\n{}'.format(DisplayName.name(member), parts)
await ctx.send(Utils.suppressed(ctx,msg))
@setuserparts.error
    async def setuserparts_error(self, ctx, error):
# do stuff
msg = 'setuserparts Error: {}'.format(error)
await ctx.send(msg)
@commands.command()
async def ignore(self, ctx, *, member = None):
"""Adds a member to the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
msg = 'Usage: `{}ignore [member]`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
return await ctx.send('*{}* is already being ignored.'.format(DisplayName.name(member)))
# Let's ignore someone
ignoreList.append({ "Name" : member.name, "ID" : member.id })
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
await ctx.send('*{}* is now being ignored.'.format(DisplayName.name(member)))
@ignore.error
    async def ignore_error(self, ctx, error):
# do stuff
msg = 'ignore Error: {}'.format(error)
await ctx.send(msg)
@commands.command()
async def listen(self, ctx, *, member = None):
"""Removes a member from the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
return await ctx.send('Usage: `{}listen [member]`'.format(ctx.prefix))
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
ignoreList.remove(user)
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
return await ctx.send("*{}* is no longer being ignored.".format(DisplayName.name(member)))
await ctx.send('*{}* wasn\'t being ignored...'.format(DisplayName.name(member)))
@listen.error
    async def listen_error(self, ctx, error):
# do stuff
msg = 'listen Error: {}'.format(error)
await ctx.send(msg)
@commands.command()
async def ignored(self, ctx):
"""Lists the users currently being ignored."""
ignoreArray = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
promoSorted = sorted(ignoreArray, key=itemgetter('Name'))
if not len(promoSorted):
return await ctx.send("I'm not currently ignoring anyone.")
ignored = ["*{}*".format(DisplayName.name(ctx.guild.get_member(int(x["ID"])))) for x in promoSorted if ctx.guild.get_member(int(x["ID"]))]
await ctx.send("Currently Ignored Users:\n{}".format("\n".join(ignored)))
async def kick_ban(self, ctx, members_and_reason = None, command_name = "kick"):
# Helper method to handle the lifting for kick and ban
if not await Utils.is_bot_admin_reply(ctx): return
if not members_and_reason:
return await ctx.send('Usage: `{}{} [space delimited member mention/id] [reason]`'.format(ctx.prefix, command_name))
# Force a mention - we don't want any ambiguity
args = members_and_reason.split()
# Get our list of targets
targets = []
missed = []
unable = []
reason = ""
days = self.settings.getServerStat(ctx.guild,"BanMessageRemoveDays",1) if command_name == "ban" else None
try: days = int(days)
except: days = None
footer = "Message Removal: {:,} day{}".format(days,"" if days==1 else "s") if command_name == "ban" else None
for index,item in enumerate(args):
if self.mention_re.search(item): # Check if it's a mention
# Resolve the member
mem_id = int(re.sub(r'\W+', '', item))
member = ctx.guild.get_member(mem_id)
if member is None and command_name in ("ban","unban"): # Didn't get a valid member, let's allow a pre-ban/unban if we can resolve them
try: member = await self.bot.fetch_user(mem_id)
except: pass
# If we have an invalid mention, save it to report later
if member is None:
missed.append(str(mem_id))
continue
# Let's check if we have a valid member and make sure it's not:
# 1. The bot, 2. The command caller, 3. Another bot-admin/admin
if isinstance(member, discord.Member) and (member.id == self.bot.user.id or member.id == ctx.author.id or Utils.is_bot_admin(ctx,member)):
unable.append(member.mention)
continue
if not member in targets: targets.append(member) # Only add them if we don't already have them
else:
# Check if we're banning - and if so, check the rest of the args for `-r=#`
# then apply that override and remove from the reason
if command_name == "ban":
for i,x in enumerate(args[index:]):
if self.removal.match(x):
try:
days = int(x.split("=")[-1])
assert 0<=days<8
except:
continue
args.pop(index+i)
footer="Message Removal Override: {:,} day{}".format(days,"" if days==1 else "s")
break
# Bail if we don't have any args left for a reason
if index >= len(args): break
# Not a mention - must be the reason, dump the rest of the items into a string
# separated by a space
reason = " ".join(args[index:])
break
reason = reason if len(reason) else "No reason provided."
if not len(targets):
msg = "**With reason:**\n\n{}{}{}".format(
reason,
"" if not len(missed) else "\n\n**Unmatched ID{}:**\n\n{}".format("" if len(missed) == 1 else "s", "\n".join(missed)),
"" if not len(unable) else "\n\n**Unable to {}:**\n\n{}".format(command_name,"\n".join(unable))
)
return await Message.EmbedText(title="No valid members passed!",description=msg,color=ctx.author,footer=footer).send(ctx)
# We should have a list of targets, and the reason - let's list them for confirmation
# then generate a 4-digit confirmation code that the original requestor needs to confirm
# in order to follow through
confirmation_code = "".join([str(random.randint(0,9)) for x in range(4)])
msg = "**To {} the following member{}:**\n\n{}\n\n**With reason:**\n\n\"{}\"\n\n**Please type:**\n\n`{}`{}{}".format(
command_name,
"" if len(targets) == 1 else "s",
"\n".join([x.name+"#"+x.discriminator for x in targets]),
reason if len(reason) else "None",
confirmation_code,
"" if not len(missed) else "\n\n**Unmatched ID{}:**\n\n{}".format("" if len(missed) == 1 else "s", "\n".join(missed)),
"" if not len(unable) else "\n\n**Unable to {}:**\n\n{}".format(command_name,"\n".join(unable))
)
confirmation_message = await Message.EmbedText(title="{} Confirmation".format(command_name.capitalize()),description=msg,color=ctx.author,footer=footer).send(ctx)
def check_confirmation(message):
return message.channel == ctx.channel and ctx.author == message.author # Just making sure it's the same user/channel
try: confirmation_user = await self.bot.wait_for('message', timeout=60, check=check_confirmation)
except: confirmation_user = ""
# Delete the confirmation message
await confirmation_message.delete()
# Verify the confirmation
if not confirmation_user.content == confirmation_code: return await ctx.send("{} cancelled!".format(command_name.capitalize()))
# We got the authorization!
message = await Message.EmbedText(title="{}ing...".format("Bann" if command_name == "ban" else "Unbann" if command_name == "unban" else "Kick"),color=ctx.author,footer=footer).send(ctx)
canned = []
cant = []
command = {"ban":ctx.guild.ban,"kick":ctx.guild.kick,"unban":ctx.guild.unban}.get(command_name.lower(),ctx.guild.kick)
for target in targets:
try:
args = {"reason":"{}#{}: {}".format(ctx.author.name,ctx.author.discriminator,reason)}
if days is not None: args["delete_message_days"] = days
await command(target,**args)
canned.append(target)
except:
cant.append(target)
msg = ""
if len(canned):
msg += "**I was ABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in canned]))
if len(cant):
msg += "**I was UNABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in cant]))
await Message.EmbedText(title="{} Results".format(command_name.capitalize()),description=msg,footer=footer).edit(ctx,message)
@commands.command(aliases=["yeet"])
async def kick(self, ctx, *, members = None, reason = None):
"""Kicks the passed members for the specified reason.
All kick targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $kick @user1#1234 @user2#5678 @user3#9012 for spamming"""
await self.kick_ban(ctx,members,"kick")
@commands.command(aliases=["yote"])
async def ban(self, ctx, *, members = None, reason = None):
"""Bans the passed members for the specified reason.
All ban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $ban @user1#1234 @user2#5678 @user3#9012 for spamming
Can take r=#, rem=#, remove=# or removal=# (optionally prefixed with -) within the reason to specify the number of days worth of the banned users' messages to remove.
This is limited to 0-7 days, and will override the value set by the rembanmessages command.
eg: $ban @user1#1234 @user2#5678 @user3#9012 for spamming -rem=5"""
await self.kick_ban(ctx,members,"ban")
@commands.command()
async def unban(self, ctx, *, members = None, reason = None):
"""Unbans the passed members for the specified reason.
All unban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $unban @user1#1234 @user2#5678 @user3#9012 because we're nice"""
await self.kick_ban(ctx,members,"unban")
@commands.command()
async def banned(self, ctx, *, user_id = None):
"""Queries the guild's ban list for the passed user id and responds with whether they've been banned and the reason.
Use with no user_id to show all bans and reasons (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
try: all_bans = await ctx.guild.bans()
except: return await ctx.send("I couldn't get the ban list :(")
if not len(all_bans): return await Message.EmbedText(title="Ban List",description="No bans found",color=ctx.author).send(ctx)
orig_user = user_id
try: user_id = int(user_id) if user_id != None else None
except: user_id = -1 # Use -1 to indicate unresolved
entries = []
for ban in all_bans:
entries.append({"name":"{}#{} ({})".format(ban.user.name,ban.user.discriminator,ban.user.id),"value":ban.reason if ban.reason else "No reason provided"})
if user_id != None and user_id == ban.user.id:
# Got a match - display it
return await Message.Embed(
title="Ban Found For {}".format(user_id),
fields=[entries[-1]], # Send the last found entry
color=ctx.author
).send(ctx)
if orig_user is None:
# Just passed None - show the whole ban list
return await PickList.PagePicker(title="Ban List ({:,} total)".format(len(entries)),list=entries,ctx=ctx).pick()
# We searched for something and didn't find it
return await Message.Embed(title="Ban List ({:,} total)".format(len(entries)),description="No match found for '{}'.".format(orig_user),color=ctx.author).send(ctx)
@commands.command()
async def rembanmessages(self, ctx, number_of_days = None):
"""Gets or sets the default number of days worth of messages to remove when banning a user. Must be between 0-7 and uses a default of 1 (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if number_of_days == None: # No setting passed, just output the current
days = self.settings.getServerStat(ctx.guild,"BanMessageRemoveDays",1)
return await ctx.send("Banning a user will remove {:,} day{} worth of messages.".format(days,"" if days==1 else "s"))
# Try to cast the days as an int - and ensure they're between 0 and 7
try:
days = int(number_of_days)
assert 0<=days<8
except:
return await ctx.send("Number of days must be an integer between 0 and 7!")
# At this point, we should have the default number of days - let's tell the user!
self.settings.setServerStat(ctx.guild,"BanMessageRemoveDays",days)
return await ctx.send("Banning a user will now remove {:,} day{} worth of messages.".format(days,"" if days==1 else "s"))
#!/usr/bin/env python2
# RDPy
#
# Copyright (C) 2013 Jeffrey Stiles (@uth_cr33p)(jeff@aerissecure.com)
#
# Todo:
# - switch to asn1tinydecoder.py: http://getreu.net/public/downloads/software/ASN1_decoder/README.html
#
# Thank you to:
# http://labs.portcullis.co.uk/application/rdp-sec-check/
# http://troels.arvin.dk/code/nagios/check_x224
#
# RDP Protocol Data Unit (PDU) Specifications
# see PDF document: http://tinyurl.com/mefmmo9
#
# COMPLETE X.224 INFORMATION
#
# x224ConnectionRequestPDU
# |0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|
# | tpktHeader |
# | x224Crq |
# | ... | rdpNegData |
# | ... |
# | ... | |
# | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
# tpktHeader (4 bytes): TPKT Header
# big endian(>):
# version(B)
# reserved(B)
# length(H)
# x224Crq (7 bytes): Connection Request Transport Protocol Data Unit
# big endian(>):
# length(B)
# cr - connection request code(B)
# dst-ref(H)
# src-ref(H)
# co - class option(B)
# rdpNegData(RDP_NEG_REQ) (8 bytes):
# little endian(<):
# type(B)
# flags(B)
# length(H)
# rp - requestedProtocols(I)
#
# x224ConnectionConfirmPDU
# |0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|
# | tpktHeader |
# | x224Ccf |
# | ... | routingToken |
# | ... |
# | cookie |
# | ... |
# | rdpNegData |
# | ... |
# | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
# tpktHeader (4 bytes): TPKT Header
# big endian(>):
# version(B)
# reserved(B)
# length(H)
# x224Ccf (7 bytes): Connection Confirm Transport Protocol Data Unit
# big endian(>):
# length(B)
# cc - connection confirm code(B)
# dst-ref(H)
# src-ref(H)
# co - class option(B)
# routingToken (optional)
# cookie (optional)
# rdpNegData(RDP_NEG_RSP) (8 bytes):
# little endian(<):
# type(B)
# flags(B)
# length(H)
# sp - selectedProtocol(I)
# rdpNegData(RDP_NEG_FAILURE) (8 bytes):
# little endian(<):
# type(B)
# flags(B)
# length(H)
# fc - failureCode(I)
#
# INCOMPLETE MCS INFORMATION
#
# 2.2.1.3 (p.38)
# MCSConnectInitialPDU
# |0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|
# | tpktHeader |
# | x224data | mcsCi |
# | ... |
# ... etc
#
#
# 2.2.1.4 (p.55)
# MCSConnectResponsePDU
# |0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|
# | tpktHeader |
# | x224data | mcsCrsp |
# | ... |
# ... etc
# tpktHeader (4 bytes)
# x224Data (3 bytes)
# mcsCrsp (variable)(BER)
# gccCCrsp (variable)(PER)
# serverCoreData(TS_UD_SC_COR) (12 bytes)
# header(HH), type(H)(\x0c\x02), length(H)(..)
# version(I)
# clientRequestedProtocols(I)
# earlyCapabilityFlags(I)
# serverNetworkData (variable)
# serverSecurityData (variable)
# header(HH), type(H)(\x02\x0c), length(H)(..)
# encryptionMethod(I)
# encryptionLevel(I)
# serverRandomLen(I)
# serverCertLen(I)
# ...
# serverMessageChannelData (8 bytes)
# serverMultitransportChannelData (8 bytes)
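#
# Worked example (illustrative): the 19-byte x224ConnectionRequestPDU built by
# x224ConnectionRequest(rp=PROTOCOL_SSL) below is the concatenation of
#   tpktHeader   struct.pack('>BBH', 3, 0, 19)           -> 03 00 00 13
#   x224Crq      struct.pack('>BBHHB', 14, 224, 0, 0, 0) -> 0e e0 00 00 00 00 00
#   RDP_NEG_REQ  struct.pack('<BBHI', 1, 0, 8, 1)        -> 01 00 08 00 01 00 00 00
# i.e. 03 00 00 13 0e e0 00 00 00 00 00 01 00 08 00 01 00 00 00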
import argparse
import socket
import struct
import time
import re
from pyasn1.codec.ber import decoder
# custom errors
class ConnectionError(Exception):
"""Generic connection error"""
pass
class ResponseError(Exception):
"""Generic response error"""
pass
# protocols
PROTOCOL_OLD = -1 # Old RDP Protocol (Win XP/2000/2003)
PROTOCOL_RDP = 0 # Standard RDP security
PROTOCOL_SSL = 1 # TLS
PROTOCOL_HYBRID = 2 # CredSSP (requires PROTOCOL_SSL (3))
PROTOCOL_SSL_HYBRID = 3 # PROTOCOL_SSL + PROTOCOL_HYBRID
PROTOCOL_HYBRID_EX = 8 # CredSSP EX (requires PROTOCOL_HYBRID (10))
PROTOCOL_HYBRID_HYBRID_EX = 10 # PROTOCOL_HYBRID + PROTOCOL_HYBRID_EX
# 10 FOR PROTOCOL_HYBRID_EX (requires PROTOCOL_HYBRID)
# 3 FOR PROTOCOL_HYBRID (requires PROTOCOL_SSL)
LU_PROTOCOL = {
PROTOCOL_OLD: 'rdpNegData ignored (Windows 2000/XP/2003?)', # rdpNegData supplied but returned empty
PROTOCOL_RDP: 'Standard RDP Security',
PROTOCOL_SSL: 'TLS 1.0, 1.1 or 1.2 Security',
PROTOCOL_HYBRID: 'Hybrid (TLS + CredSSP) Security',
PROTOCOL_SSL_HYBRID: 'Hybrid (TLS + CredSSP) Security',
PROTOCOL_HYBRID_EX: 'Hybrid (TLS + CredSSP EX) Security',
PROTOCOL_HYBRID_HYBRID_EX: 'Hybrid (TLS + CredSSP EX) Security',
}
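# Note (illustrative): requestedProtocols is a bit field, so the combined values
# above are ORs of the base flags, e.g.
#   PROTOCOL_SSL | PROTOCOL_HYBRID == 3        (PROTOCOL_SSL_HYBRID)
#   PROTOCOL_HYBRID | PROTOCOL_HYBRID_EX == 10 (PROTOCOL_HYBRID_HYBRID_EX)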
# negotiation codes
RDP_NEG_TYPE_REQ = 1 # Code for request
RDP_NEG_TYPE_RSP = 2 # Code for response
RDP_NEG_TYPE_FAILURE = 3 # Code for response failure
NEG_TYPE = {
RDP_NEG_TYPE_REQ: 'RDP Negotiation Request',
RDP_NEG_TYPE_RSP: 'RDP Negotiation Response',
RDP_NEG_TYPE_FAILURE: 'RDP Negotiation Failure',
}
# failure codes
SSL_REQUIRED_BY_SERVER = 1
SSL_NOT_ALLOWED_BY_SERVER = 2
SSL_CERT_NOT_ON_SERVER = 3
INCONSISTENT_FLAGS = 4
HYBRID_REQUIRED_BY_SERVER = 5
SSL_WITH_USER_AUTH_REQUIRED_BY_SERVER = 6
LU_FAILURE_CODE = {
SSL_REQUIRED_BY_SERVER: 'SSL REQUIRED BY SERVER',
SSL_NOT_ALLOWED_BY_SERVER: 'SSL NOT ALLOWED BY SERVER',
SSL_CERT_NOT_ON_SERVER: 'SSL CERT NOT ON SERVER',
INCONSISTENT_FLAGS: 'INCONSISTENT FLAGS',
HYBRID_REQUIRED_BY_SERVER: 'HYBRID REQUIRED BY SERVER',
SSL_WITH_USER_AUTH_REQUIRED_BY_SERVER: 'SSL WITH USER AUTH REQUIRED BY SERVER',
}
# encryption levels
ENCRYPTION_LEVEL_NONE = 0
ENCRYPTION_LEVEL_LOW = 1
ENCRYPTION_LEVEL_CLIENT_COMPATIBLE = 2
ENCRYPTION_LEVEL_HIGH = 3
ENCRYPTION_LEVEL_FIPS = 4
LU_ENCRYPTION_LEVEL = {
ENCRYPTION_LEVEL_NONE: 'None',
ENCRYPTION_LEVEL_LOW: 'Low',
ENCRYPTION_LEVEL_CLIENT_COMPATIBLE: 'Client Compatible',
ENCRYPTION_LEVEL_HIGH: 'High',
ENCRYPTION_LEVEL_FIPS: 'FIPS',
}
# encryption methods
ENCRYPTION_METHOD_NONE = 0
ENCRYPTION_METHOD_40BIT = 1
ENCRYPTION_METHOD_128BIT = 2
ENCRYPTION_METHOD_56BIT = 8
ENCRYPTION_METHOD_FIPS = 16 # 0x00000010
LU_ENCRYPTION_METHOD = {
ENCRYPTION_METHOD_NONE: 'None',
ENCRYPTION_METHOD_40BIT: '40 Bit',
ENCRYPTION_METHOD_128BIT: '128 Bit',
ENCRYPTION_METHOD_56BIT: '56 Bit',
ENCRYPTION_METHOD_FIPS: 'FIPS',
}
# server versions
SERVER_VERSION_4 = 524289 # 0x00080001
SERVER_VERSION_5 = 524292 # 0x00080004
LU_SERVER_VERSION = {
SERVER_VERSION_4: 'RDP 4.0 servers',
SERVER_VERSION_5: 'RDP 5.0, 5.1, 5.2, 6.0, 6.1, 7.0, 7.1, and 8.0 servers',
}
# Denial of Service (DoS), Man-in-the-Middle (MitM), Weak Encryption
# configuration issues
NLA_SUPPORTED_BUT_NOT_MANDATED_DOS = 0 # Network Level Authentication (NLA) (passes creds instead of prompting after connect)
NLA_NOT_SUPPORTED_DOS = 1
SSL_SUPPORTED_BUT_NOT_MANDATED_MITM = 2
ONLY_RDP_SUPPORTED_MITM = 3
WEAK_RDP_ENCRYPTION_SUPPORTED = 4
NULL_RDP_ENCRYPTION_SUPPORTED = 5
FIPS_SUPPORTED_BUT_NOT_MANDATED = 6
LU_ISSUES = {
NLA_SUPPORTED_BUT_NOT_MANDATED_DOS: 'NLA supported but not mandated DoS',
NLA_NOT_SUPPORTED_DOS: 'NLA not supported DoS',
SSL_SUPPORTED_BUT_NOT_MANDATED_MITM: 'SSL supported but not mandated MitM',
ONLY_RDP_SUPPORTED_MITM: 'Only RDP supported MitM',
WEAK_RDP_ENCRYPTION_SUPPORTED: 'Weak RDP encryption supported',
NULL_RDP_ENCRYPTION_SUPPORTED: 'Null RDP encryption supported',
FIPS_SUPPORTED_BUT_NOT_MANDATED: 'FIPS supported but not mandated',
}
# generic socket wrapper for RDP requests (not only X.224 connection requests)
class RDPSocket:
"""Socket object for submitting requests to RDP server"""
def __init__(self, hostname, port=3389, timeout=10):
self.hostname = hostname
self.port = port
self.timeout = timeout
self.connect()
def connect(self):
"""Open socket"""
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)# defaults
self.s.settimeout(self.timeout)
try:
self.s.connect((self.hostname, self.port))
except (socket.error, socket.timeout), e:
if e[0] == 111 or e[0] == 'timed out': # 'timed out' is when settimeout is used
e = ConnectionError('RDP server not listening: %s:%s' % (self.hostname, self.port))
raise ConnectionError('Could not set up connection: %s' % e)
def send(self, pdu):
"""Send specified PDU"""
try:
sbytes = self.s.send(pdu)
if sbytes != len(pdu):
raise ConnectionError('Could not send RDP payload')
return self.s.recv(1024)
# if no error, can continue to send
except socket.error, e:
if e[0] == 104:
raise ConnectionError('Bad request or protocol not supported: %s' % e)
def disconnect(self, s, pdu=None):
"""Send disconnect request"""
try:
pdu = x224DisconnectRequestPDU().pdu if not pdu else pdu
sbytes = s.send(pdu)
if sbytes != len(pdu):
raise ConnectionError('Could not send RDP disconnection payload')
s.close()
except socket.error, e:
raise ConnectionError('Error sending disconnect request: %s' % e)
# x224ConnectionRequest, x224ConnectionConfirm, MCSConnectInitial, MCSConnectResponse
class tpktHeader: # ln required for object construction
"""TPKT Header used in connection requests and connection confirmations"""
def __init__(self, resp=None, ver=3, res=0, ln=0):
self.enc = '>BBH' # (>) - big endian
self.ver = ver # (B) - version
self.res = res # (B) - reserved
self.ln = ln # (H) - length(entire PDU)
if resp:
self.unpack(resp)
def pack(self):
"""Pack attrs to C structure"""
return struct.pack(self.enc, self.ver, self.res, self.ln)
def unpack(self, resp):
"""Unpack C structure to attrs"""
self.ver, self.res, self.ln = struct.unpack(self.enc, resp)
# x224ConnectionRequestPDU
class RDP_NEG_REQ:
"""RDP negotiation request used in connection requests"""
def __init__(self, type=1, flags=0, ln=8, rp=0):
self.enc = '<BBHI' # (<) - little endian
self.type = type # (B) - type
self.flags = flags # (B) - flags
self.ln = ln # (H) - length
self.rp = rp # (I) - requested protocols
def pack(self):
"""Pack attrs to C structure"""
return struct.pack(self.enc, self.type, self.flags, self.ln, self.rp)
# x224ConnectionRequestPDU
class x224Crq:
"""Connection request transport PDU used in connection requests"""
def __init__(self, ln=14, cr=224, dst_ref=0, src_ref=0, co=0):
# cookie omitted
self.enc = '>BBHHB' # (>) big endian
self.ln = ln # (B) - length (6=len this header, 8=len rdpNegData)
        self.cr = cr # (B) - connection request code
        self.dst_ref = dst_ref # (H) - destination reference
        self.src_ref = src_ref # (H) - source reference
self.co = co # (B) - class option
def pack(self):
return struct.pack(self.enc, self.ln, self.cr, self.dst_ref, self.src_ref, self.co)
# x224ConnectionConfirmPDU
class RDP_NEG_RSP:
"""RDP negotiation response used in connection responses"""
def __init__(self, resp):
self.enc = '<BBHI' # (<) - little endian
        # (B)type, (B)flags, (H)ln - length, (I)sp - selected protocol
self.type, self.flags, self.ln, self.sp = struct.unpack(self.enc, resp)
def sp_display(self):
pass
# x224ConnectionConfirmPDU
class RDP_NEG_FAILURE:
"""RDP negotiation failure used in connection responses"""
def __init__(self, resp):
self.enc = '<BBHI' # (<) - little endian
        # (B)type, (B)flags, (H)ln - length, (I)fc - failure code
self.type, self.flags, self.ln, self.fc = struct.unpack(self.enc, resp)
# x224ConnectionConfirmPDU
class x224Ccf:
"""Connection confirmation transport PDU used in connection responses"""
def __init__(self, resp):
self.enc = '>BBHHB' # (>) big endian
# (B)ln - length, (B)cc - connection code, dst_ref(H), src_ref(H), co(B) - class option
self.ln, self.cc, self.dst_ref, self.src_ref, self.co = struct.unpack(self.enc, resp)
class TS_UD_SC_SEC1:
"""Server Security Data used in MCS Connect Response PDU"""
def __init__(self, resp):
self.enc = '<IIII' # (<) little endian
# header = \x02\x0c + (H)length
        # omitted - (I)header
        # (I)em - encryption method, (I)el - encryption level, (I)rl - server random length, (I)cl - server certificate length
self.em, self.el, self.rl, self.cl = struct.unpack(self.enc, resp)
def em_display(self):
return LU_ENCRYPTION_METHOD.get(self.em)
def el_display(self):
return LU_ENCRYPTION_LEVEL.get(self.el)
# Requests:
class x224ConnectionRequest:
"""X.224 Connection Request PDU"""
def __init__(self, rp=0): ## add options inputs
tpktheader_len = 4 # fixed length
self.x224crq = x224Crq()
self.rdp_neg_data = RDP_NEG_REQ(rp=rp)
self.x224_body = self.x224crq.pack() + self.rdp_neg_data.pack()
self.tpktheader = tpktHeader(ln=len(self.x224_body) + tpktheader_len).pack()
self.pdu = self.tpktheader + self.x224_body
class x224DisconnectRequest:
"""X.224 Disconnect Request PDU"""
def __init__(self):
tpktheader_len = 4 # fixed length
        # for Crq, reduce the length since rdp_neg_data is omitted and use the 128 disconnect code
self.x224crq = x224Crq(ln=6, cr=128)
self.x224_body = self.x224crq.pack() # no rdp_neg_data
self.tpktheader = tpktHeader(ln=len(self.x224_body) + tpktheader_len).pack()
self.pdu = self.tpktheader + self.x224_body
# no RDP Security Layer if the connection is refused (e.g. due to hybrid-only support)
class x224BasicRequest:## could make x224ConnectionRequest more generic to incorporate this
"""X.224 Connection Request PDU without rdpNegData"""
def __init__(self):
tpktheader_len = 4 # fixed length
        # for Crq, reduce the length since rdp_neg_data is omitted
self.x224crq = x224Crq(ln=6)
self.x224_body = self.x224crq.pack() # no rdp_neg_data
self.tpktheader = tpktHeader(ln=len(self.x224_body) + tpktheader_len).pack()
self.pdu = self.tpktheader + self.x224_body
class MCSConnectInitial:
"""MCS Connect Initial PDU"""
def __init__(self, encryption_method):
# Client MCS Connection Request does not specify encryption_level
# grabbed from observed requests instead of constructing from scratch
pdu = '\x03\x00\x01\xa2\x02\xf0\x80\x7f\x65\x82\x01\x96\x04\x01\x01\x04\x01'
pdu += '\x01\x01\x01\xff\x30\x20\x02\x02\x00\x22\x02\x02\x00\x02\x02\x02\x00'
pdu += '\x00\x02\x02\x00\x01\x02\x02\x00\x00\x02\x02\x00\x01\x02\x02\xff\xff'
pdu += '\x02\x02\x00\x02\x30\x20\x02\x02\x00\x01\x02\x02\x00\x01\x02\x02\x00'
pdu += '\x01\x02\x02\x00\x01\x02\x02\x00\x00\x02\x02\x00\x01\x02\x02\x04\x20'
pdu += '\x02\x02\x00\x02\x30\x20\x02\x02\xff\xff\x02\x02\xfc\x17\x02\x02\xff'
pdu += '\xff\x02\x02\x00\x01\x02\x02\x00\x00\x02\x02\x00\x01\x02\x02\xff\xff'
pdu += '\x02\x02\x00\x02\x04\x82\x01\x23\x00\x05\x00\x14\x7c\x00\x01\x81\x1a'
pdu += '\x00\x08\x00\x10\x00\x01\xc0\x00\x44\x75\x63\x61\x81\x0c\x01\xc0\xd4'
pdu += '\x00\x04\x00\x08\x00\x20\x03\x58\x02\x01\xca\x03\xaa\x09\x04\x00\x00'
pdu += '\x28\x0a\x00\x00\x68\x00\x6f\x00\x73\x00\x74\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xca\x01\x00\x00\x00\x00'
pdu += '\x00\x18\x00\x07\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
pdu += '\x00\x00\x00\x00\x00\x04\xc0\x0c\x00\x09\x00\x00\x00\x00\x00\x00\x00'
# insert encryption type into the PDU
pdu += ('\x02\xc0\x0c\x00%s\x00\x00\x00\x00\x00\x00\x00\x03\xc0\x20\x00\x02'
% struct.pack('<B', encryption_method))
pdu += '\x00\x00\x00\x63\x6c\x69\x70\x72\x64\x72\x00\xc0\xa0\x00\x00\x72\x64'
pdu += '\x70\x64\x72\x00\x00\x00\x80\x80\x00\x00'
self.pdu = pdu
# Responses:
class x224ConnectionConfirm:# response to X.224 connection
"""X.224 Connection Confirm PDU"""
def __init__(self, resp):
self.resp = resp
self.ln = len(resp) # use as check for response type
if len(resp) != 11 and len(resp) != 19:
raise ResponseError('X.224 connection confirm PDU of unexpected length (%d)' % self.ln)
self.tpktheader = tpktHeader(resp[0:4])
self.x224ccf = x224Ccf(resp[4:11])
        # see 3.3.5.3.2, rdp_neg_data SHOULD be returned, but not always
if len(resp) == 11: #(4)tpktHeader, (7)x224Ccf
self.rdp_neg_data = None
if len(resp) == 19: # (4)tpktHeader, (7)x224Ccf, (8)RDP_NEG_RSP/RDP_NEG_FAILURE
# check RDP NEG type
rdp_neg_type = struct.unpack('<B', resp[11])[0]
# self.rdp_neg_data may be RDP_NEG_RSP or RDP_NEG_FAILURE object
if rdp_neg_type == RDP_NEG_TYPE_RSP:
self.rdp_neg_data = RDP_NEG_RSP(resp[11:19])
elif rdp_neg_type == RDP_NEG_TYPE_FAILURE:
self.rdp_neg_data = RDP_NEG_FAILURE(resp[11:19])
else:
raise ResponseError('Unknown RDP_NEG_TYPE (%d)' % rdp_neg_type)
class MCSConnectResponse:#response to MCS connection
"""MCS Connect Response PDU"""
def __init__(self, resp):
self.resp = resp
self.ln = len(resp) # use as check for response type
self.tpktheader = tpktHeader(resp[0:4])
#self.x224data = x224Data(resp[4:7] # not yet implemented
if self.ln < 8:
raise ResponseError('MCS response of unexpected length (%d)' % self.ln)
self.decoded_resp = decoder.decode(resp[7:])[1]
# self.decoded_resp
try:
security_data = re.search("\x02\x0c..(.{16})", self.decoded_resp, re.DOTALL).groups()[0]
self.ts_ud_sc_sec1 = TS_UD_SC_SEC1(security_data)
except AttributeError, e:
raise ResponseError('Unknown error regexing TS_UD_SC_SEC1: %s' % e)
## figure out what to do with this and x224BasicRequest
def classic_rdp_security_support(rdpsocket):
"""True if supports MCS and encryption, this is a basic connection without NEG data"""
try:
cr = x224BasicRequest()
rdpsocket.connect()
resp = rdpsocket.send(cr.pdu)
cc = x224ConnectionConfirm(resp)
return True if cc.x224ccf.cc == 208 else False ## maybe don't check for 208, just connection
except ConnectionError: # bad request, not bad connection
return False
## abstract the protocol tests
## is there a test for hybrid_ex?
def protocol_rdp_support(rdpsocket):
"""(True, 0) if supports RDP Security"""
try:
cr = x224ConnectionRequest(rp=PROTOCOL_RDP)
rdpsocket.connect()
resp = rdpsocket.send(cr.pdu)
cc = x224ConnectionConfirm(resp)
        if not cc.rdp_neg_data: # handle response with no rdp_neg_data
return (False, PROTOCOL_OLD)
supported = True if cc.rdp_neg_data.type == RDP_NEG_TYPE_RSP else False
return (supported, cc.rdp_neg_data.sp) if supported else (supported, cc.rdp_neg_data.fc)
except ConnectionError: # bad socket.send, not bad socket.connect
return (False, None)
def protocol_ssl_support(rdpsocket):
"""(True, 1) if supports TLS security"""
try:
cr = x224ConnectionRequest(rp=PROTOCOL_SSL)
rdpsocket.connect()
resp = rdpsocket.send(cr.pdu)
cc = x224ConnectionConfirm(resp)
        if not cc.rdp_neg_data: # handle response with no rdp_neg_data
return (False, PROTOCOL_OLD)
supported = True if cc.rdp_neg_data.type == RDP_NEG_TYPE_RSP else False
return (supported, cc.rdp_neg_data.sp) if supported else (supported, cc.rdp_neg_data.fc)
except ConnectionError: # bad socket.send, not bad socket.connect
return (False, None)
def protocol_hybrid_support(rdpsocket):
"""(True, 3) if supports Hybrid"""
try:
cr = x224ConnectionRequest(rp=PROTOCOL_SSL_HYBRID)
rdpsocket.connect()
resp = rdpsocket.send(cr.pdu)
cc = x224ConnectionConfirm(resp)
        if not cc.rdp_neg_data: # handle response with no rdp_neg_data
return (False, PROTOCOL_OLD)
supported = True if cc.rdp_neg_data.type == RDP_NEG_TYPE_RSP else False
return (supported, cc.rdp_neg_data.sp) if supported else (supported, cc.rdp_neg_data.fc)
except ConnectionError: # bad socket.send, not bad socket.connect
return (False, None)
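## a possible abstraction of the three near-identical tests above
## (sketch only, kept as a comment and not wired into protocol_support below):
# def _protocol_test(rdpsocket, rp):
#     """(supported, selectedProtocol or failureCode/None) for one requestedProtocols value"""
#     try:
#         rdpsocket.connect()
#         cc = x224ConnectionConfirm(rdpsocket.send(x224ConnectionRequest(rp=rp).pdu))
#         if not cc.rdp_neg_data:
#             return (False, PROTOCOL_OLD)
#         supported = cc.rdp_neg_data.type == RDP_NEG_TYPE_RSP
#         return (supported, cc.rdp_neg_data.sp if supported else cc.rdp_neg_data.fc)
#     except ConnectionError:
#         return (False, None)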
def protocol_support(rdpsocket):
"""Conglomeration of protocol tests"""
protocols = []
failure_codes = []
for test in [protocol_rdp_support, protocol_ssl_support, protocol_hybrid_support]:
supported, type = test(rdpsocket)
if supported and type not in protocols:
protocols.append(type)
elif not supported and type not in failure_codes and type in LU_FAILURE_CODE:
failure_codes.append(type)
elif not supported and type == PROTOCOL_OLD and type not in protocols:
protocols.append(type)
return protocols, failure_codes
def encryption_support(rdpsocket):
methods = []
levels = []
for em in LU_ENCRYPTION_METHOD:
try:
rdpsocket.connect()
resp = rdpsocket.send(x224BasicRequest().pdu)
# check for response length 11
x224ConnectionConfirm(resp)
resp = rdpsocket.send(MCSConnectInitial(em).pdu)
mcsr = MCSConnectResponse(resp)
if mcsr.ts_ud_sc_sec1.em not in methods:
methods.append(mcsr.ts_ud_sc_sec1.em)
if mcsr.ts_ud_sc_sec1.el not in levels:
levels.append(mcsr.ts_ud_sc_sec1.el)
except ConnectionError:
pass # do nothing, should be an unsupported request
except ResponseError:
raise # something went wrong
return methods, levels
class RDPConfig:
"""RDP configuration representing queried data"""
def __init__(self, hostname, port=3389, timeout=10):
self.hostname = hostname
self.port = port
self.timeout = timeout
self.protocols = []
self.failure_codes = []
self.encryption_methods = []
self.encryption_levels = []
self.issues = []
# Try to connect
try:
            self.rdpsocket = RDPSocket(self.hostname, self.port, self.timeout)
self.alive = True
except ConnectionError:
self.alive = False
def run_tests(self):
"""don't run rests unless alive"""
if not self.alive:
return
# get protocol info
self.protocols, self.failure_codes = protocol_support(self.rdpsocket)
# get encryption info
self.encryption_methods, self.encryption_levels = encryption_support(self.rdpsocket)
# get issue info
self.issues = []
if PROTOCOL_HYBRID in self.protocols: #NLA DoS
# see: http://en.wikipedia.org/wiki/Network_Level_Authentication
if PROTOCOL_RDP in self.protocols or PROTOCOL_SSL in self.protocols:
self.issues.append(NLA_SUPPORTED_BUT_NOT_MANDATED_DOS)
else:
self.issues.append(NLA_NOT_SUPPORTED_DOS)
if PROTOCOL_RDP in self.protocols:
            if PROTOCOL_SSL in self.protocols or PROTOCOL_HYBRID in self.protocols:
self.issues.append(SSL_SUPPORTED_BUT_NOT_MANDATED_MITM)
else:
self.issues.append(ONLY_RDP_SUPPORTED_MITM)
        if ENCRYPTION_METHOD_40BIT in self.encryption_methods or ENCRYPTION_METHOD_56BIT in self.encryption_methods:
self.issues.append(WEAK_RDP_ENCRYPTION_SUPPORTED)
if ENCRYPTION_METHOD_NONE in self.encryption_methods:
self.issues.append(NULL_RDP_ENCRYPTION_SUPPORTED)
if ENCRYPTION_METHOD_FIPS in self.encryption_methods and len(self.encryption_methods) > 1:
self.issues.append(FIPS_SUPPORTED_BUT_NOT_MANDATED)
def results(self, fmt=None):
print 'Target: %s' % self.hostname
print 'Port: %s' % self.port if not self.port == 3389 else 'Port: 3389 (default)'
print 'Host Status: UP' if self.alive else 'Host Status: DOWN'
print
if not self.alive:
return
print '[+] Supported Protocols:'
if self.protocols:
for p in self.protocols:
print '\t%s' % LU_PROTOCOL[p]
print
else:
print '\t(None)\n'
print '[+] Supported Encryption Methods:'
if self.encryption_methods:
for em in self.encryption_methods:
print '\t%s' % LU_ENCRYPTION_METHOD[em]
print
else:
print '\t(None)\n'
print '[+] Supported Encryption Levels:'
if self.encryption_levels:
for el in self.encryption_levels:
print '\t%s' % LU_ENCRYPTION_LEVEL[el]
print
else:
print '\t(None)\n'
print '[+] Security Issues:'
if self.issues:
for i in self.issues:
print '\t%s' % LU_ISSUES[i]
print
else:
print '\t(None)\n'
print '[+] Server Messages:'
if self.failure_codes:
for fc in self.failure_codes:
print '\t%s' % LU_FAILURE_CODE[fc]
print
else:
print '\t(None)\n'
issue_descriptions = """
Title:
Remote Desktop Encryption Vulnerabilities
Description:
There are multiple issues with the Remote Desktop configuration
- NLA supported but not mandated DoS
When a Remote Desktop (RDP) connection is initiated with an RDP server that does not require Network Level Authentication (NLA), the server will establish a session with the client and present the login screen before authentication takes place. This uses up resources on the server, and is a potential area for denial of service attacks. NLA delegates the user's credentials from the client through a client side Security Support Provider Interface (SSPI) and prompts the user to authenticate before establishing a session on the server.
- SSL supported but not mandated MitM
SSL encryption adds an additional layer of validation that the server must provide to the client. However, if hostnames and legitimate certificates are not used, the client will be presented with a warning dialog box that they must acknowledge.
- Weak RDP encryption supported
One or more of the following weak encryption methods is supported: 40 Bit, 56 Bit
Solution:
Navigate to System Properties -> Remote. Select the option for "Allow connection only from computers running Remote Desktop with Network Level Authentication (more secure)".
"""
if __name__ == "__main__":
# test for now
parser = argparse.ArgumentParser()# maybe add description?
# rdpy.py [options] target(s)
parser.add_argument('--port', default=3389, type=int,
help='RDP listening port')
parser.add_argument('--timeout', default=10, type=int,
help='Connection timeout (in seconds)')
parser.add_argument('-d', '--description', action='store_true',
                        help='Display detailed vulnerability description information')
parser.add_argument('hostname', nargs='+', type=str)
args = parser.parse_args()
alive = 0
for h in args.hostname:
rdpc = RDPConfig(h, args.port, args.timeout)
if rdpc.alive:
alive += 1
rdpc.run_tests()
rdpc.results()
print '--------------------------------------------------\n'
print 'Total Hosts: %s' % len(args.hostname)
print 'Listening Hosts: %s' % alive
print
if args.description:
print '--------------------------------------------------\n'
print issue_descriptions
print
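# Example invocation (hypothetical target address):
#   python2 rdpy.py --timeout 5 --description 192.0.2.10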
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
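# Illustrative use of f_oneway (comment only; the numbers are made up):
#   >>> import numpy as np
#   >>> g1 = np.array([[1.], [2.], [3.]])
#   >>> g2 = np.array([[2.], [3.], [4.]])
#   >>> g3 = np.array([[5.], [6.], [7.]])
#   >>> F, p = f_oneway(g1, g2, g3)   # one F value and one p-value per column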
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
    y : array of shape(n_samples)
        The target vector (class labels).
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
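# Small hand-computed example for chi2 (comment only): with
#   X = [[1, 0], [1, 1], [0, 1], [0, 1]] and y = [0, 0, 1, 1]
# we get feature_count = [2, 3], class_prob = [0.5, 0.5],
#   observed = Y.T.dot(X) = [[2, 1], [0, 2]]
#   expected = [[1, 1.5], [1, 1.5]]
# so the chi2 statistics per feature are
#   [(2-1)^2/1 + (0-1)^2/1, (1-1.5)^2/1.5 + (2-1.5)^2/1.5] = [2.0, ~0.333]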
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Linear model for testing the individual effect of each of many regressors.
    This is a scoring function to be used in a feature selection procedure, not
a free standing feature selection procedure.
This is done in 2 steps:
1. The correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
    y : array of shape(n_samples)
        The target vector.
    center : bool, default=True
        If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
mutual_info_regression: Mutual information for a continuous target.
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
SelectPercentile: Select features based on percentile of the highest
scores.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
# compute centered values
# note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
# need not center X
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
# compute the scaled standard deviations via moments
X_norms = np.sqrt(row_norms(X.T, squared=True) -
n_samples * X_means ** 2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
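# Illustrative conversion used above (comment only): with n_samples = 20 and
# center=True, degrees_of_freedom = 18, so a correlation of corr = 0.5 maps to
#   F = 0.25 / 0.75 * 18 = 6.0
# and the p-value is stats.f.sf(6.0, 1, 18).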
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True)
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
self.scores_, self.pvalues_ = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=bool)
scores = _clean_nans(self.scores_)
threshold = stats.scoreatpercentile(scores,
100 - self.percentile)
mask = scores > threshold
ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = int(len(scores) * self.percentile / 100)
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
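# Small numeric sketch of the threshold and tie handling above (illustrative
# values), with percentile=40:
#
#     scores = np.array([1., 2., 2., 2., 5.])
#     threshold = stats.scoreatpercentile(scores, 60)   # -> 2.0
#     mask = scores > threshold                         # keeps only the 5.0 feature
#     max_feats = int(len(scores) * 40 / 100)           # -> 2
#     # one of the three tied 2.0 features is then added so 2 features are kept;
#     # which of the ties is chosen is unspecified, as the docstring notes.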
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
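# Usage sketch (illustrative data; any score_func returning (scores, pvalues)
# works):
#
#     import numpy as np
#     X = np.array([[1., 5., 3.], [2., 4., 1.], [3., 1., 0.], [4., 2., 2.]])
#     y = np.array([0, 0, 1, 1])
#     X_new = SelectKBest(f_classif, k=2).fit_transform(X, y)
#     # X_new keeps the two columns with the highest ANOVA F-scores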
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information between features and the target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
https://en.wikipedia.org/wiki/False_discovery_rate
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
selected = sv[sv <= float(self.alpha) / n_features *
np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
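# Benjamini-Hochberg sketch of the selection above: with alpha = 0.05 and
# sorted p-values sv = [0.001, 0.01, 0.03, 0.2, 0.5], the per-rank thresholds
# are 0.05 / 5 * [1, 2, 3, 4, 5] = [0.01, 0.02, 0.03, 0.04, 0.05]. The largest
# p-value that clears its threshold is 0.03, so every feature with a p-value
# <= 0.03 is selected.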
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues). For modes 'percentile' or 'kbest' it can return
a single array scores.
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned scores only.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
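# Usage sketch: the generic selector forwards `param` to the named parameter of
# the underlying filter (percentile, k or alpha). For non-negative X,
#
#     transformer = GenericUnivariateSelect(chi2, mode='k_best', param=2)
#     X_new = transformer.fit_transform(X, y)
#
# is equivalent to SelectKBest(chi2, k=2).fit_transform(X, y).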
|
|
# pylint: disable=too-many-lines
"""
Component to interface with cameras.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/camera/
"""
import asyncio
import logging
from aiohttp import web
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView
DOMAIN = 'camera'
DEPENDENCIES = ['http']
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
STATE_RECORDING = 'recording'
STATE_STREAMING = 'streaming'
STATE_IDLE = 'idle'
ENTITY_IMAGE_URL = '/api/camera_proxy/{0}?token={1}'
@asyncio.coroutine
# pylint: disable=too-many-branches
def async_setup(hass, config):
"""Setup the camera component."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
hass.http.register_view(CameraImageView(hass, component.entities))
hass.http.register_view(CameraMjpegStream(hass, component.entities))
yield from component.async_setup(config)
return True
class Camera(Entity):
"""The base class for camera entities."""
def __init__(self):
"""Initialize a camera."""
self.is_streaming = False
@property
def access_token(self):
"""Access token for this camera."""
return str(id(self))
@property
def should_poll(self):
"""No need to poll cameras."""
return False
@property
def entity_picture(self):
"""Return a link to the camera feed as entity picture."""
return ENTITY_IMAGE_URL.format(self.entity_id, self.access_token)
@property
def is_recording(self):
"""Return true if the device is recording."""
return False
@property
def brand(self):
"""Camera brand."""
return None
@property
def model(self):
"""Camera model."""
return None
def camera_image(self):
"""Return bytes of camera image."""
raise NotImplementedError()
@asyncio.coroutine
def async_camera_image(self):
"""Return bytes of camera image.
This method must be run in the event loop.
"""
image = yield from self.hass.loop.run_in_executor(
None, self.camera_image)
return image
@asyncio.coroutine
def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from camera images.
This method must be run in the event loop.
"""
response = web.StreamResponse()
response.content_type = ('multipart/x-mixed-replace; '
'boundary=--jpegboundary')
response.enable_chunked_encoding()
yield from response.prepare(request)
def write(img_bytes):
"""Write image to stream."""
response.write(bytes(
'--jpegboundary\r\n'
'Content-Type: image/jpeg\r\n'
'Content-Length: {}\r\n\r\n'.format(
len(img_bytes)), 'utf-8') + img_bytes + b'\r\n')
last_image = None
try:
while True:
img_bytes = yield from self.async_camera_image()
if not img_bytes:
break
if img_bytes is not None and img_bytes != last_image:
write(img_bytes)
# Chrome seems to always ignore the first picture,
# so send it twice.
if last_image is None:
write(img_bytes)
last_image = img_bytes
yield from response.drain()
yield from asyncio.sleep(.5)
finally:
self.hass.loop.create_task(response.write_eof())
@property
def state(self):
"""Camera state."""
if self.is_recording:
return STATE_RECORDING
elif self.is_streaming:
return STATE_STREAMING
else:
return STATE_IDLE
@property
def state_attributes(self):
"""Camera state attributes."""
attr = {
'access_token': self.access_token,
}
if self.model:
attr['model_name'] = self.model
if self.brand:
attr['brand'] = self.brand
return attr
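# Platform sketch (illustrative only; the file path is a placeholder): a camera
# platform subclasses Camera and implements camera_image(); this component then
# exposes the token-protected image and MJPEG stream views for it.
#
#     class FileCamera(Camera):
#         """Serve a still image from disk on every request."""
#
#         @property
#         def name(self):
#             return 'file_camera'
#
#         def camera_image(self):
#             with open('/tmp/snapshot.jpg', 'rb') as file:
#                 return file.read()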
class CameraView(HomeAssistantView):
"""Base CameraView."""
requires_auth = False
def __init__(self, hass, entities):
"""Initialize a basic camera view."""
super().__init__(hass)
self.entities = entities
@asyncio.coroutine
def get(self, request, entity_id):
"""Start a get request."""
camera = self.entities.get(entity_id)
if camera is None:
return web.Response(status=404)
authenticated = (request.authenticated or
request.GET.get('token') == camera.access_token)
if not authenticated:
return web.Response(status=401)
response = yield from self.handle(request, camera)
return response
@asyncio.coroutine
def handle(self, request, camera):
"""Hanlde the camera request."""
raise NotImplementedError()
class CameraImageView(CameraView):
"""Camera view to serve an image."""
url = "/api/camera_proxy/{entity_id}"
name = "api:camera:image"
@asyncio.coroutine
def handle(self, request, camera):
"""Serve camera image."""
image = yield from camera.async_camera_image()
if image is None:
return web.Response(status=500)
return web.Response(body=image)
class CameraMjpegStream(CameraView):
"""Camera View to serve an MJPEG stream."""
url = "/api/camera_proxy_stream/{entity_id}"
name = "api:camera:stream"
@asyncio.coroutine
def handle(self, request, camera):
"""Serve camera image."""
yield from camera.handle_async_mjpeg_stream(request)
|
|
# -*- coding: utf-8 -*-
import datetime
import logging
import dateutil.parser
import dateutil.tz
from auth import Auth
from elasticsearch import RequestsHttpConnection
from elasticsearch.client import Elasticsearch
logging.basicConfig()
elastalert_logger = logging.getLogger('elastalert')
def new_get_event_ts(ts_field):
""" Constructs a lambda that may be called to extract the timestamp field
from a given event.
:returns: A callable function that takes an event and outputs that event's
timestamp field.
"""
return lambda event: lookup_es_key(event[0], ts_field)
def _find_es_dict_by_key(lookup_dict, term):
""" Performs iterative dictionary search based upon the following conditions:
1. Subkeys may either appear behind a full stop (.) or at one lookup_dict level lower in the tree.
2. No wildcards exist within the provided ES search terms (these are treated as string literals)
This is necessary to get around inconsistencies in ES data.
For example:
{'ad.account_name': 'bob'}
Or:
{'csp_report': {'blocked_uri': 'bob.com'}}
And even:
{'juniper_duo.geoip': {'country_name': 'Democratic People's Republic of Korea'}}
We want a search term of form "key.subkey.subsubkey" to match in all cases.
:returns: A tuple with the first element being the dict that contains the key and the second
element which is the last subkey used to access the target specified by the term. None is
returned for both if the key can not be found.
"""
if term in lookup_dict:
return lookup_dict, term
# If the term does not match immediately, perform iterative lookup:
# 1. Split the search term into tokens
# 2. Iteratively concatenate these tokens to traverse deeper into the dictionary,
#    clearing the accumulated subkey at every successful lookup.
#
# This greedy approach is correct because subkeys must always appear in order,
# whether they are joined by full stops or nested one dictionary level deeper.
#
# Subkeys will NEVER be duplicated between an alias and a traversal.
#
# For example:
# {'foo.bar': {'bar': 'ray'}} to look up foo.bar will return {'bar': 'ray'}, not 'ray'
dict_cursor = lookup_dict
subkeys = term.split('.')
subkey = ''
while len(subkeys) > 0:
subkey += subkeys.pop(0)
if subkey in dict_cursor:
if len(subkeys) == 0:
break
dict_cursor = dict_cursor[subkey]
subkey = ''
elif len(subkeys) == 0:
# If there are no keys left to match, return None values
dict_cursor = None
subkey = None
else:
subkey += '.'
return dict_cursor, subkey
def set_es_key(lookup_dict, term, value):
""" Looks up the location that the term maps to and sets it to the given value.
:returns: True if the value was set successfully, False otherwise.
"""
value_dict, value_key = _find_es_dict_by_key(lookup_dict, term)
if value_dict is not None:
value_dict[value_key] = value
return True
return False
def lookup_es_key(lookup_dict, term):
""" Performs iterative dictionary search for the given term.
:returns: The value identified by term or None if it cannot be found.
"""
value_dict, value_key = _find_es_dict_by_key(lookup_dict, term)
return None if value_key is None else value_dict[value_key]
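# Illustrative lookups using the helpers above:
#
#     event = {'ad.account_name': 'bob',
#              'csp_report': {'blocked_uri': 'bob.com'}}
#     lookup_es_key(event, 'ad.account_name')          # -> 'bob'
#     lookup_es_key(event, 'csp_report.blocked_uri')   # -> 'bob.com'
#     lookup_es_key(event, 'missing.key')              # -> None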
def ts_to_dt(timestamp):
if isinstance(timestamp, datetime.datetime):
logging.warning('Expected str timestamp, got datetime')
return timestamp
dt = dateutil.parser.parse(timestamp)
# Implicitly convert local timestamps to UTC
if dt.tzinfo is None:
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt
def dt_to_ts(dt):
if not isinstance(dt, datetime.datetime):
logging.warning('Expected datetime, got %s' % (type(dt)))
return dt
ts = dt.isoformat()
# Round microseconds to milliseconds
if dt.tzinfo is None:
# Implicitly convert local times to UTC
return ts + 'Z'
# isoformat() uses microsecond accuracy and timezone offsets
# but we should try to use millisecond accuracy and Z to indicate UTC
return ts.replace('000+00:00', 'Z').replace('+00:00', 'Z')
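# Round-trip sketch for ts_to_dt / dt_to_ts:
#
#     ts_to_dt('2017-01-01T12:00:00')   # naive string -> tz-aware UTC datetime
#     dt_to_ts(ts_to_dt('2017-01-01T12:00:00+00:00'))   # -> '2017-01-01T12:00:00Z'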
def ts_to_dt_with_format(timestamp, ts_format):
if isinstance(timestamp, datetime.datetime):
logging.warning('Expected str timestamp, got datetime')
return timestamp
dt = datetime.datetime.strptime(timestamp, ts_format)
# Implicitly convert local timestamps to UTC
if dt.tzinfo is None:
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt
def dt_to_ts_with_format(dt, ts_format):
if not isinstance(dt, datetime.datetime):
logging.warning('Expected datetime, got %s' % (type(dt)))
return dt
ts = dt.strftime(ts_format)
return ts
def ts_now():
return datetime.datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
def inc_ts(timestamp, milliseconds=1):
"""Increment a timestamp by milliseconds."""
dt = ts_to_dt(timestamp)
dt += datetime.timedelta(milliseconds=milliseconds)
return dt_to_ts(dt)
def pretty_ts(timestamp, tz=True):
"""Pretty-format the given timestamp (to be printed or logged hereafter).
If tz, the timestamp will be converted to local time.
Format: YYYY-MM-DD HH:MM TZ"""
dt = timestamp
if not isinstance(timestamp, datetime.datetime):
dt = ts_to_dt(timestamp)
if tz:
dt = dt.astimezone(dateutil.tz.tzlocal())
return dt.strftime('%Y-%m-%d %H:%M %Z')
def ts_add(ts, td):
""" Allows a timedelta (td) add operation on a string timestamp (ts) """
return dt_to_ts(ts_to_dt(ts) + td)
def hashable(obj):
""" Convert obj to a hashable obj.
We use the value of some fields from Elasticsearch as keys for dictionaries. This means
that whatever Elasticsearch returns must be hashable, and it sometimes returns a list or dict."""
if not obj.__hash__:
return str(obj)
return obj
def format_index(index, start, end):
""" Takes an index, specified using strftime format, start and end time timestamps,
and outputs a wildcard based index string to match all possible timestamps. """
# Convert to UTC
start -= start.utcoffset()
end -= end.utcoffset()
indexes = []
while start.date() <= end.date():
indexes.append(start.strftime(index))
start += datetime.timedelta(days=1)
return ','.join(indexes)
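# Example: with index = 'logstash-%Y.%m.%d' and a tz-aware range spanning
# 2017-01-30 through 2017-02-01 UTC, format_index returns
# 'logstash-2017.01.30,logstash-2017.01.31,logstash-2017.02.01'.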
class EAException(Exception):
pass
def seconds(td):
return td.seconds + td.days * 24 * 3600
def total_seconds(td):
# For Python 2.6 compatibility
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6
def dt_to_int(dt):
dt = dt.replace(tzinfo=None)
return int(total_seconds((dt - datetime.datetime.utcfromtimestamp(0))) * 1000)
def unixms_to_dt(ts):
return unix_to_dt(float(ts) / 1000)
def unix_to_dt(ts):
dt = datetime.datetime.utcfromtimestamp(float(ts))
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt
def dt_to_unix(dt):
return total_seconds(dt - datetime.datetime(1970, 1, 1, tzinfo=dateutil.tz.tzutc()))
def dt_to_unixms(dt):
return dt_to_unix(dt) * 1000
def cronite_datetime_to_timestamp(self, d):
"""
Converts a `datetime` object `d` into a UNIX timestamp.
"""
if d.tzinfo is not None:
d = d.replace(tzinfo=None) - d.utcoffset()
return total_seconds((d - datetime.datetime(1970, 1, 1)))
def add_raw_postfix(field):
if not field.endswith('.raw'):
field += '.raw'
return field
def elasticsearch_client(conf):
""" returns an Elasticsearch instance configured using an es_conn_config """
es_conn_conf = build_es_conn_config(conf)
auth = Auth()
es_conn_conf['http_auth'] = auth(host=es_conn_conf['es_host'],
username=es_conn_conf['es_username'],
password=es_conn_conf['es_password'],
aws_region=es_conn_conf['aws_region'],
boto_profile=es_conn_conf['boto_profile'])
return Elasticsearch(host=es_conn_conf['es_host'],
port=es_conn_conf['es_port'],
url_prefix=es_conn_conf['es_url_prefix'],
use_ssl=es_conn_conf['use_ssl'],
verify_certs=es_conn_conf['verify_certs'],
connection_class=RequestsHttpConnection,
http_auth=es_conn_conf['http_auth'],
timeout=es_conn_conf['es_conn_timeout'],
send_get_body_as=es_conn_conf['send_get_body_as'])
def build_es_conn_config(conf):
""" Given a conf dictionary w/ raw config properties 'use_ssl', 'es_host', 'es_port'
'es_username' and 'es_password', this will return a new dictionary
with properly initialized values for 'es_host', 'es_port', 'use_ssl' and 'http_auth' which
will be a basicauth username:password formatted string """
parsed_conf = {}
parsed_conf['use_ssl'] = False
parsed_conf['verify_certs'] = True
parsed_conf['http_auth'] = None
parsed_conf['es_username'] = None
parsed_conf['es_password'] = None
parsed_conf['aws_region'] = None
parsed_conf['boto_profile'] = None
parsed_conf['es_host'] = conf['es_host']
parsed_conf['es_port'] = conf['es_port']
parsed_conf['es_url_prefix'] = ''
parsed_conf['es_conn_timeout'] = conf.get('es_conn_timeout', 20)
parsed_conf['send_get_body_as'] = conf.get('es_send_get_body_as', 'GET')
if 'es_username' in conf:
parsed_conf['es_username'] = conf['es_username']
parsed_conf['es_password'] = conf['es_password']
if 'aws_region' in conf:
parsed_conf['aws_region'] = conf['aws_region']
if 'boto_profile' in conf:
parsed_conf['boto_profile'] = conf['boto_profile']
if 'use_ssl' in conf:
parsed_conf['use_ssl'] = conf['use_ssl']
if 'verify_certs' in conf:
parsed_conf['verify_certs'] = conf['verify_certs']
if 'es_url_prefix' in conf:
parsed_conf['es_url_prefix'] = conf['es_url_prefix']
return parsed_conf
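# Minimal configuration sketch (only 'es_host' and 'es_port' are required here;
# everything else falls back to the defaults set above):
#
#     conf = {'es_host': 'localhost', 'es_port': 9200, 'use_ssl': False}
#     es = elasticsearch_client(conf)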
|
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Ivo Tzvetkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import overrides
import py2neo
from py2neo import neo4j
from exc import *
from utils import classproperty
from metadata import metadata as m
from properties import Property, FieldDescriptor
from observable import Observable, ObservableMeta
from dummy import DummyEntity
__all__ = ['Entity']
class EntityMeta(ObservableMeta):
def __init__(cls, name, bases, dict_):
super(EntityMeta, cls).__init__(name, bases, dict_)
# inherited labels
cls._labels = cls._labels + (name,) if hasattr(cls, '_labels') else (name,)
# inherited descriptors
cls._descriptors = cls._descriptors.copy() if hasattr(cls, '_descriptors') else {}
for base in bases:
if hasattr(base, '_descriptors'):
for k, v in base._descriptors.iteritems():
if k not in cls._descriptors:
cls._descriptors[k] = v
# class-defined descriptors
for k, v in dict_.iteritems():
if isinstance(v, FieldDescriptor):
cls._descriptors[k] = v
v.name = k
m.add(cls)
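# Illustrative effect of the metaclass (Node below stands for the node.Node
# subclass mentioned in the Entity docstring; the example is an assumption,
# not a definition from this module):
#
#     class Person(Node):
#         name = Property()
#
#     # Person._labels accumulates one label per class in the hierarchy, e.g.
#     # ('Entity', 'Node', 'Person'), and Person._descriptors merges the
#     # FieldDescriptor instances ({'name': <Property>}) from all bases.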
class Entity(Observable):
"""Base class for all Neolixir entities (Nodes and Relationships).
Defines basic shared functionality and handles proper subclassing and
instance initialization, instance registration and descriptor setup.
Should not be used directly, always use :class:`node.Node` or
:class:`relationship.Relationship` instead.
:param value: A :class:`py2neo.neo4j.Node` or :class:`py2neo.neo4j.Relationship` instance, or None.
:param \*\*properties: Keyword arguments will be used to initialize the entity's properties.
:returns: An :class:`Entity` or a subclass thereof.
"""
__metaclass__ = EntityMeta
_deleted = False
@classmethod
def get(cls, value, **properties):
if isinstance(value, cls):
return value
instance = m.session.get(value)
if instance is not None:
return instance
elif isinstance(value, (DummyEntity, neo4j.Node, neo4j.Relationship)):
try:
loaded_properties = m.session.propmap.get_properties(value)
except GraphError as e:
if str(e).find('not found') > 0:
raise EntityNotFoundException(str(e))
raise e
valcls = m.classes.get(loaded_properties.get('__class__'))
if not valcls or not issubclass(valcls, cls):
raise TypeError("entity is not an instance of " + cls.__name__)
return valcls(entity=value, **properties)
else:
return cls(entity=value, **properties)
def __init__(self, entity=None, **properties):
if isinstance(entity, (DummyEntity, neo4j.Node, neo4j.Relationship)):
self._entity = entity
else:
self._entity = None
for k, v in properties.iteritems():
if k in self._descriptors:
setattr(self, k, v)
else:
self.properties[k] = v
m.session.add(self)
if self.is_phantom():
self.fire_event('create', self)
def __copy__(self):
# TODO: support copying?
return self
def __deepcopy__(self, memo):
# TODO: support deepcopying?
return self
def _get_repr_data(self):
return ["Id = {0}".format(self.id),
"Descriptors = {0}".format(sorted(self.descriptors.keys())),
"Properties = {0}".format(self.properties)]
def __repr__(self):
return "<{0} (0x{1:x}): \n{2}\n>".format(self.__class__.__name__, id(self),
"\n".join(self._get_repr_data()))
@property
def _entity(self):
return self.__entity
@_entity.setter
def _entity(self, value):
self.__entity = value
if value is not None:
value.properties
@property
def cls(self):
return self.__class__
@property
def id(self):
return getattr(self._entity, 'id', None)
@property
def descriptors(self):
return self._descriptors
@property
def properties(self):
try:
return self._properties
except AttributeError:
self._properties = m.session.propmap.get_properties(self)
self._properties.owner = self
return self._properties
def get_properties(self):
data = {}
for k, v in self._descriptors.iteritems():
if isinstance(v, Property):
data[k] = getattr(self, k)
for k, v in self.properties.iteritems():
data.setdefault(k, v)
return data
def set_properties(self, data):
for k, v in data.iteritems():
if k in self._descriptors:
setattr(self, k, v)
else:
self.properties[k] = v
def get_abstract(self, exclude_null=False):
self.properties.sanitize()
if exclude_null:
return {k: v for k, v in self.properties.iteritems() if v is not None}
else:
return self.properties
def set_entity(self, entity):
if self._entity is None:
self._entity = entity
try:
del self._properties
except AttributeError:
pass
if getattr(self, '_session', None):
self._session.add(self)
return True
else:
return False
def is_phantom(self):
return self._entity is None
def is_deleted(self):
return self._deleted
def is_dirty(self):
if self.is_deleted():
return True
elif not hasattr(self, '_properties'):
return False
else:
return self.properties.is_dirty()
def is_expunged(self):
return getattr(self, '_session', None) is None
def expunge(self):
if getattr(self, '_session', None):
self._session.expunge(self)
self._session = None
def rollback(self):
self._deleted = False
try:
del self._properties
except AttributeError:
pass
def delete(self):
self._deleted = True
if self.is_phantom():
self.expunge()
else:
self.fire_event('delete', self)
def save(self, batch=None):
raise NotImplementedError("cannot save through generic Entity class")
def has_observer(self, event, target):
return super(Entity, self).has_observer(event, target) or \
(event == 'change' and super(Entity, self).has_observer('update', target)) or \
(event in ('change', 'append', 'remove') and target in self.descriptors and \
self.descriptors[target].has_observer(event, target))
def fire_event(self, event, target, *args):
if not (self.is_expunged() and event != 'delete_committed'):
super(Entity, self).fire_event(event, target, *args)
if event == 'change':
self.fire_event('update', self)
if target in self.descriptors:
self.descriptors[target].fire_event(event, (self, target), *args)
elif event in ('append', 'remove') and target in self.descriptors:
self.descriptors[target].fire_event(event, (self, target), *args)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from bilean.common import exception
from bilean.common.i18n import _
from bilean.common import utils
from bilean.db import api as db_api
from bilean.engine import event as event_mod
from bilean.engine import resource as resource_mod
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
LOG = logging.getLogger(__name__)
class User(object):
"""User object contains all user operations"""
statuses = (
INIT, FREE, ACTIVE, WARNING, FREEZE,
) = (
'INIT', 'FREE', 'ACTIVE', 'WARNING', 'FREEZE',
)
def __init__(self, user_id, **kwargs):
self.id = user_id
self.policy_id = kwargs.get('policy_id', None)
self.balance = kwargs.get('balance', 0)
self.rate = kwargs.get('rate', 0.0)
self.credit = kwargs.get('credit', 0)
self.last_bill = kwargs.get('last_bill', None)
self.status = kwargs.get('status', self.INIT)
self.status_reason = kwargs.get('status_reason', 'Init user')
self.created_at = kwargs.get('created_at', None)
self.updated_at = kwargs.get('updated_at', None)
self.deleted_at = kwargs.get('deleted_at', None)
def store(self, context):
"""Store the user record into database table."""
values = {
'policy_id': self.policy_id,
'balance': self.balance,
'rate': self.rate,
'credit': self.credit,
'last_bill': self.last_bill,
'status': self.status,
'status_reason': self.status_reason,
'created_at': self.created_at,
'updated_at': self.updated_at,
'deleted_at': self.deleted_at,
}
if self.created_at:
db_api.user_update(context, self.id, values)
else:
values.update(id=self.id)
user = db_api.user_create(context, values)
self.created_at = user.created_at
return self.id
@classmethod
def init_users(cls, context):
"""Init users from keystone."""
k_client = context.clients.client('keystone')
tenants = k_client.tenants.list()
tenant_ids = [tenant.id for tenant in tenants]
users = cls.load_all(context)
user_ids = [user.id for user in users]
for tid in tenant_ids:
if tid not in user_ids:
user = cls(tid, status=cls.INIT,
status_reason='Init from keystone')
user.store(context)
return True
@classmethod
def _from_db_record(cls, record):
'''Construct a user object from database record.
:param record: a DB user object that contains all fields;
'''
kwargs = {
'policy_id': record.policy_id,
'balance': record.balance,
'rate': record.rate,
'credit': record.credit,
'last_bill': record.last_bill,
'status': record.status,
'status_reason': record.status_reason,
'created_at': record.created_at,
'updated_at': record.updated_at,
'deleted_at': record.deleted_at,
}
return cls(record.id, **kwargs)
@classmethod
def load(cls, context, user_id=None, user=None, realtime=False,
show_deleted=False, tenant_safe=True):
'''Retrieve a user from database.'''
if user is None:
user = db_api.user_get(context, user_id,
show_deleted=show_deleted,
tenant_safe=tenant_safe)
if user is None:
raise exception.UserNotFound(user=user_id)
u = cls._from_db_record(user)
if not realtime:
return u
if u.rate > 0 and u.status != u.FREEZE:
seconds = (timeutils.utcnow() - u.last_bill).total_seconds()
u.balance -= u.rate * seconds
return u
@classmethod
def load_all(cls, context, show_deleted=False, limit=None,
marker=None, sort_keys=None, sort_dir=None,
filters=None):
'''Retrieve all users from the database.'''
records = db_api.user_get_all(context, show_deleted=show_deleted,
limit=limit, marker=marker,
sort_keys=sort_keys, sort_dir=sort_dir,
filters=filters)
return [cls._from_db_record(record) for record in records]
@classmethod
def delete(cls, context, user_id=None, user=None):
'''Delete a user from database.'''
if user is not None:
db_api.user_delete(context, user_id=user.id)
return True
elif user_id is not None:
db_api.user_delete(context, user_id=user_id)
return True
return False
def to_dict(self):
user_dict = {
'id': self.id,
'policy_id': self.policy_id,
'balance': self.balance,
'rate': self.rate,
'credit': self.credit,
'last_bill': utils.format_time(self.last_bill),
'status': self.status,
'status_reason': self.status_reason,
'created_at': utils.format_time(self.created_at),
'updated_at': utils.format_time(self.updated_at),
'deleted_at': utils.format_time(self.deleted_at),
}
return user_dict
def set_status(self, status, reason=None):
'''Set status of the user.'''
self.status = status
if reason:
self.status_reason = reason
def update_with_resource(self, context, resource, action='create'):
'''Update user with resource'''
if 'create' == action:
d_rate = resource.rate
if self.rate > 0:
self.do_bill(context)
elif 'delete' == action:
self.do_bill(context)
d_rate = -resource.rate
elif 'update' == action:
self.do_bill(context)
d_rate = resource.d_rate
self._change_user_rate(context, d_rate)
self.store(context)
def _change_user_rate(self, context, d_rate):
# Update the rate of user
old_rate = self.rate
new_rate = old_rate + d_rate
if old_rate == 0 and new_rate > 0:
self.last_bill = timeutils.utcnow()
if d_rate > 0 and self.status == self.FREE:
self.status = self.ACTIVE
elif d_rate < 0:
if new_rate == 0 and self.balance > 0:
self.status = self.FREE
elif self.status == self.WARNING:
p_time = cfg.CONF.bilean_task.prior_notify_time * 3600
rest_usage = p_time * new_rate
if self.balance > rest_usage:
self.status = self.ACTIVE
self.rate = new_rate
def do_recharge(self, context, value):
'''Do recharge for user.'''
if self.rate > 0 and self.status != self.FREEZE:
self.do_bill(context)
self.balance += value
if self.status == self.INIT and self.balance > 0:
self.set_status(self.ACTIVE, reason='Recharged')
elif self.status == self.FREEZE and self.balance > 0:
reason = _("Status change from freeze to active because "
"of recharge.")
self.set_status(self.ACTIVE, reason=reason)
elif self.status == self.WARNING:
prior_notify_time = cfg.CONF.bilean_task.prior_notify_time * 3600
rest_usage = prior_notify_time * self.rate
if self.balance > rest_usage:
reason = _("Status change from warning to active because "
"of recharge.")
self.set_status(self.ACTIVE, reason=reason)
event_mod.record(context, self.id, action='recharge', value=value)
self.store(context)
def _freeze(self, context, reason=None):
'''Freeze the user when the balance is overdrawn.'''
LOG.info(_("Freeze user because of: %s") % reason)
self._release_resource(context)
LOG.info(_("Balance of user %s overdraft, change user's "
"status to 'freeze'") % self.id)
self.status = self.FREEZE
self.status_reason = reason
def _release_resource(self, context):
'''Freeze the user by deleting all resources related to the user.'''
filters = {'user_id': self.id}
resources = resource_mod.Resource.load_all(context, filters=filters)
for resource in resources:
resource.do_delete(context)
def do_delete(self, context):
db_api.user_delete(context, self.id)
return True
def do_bill(self, context):
'''Do bill once, pay the cost until now.'''
now = timeutils.utcnow()
total_seconds = (now - self.last_bill).total_seconds()
self.balance = self.balance - self.rate * total_seconds
self.last_bill = now
if self.balance < 0:
self._freeze(context, reason="Balance overdraft")
self.store(context)
event_mod.record(context, self.id,
action='charge',
seconds=total_seconds)
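# Billing arithmetic sketch: with rate = 0.002 (cost per second), balance = 100
# and 3600 seconds elapsed since last_bill, do_bill deducts 0.002 * 3600 = 7.2,
# leaving balance = 92.8; a balance below zero triggers _freeze() instead.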
|
|
from collections import OrderedDict
from .models import Identifier
from .signals import import_complete, new_metadata
from . import registry, wizard_task, InputNeeded
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from natural_keys import NaturalKeySerializer
from html_json_forms import parse_json_form
import json
import logging
try:
import reversion
except ImportError:
reversion = None
PRIORITY = {
"instance": 1,
"attribute": 2,
"meta": 3,
"unresolved": 4,
"unknown": 5,
}
def get_ct(model):
return ContentType.objects.get_for_model(model)
def ctid(ct):
return "%s.%s" % (ct.app_label, ct.model)
def metaname(cls):
return ctid(get_ct(cls)) + "_meta"
def get_id(obj, field):
if isinstance(field, NaturalKeySerializer):
data = list(type(field)(obj).data.values())
return data[0]
else:
return field.to_representation(obj)
def colname(i):
if i >= 26:
q, r = divmod(i, 26)
return colname(q - 1) + colname(r)
else:
return chr(ord('A') + i)
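# Spreadsheet-style column labels produced by colname:
#     colname(0) -> 'A', colname(25) -> 'Z', colname(26) -> 'AA', colname(27) -> 'AB'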
@wizard_task(label="Processing Data", url_path="auto", use_async=True)
def auto_import(run):
"""
Walk through all the steps necessary to interpret and import data from an
Iter. Meant to be called asynchronously. Automatically suspends import if
any additional input is needed from the user.
"""
tasks = run.get_auto_import_tasks()
run.add_event("auto_import")
return run.run_all(tasks)
@wizard_task(label="Checking configuration...", url_path=False)
def check_serializer(run):
if not run.serializer:
raise InputNeeded("serializers")
@wizard_task(label="Serializers", url_path="serializers")
def list_serializers(run):
result = {}
result["serializer_choices"] = [
{
"name": s["class_name"],
"label": s["name"],
}
for s in registry.get_serializers()
if s["options"].get("show_in_list", True)
]
return result
@wizard_task(label="Update Serializer")
def updateserializer(run, post={}):
name = post.get("serializer", None)
if name and registry.get_serializer(name):
run.serializer = name
run.save()
run.add_event("update_serializer")
result = list_serializers(run)
result.update(
current_mode="serializers",
)
return result
@wizard_task(label="Loading Data...", url_path=False)
def check_iter(run):
# Preload Iter to catch any load errors early
run.load_iter()
def get_attribute_field(field):
for cname, cfield in field.child.fields.items():
if isinstance(cfield, serializers.RelatedField):
return cname, cfield
return None, None
def compute_attr_field(value_field, attr_name):
parts = value_field.split("[")
parts[-1] = attr_name + "]"
return "[".join(parts)
def get_choices(run):
def make_list(choices):
return [
{
"id": row.pk,
"label": str(row),
}
for row in choices
]
Serializer = run.get_serializer()
field_choices = set()
def load_fields(
serializer,
group_name,
label_prefix="",
name_prefix="",
attribute_name=None,
attribute_choices=None,
):
fields = serializer.fields.items()
if len(fields) == 1 and isinstance(serializer, NaturalKeySerializer):
is_natkey_lookup = True
else:
is_natkey_lookup = False
for name, field in fields:
if field.read_only:
continue
if name_prefix:
qualname = name_prefix + ("[%s]" % name)
else:
qualname = name
label = (field.label or name).title()
if label_prefix:
quallabel = label_prefix + " " + label
else:
quallabel = label
if isinstance(field, NaturalKeySerializer):
load_fields(
field,
group_name,
label_prefix=quallabel,
name_prefix=qualname,
)
elif isinstance(field, serializers.ListSerializer):
attr_name, attr_field = get_attribute_field(field)
if not attr_field:
raise Exception(
"Could not determine EAV attribute field"
' for nested "%s" serializer!' % qualname
)
choices = make_list(attr_field.get_queryset())
load_fields(
field.child,
group_name=quallabel,
label_prefix="",
name_prefix=qualname + "[]",
attribute_name=attr_name,
attribute_choices=choices,
)
elif attribute_choices:
if isinstance(field, serializers.RelatedField):
continue
for choice in attribute_choices:
field_choices.add(
(
group_name,
"%s;%s=%s"
% (qualname, attribute_name, choice["id"]),
"%s for %s" % (label, choice["label"]),
False,
field,
)
)
elif isinstance(field, serializers.ModelSerializer):
load_fields(
field,
group_name=quallabel,
label_prefix="",
name_prefix=qualname,
)
else:
if is_natkey_lookup:
is_lookup = True
lookup_field = serializer
else:
is_lookup = isinstance(field, serializers.RelatedField)
lookup_field = field
field_choices.add(
(group_name, qualname, quallabel, is_lookup, lookup_field)
)
if hasattr(Serializer, "Meta") and hasattr(Serializer.Meta, "model"):
root_label = Serializer.Meta.model._meta.verbose_name.title()
else:
root_label = run.serializer_label
serializer = Serializer(
context={
"data_wizard": {
"run": run,
}
}
)
load_fields(serializer, root_label)
field_choices.add(
("Other", "__ignore__", "Ignore this Column", False, None)
)
field_choices = sorted(field_choices, key=lambda d: d[1])
choices = [
{
"id": name,
"label": label,
"is_lookup": is_lookup,
"group": group_name,
"field": field,
}
for group_name, name, label, is_lookup, field in field_choices
]
return choices
def get_choice_groups(run):
choices = get_choices(run)
groups = OrderedDict()
for choice in choices:
groups.setdefault(choice["group"], [])
groups[choice["group"]].append(
{
"id": choice["id"],
"label": choice["label"],
}
)
return [
{"name": group, "choices": group_choices}
for group, group_choices in groups.items()
]
def get_choice_ids(run):
return [choice["id"] for choice in get_choices(run)]
@wizard_task(label="Parsing Columns...", url_path=False)
def check_columns(run):
result = read_columns(run)
if result["unknown_count"]:
raise InputNeeded("columns", result["unknown_count"])
return result
@wizard_task("Columns", url_path="columns")
def read_columns(run):
matched = get_columns(run)
unknown_count = 0
for info in matched:
if info["type"] == "unknown":
unknown_count += 1
# Add some useful context items for client
info["unknown"] = True
info["types"] = get_choice_groups(run)
assert info["type"] != "unresolved"
# Parse row identifiers
return {
"columns": matched,
"unknown_count": unknown_count,
}
# FIXME: These functions might make more sense as methods on Run
def get_columns(run):
if run.already_parsed():
return load_columns(run)
else:
return parse_columns(run)
def get_lookup_columns(run):
cols = []
choices = {
choice["id"]: choice
for choice in get_choices(run)
if choice["is_lookup"]
}
for col in get_columns(run):
if "colnum" not in col or col["type"] != "meta":
continue
if col["field_name"] not in choices:
continue
col = col.copy()
info = choices[col["field_name"]]
if isinstance(info["field"], NaturalKeySerializer):
# FIXME: how to override this?
queryset = info["field"].Meta.model.objects.all()
else:
queryset = info["field"].get_queryset()
col["serializer_field"] = info["field"]
col["queryset"] = queryset
cols.append(col)
return cols
def load_columns(run):
table = run.load_iter()
cols = list(table.field_map.keys())
matched = []
ranges = run.range_set.filter(
identifier__serializer=run.serializer
).exclude(type="data")
for rng in ranges:
ident = rng.identifier
info = {
"match": str(ident),
"mapping": ident.mapping_label,
"rel_id": rng.pk,
"type": ident.type,
}
if ident.type == "meta":
info["field_name"] = rng.identifier.field
elif ident.type == "attribute":
info["field_name"] = rng.identifier.field
info["attr_id"] = rng.identifier.attr_id
info["attr_field"] = rng.identifier.attr_field
else:
info["value"] = ident.name
if rng.type == "list":
col = rng.start_col
info["name"] = cols[col].replace("\n", " - ")
info["column"] = colname(col)
info["colnum"] = col
elif rng.type == "value":
info["name"] = get_range_value(
table, rng, rng.header_col, rng.start_col - 1
)
info["meta_value"] = get_range_value(
table, rng, rng.start_col, rng.end_col
)
info["colnum"] = rng.start_col
info["rownum"] = rng.start_row
matched.append(info)
matched.sort(key=lambda info: info.get("colnum", -1))
return matched
def get_range_value(table, rng, scol, ecol):
if rng.start_row == rng.end_row and scol == ecol:
return table.extra_data.get(rng.start_row, {}).get(scol)
val = ""
for r in range(rng.start_row, rng.end_row + 1):
for c in range(scol, ecol + 1):
val += str(table.extra_data.get(r, {}).get(c, ""))
return val
def parse_columns(run):
run.add_event("parse_columns")
table = run.load_iter()
if table.tabular:
for r in table.extra_data:
row = table.extra_data[r]
for c in row:
if c + 1 in row and c - 1 not in row:
parse_column(
run,
row[c],
type="value",
start_row=r,
end_row=r,
header_col=c,
start_col=c + 1,
end_col=c + 1,
)
for i, name in enumerate(table.field_map.keys()):
if table.tabular:
header_row = table.header_row
start_row = table.start_row
else:
header_row = -1
start_row = 0
name = table.clean_field_name(name)
parse_column(
run,
name=name,
type="list",
header_row=header_row,
start_row=start_row,
end_row=start_row + len(table) - 1,
start_col=i,
end_col=i,
)
return load_columns(run)
def parse_column(run, name, **kwargs):
matches = list(
Identifier.objects.filter(
serializer=run.serializer,
name__iexact=name,
)
)
if len(matches) > 0:
matches.sort(key=lambda ident: PRIORITY.get(ident.type, 0))
ident = matches[0]
else:
if name in get_choice_ids(run):
field = name
else:
field = None
ident = Identifier.objects.create(
serializer=run.serializer,
name=name,
field=field,
resolved=(field is not None),
)
run.range_set.create(identifier=ident, **kwargs)
@wizard_task(label="Update Columns", url_path="updatecolumns")
def update_columns(run, post={}):
run.add_event("update_columns")
if isinstance(post.get("columns"), list):
for col in post["columns"]:
rel_id = col.get("id")
mapping = col.get("mapping")
if rel_id and mapping:
post[f"rel_{rel_id}"] = mapping
matched = get_columns(run)
for col in matched:
if col["type"] != "unknown":
continue
val = post.get("rel_%s" % col["rel_id"], None)
if not val:
continue
ident = run.range_set.get(pk=col["rel_id"]).identifier
assert ident.field is None
if val not in get_choice_ids(run):
continue
if ";" in val:
field, attr_info = val.split(";")
attr_name, attr_id = attr_info.split("=")
attr_field = compute_attr_field(field, attr_name)
else:
field = val
attr_id = None
attr_field = None
ident.field = field
ident.attr_id = attr_id
ident.attr_field = attr_field
ident.resolved = True
ident.save()
new_metadata.send(
sender=update_columns,
run=run,
identifier=ident,
)
result = read_columns(run)
result.update(
current_mode="columns",
)
return result
@wizard_task(label="Parsing Identifiers...", url_path=False)
def check_row_identifiers(run):
result = read_row_identifiers(run)
if result["unknown_count"]:
raise InputNeeded("ids", result["unknown_count"])
return result
@wizard_task(label="Identifiers", url_path="ids")
def read_row_identifiers(run):
if run.range_set.filter(type="data").exists():
return load_row_identifiers(run)
else:
return parse_row_identifiers(run)
def parse_row_identifiers(run):
run.add_event("parse_row_identifiers")
idmap = run.get_idmap()
lookup_cols = get_lookup_columns(run)
lookup_fields = OrderedDict()
for col in lookup_cols:
field_name = col["field_name"]
lookup_fields.setdefault(
field_name,
{
"cols": [],
"ids": OrderedDict(),
"serializer_field": col["serializer_field"],
"start_col": 1e10,
"end_col": -1,
},
)
info = lookup_fields[field_name]
info["cols"].append(col)
info["start_col"] = min(info["start_col"], col["colnum"])
info["end_col"] = max(info["end_col"], col["colnum"])
if "meta_value" in col:
info["is_meta_value"] = True
info["ids"] = {
col["meta_value"]: {
"count": 1,
"start_row": col["rownum"],
"end_row": col["rownum"],
}
}
assert info["start_col"] < 1e10
assert info["end_col"] > -1
table = run.load_iter()
for i, row in enumerate(table):
for field_name, info in lookup_fields.items():
if "is_meta_value" in info:
continue
names = [str(row[col["colnum"]]) for col in info["cols"]]
name = " ".join(names)
info["ids"].setdefault(
name,
{
"count": 0,
"start_row": 1e10,
"end_row": -1,
},
)
idinfo = info["ids"][name]
idinfo["count"] += 1
rownum = i
if table.tabular:
rownum += table.start_row
idinfo["start_row"] = min(idinfo["start_row"], rownum)
idinfo["end_row"] = max(idinfo["end_row"], rownum)
assert idinfo["start_row"] < 1e10
assert idinfo["end_row"] > -1
for field_name, info in lookup_fields.items():
for name, idinfo in info["ids"].items():
ident = Identifier.objects.filter(
serializer=run.serializer,
field=field_name,
name__iexact=name,
).first()
if not ident:
value = idmap(name, info["serializer_field"])
ident = Identifier.objects.create(
serializer=run.serializer,
field=field_name,
name=name,
value=value,
resolved=value is not None,
)
run.range_set.create(
type="data",
identifier=ident,
start_col=info["start_col"],
end_col=info["end_col"],
start_row=idinfo["start_row"],
end_row=idinfo["end_row"],
count=idinfo["count"],
)
return load_row_identifiers(run)
def load_row_identifiers(run):
ids = {}
lookup_cols = get_lookup_columns(run)
for rng in run.range_set.filter(type="data"):
ident = rng.identifier
info = None
for col in lookup_cols:
if col["field_name"] == ident.field:
info = col
if not info:
continue
model = info["queryset"].model
ids.setdefault(model, {})
ids[model][ident] = rng.count, info
unknown_ids = 0
idgroups = []
for model in ids:
mtype = get_ct(model)
idinfo = {
"type_id": ctid(mtype),
"type_label": mtype.name.title(),
"ids": [],
}
for ident, (count, col) in ids[model].items():
info = {
"value": ident.name,
"count": count,
}
if ident.resolved:
info["match"] = ident.value or ident.name
else:
assert ident.type == "unresolved"
unknown_ids += 1
field = col["serializer_field"]
info["ident_id"] = ident.pk
info["unknown"] = True
info["choices"] = [
{
"id": get_id(choice, field),
"label": str(choice),
}
for choice in col["queryset"]
]
if isinstance(field, NaturalKeySerializer):
info["choices"].insert(
0,
{
"id": "new",
"label": "New %s" % idinfo["type_label"],
},
)
idinfo["ids"].append(info)
idinfo["ids"].sort(key=lambda info: info["value"])
idgroups.append(idinfo)
return {
"unknown_count": unknown_ids,
"types": idgroups,
}
@wizard_task(label="Update Identifiers", url_path="updateids")
def update_row_identifiers(run, post={}):
run.add_event("update_row_identifiers")
for value in list(post.values()):
if not isinstance(value, list):
continue
for ident in value:
if not isinstance(ident, dict):
continue
ident_id = ident.get("id")
mapping = ident.get("mapping")
if ident_id and mapping:
post[f"ident_{ident_id}_id"] = mapping
unknown = run.range_set.filter(
type="data",
identifier__resolved=False,
)
for rng in unknown:
ident = rng.identifier
ident_id = post.get("ident_%s_id" % ident.pk, None)
if not ident_id:
continue
if ident_id == "new":
ident.value = ident.name
else:
ident.value = ident_id
ident.resolved = True
ident.save()
new_metadata.send(
sender=update_row_identifiers,
run=run,
identifier=ident,
)
result = read_row_identifiers(run)
result.update(
current_mode="ids",
)
return result
@wizard_task(label="Importing Data...", url_path="data", use_async=True)
def import_data(run):
"""
Import all parseable data from the dataset instance's Iter class.
"""
if reversion:
with reversion.create_revision():
reversion.set_user(run.user)
reversion.set_comment("Imported via %s" % run)
result = _do_import(run)
else:
result = _do_import(run)
return result
def get_rows(run):
# (Re-)Load data and column information
table = run.load_iter()
matched = get_columns(run)
# Set any global defaults defined within data themselves (usually as extra
# cells above the headers in a spreadsheet)
run_globals = {}
for col in matched:
if "meta_value" in col:
save_value(col, col["meta_value"], run_globals)
for row in table:
yield build_row(run, row, run_globals, matched)
def _do_import(run):
run.add_event("do_import")
# Loop through table rows and add each record
table = run.load_iter()
rows = len(table)
skipped = []
if table.tabular:
def rownum(i):
return i + table.start_row
else:
def rownum(i):
return i
for i, row in enumerate(get_rows(run)):
# Update state (for status() on view)
run.send_progress(
{
"message": "Importing Data...",
"stage": "data",
"current": i,
"total": rows,
"skipped": skipped,
}
)
# Create report, capturing any errors
obj, error = import_row(run, i, row)
if error:
success = False
fail_reason = error
skipped.append({"row": rownum(i) + 1, "reason": fail_reason})
else:
success = True
fail_reason = None
# Record relationship between data source and resulting report (or
# skipped record), including specific cell range.
run.record_set.create(
row=rownum(i),
content_object=obj,
success=success,
fail_reason=fail_reason,
)
# Send completion signal (in case any server handlers are registered)
status = {"current": i + 1, "total": rows, "skipped": skipped}
run.add_event("import_complete")
run.record_count = run.record_set.filter(success=True).count()
run.save()
run.send_progress(status, state="SUCCESS")
import_complete.send(sender=import_data, run=run, status=status)
return status
def build_row(run, row, instance_globals, matched):
"""
Compile spreadsheet row into serializer data format
"""
# Copy global values to record hash
record = {key: instance_globals[key] for key in instance_globals}
for col in matched:
if "colnum" in col and "meta_value" not in col:
val = row[col["colnum"]]
save_value(col, val, record)
seen = set()
for col in matched:
field_name = col["field_name"]
if col["type"] == "meta" and field_name not in seen:
seen.add(field_name)
ident = Identifier.objects.filter(
serializer=run.serializer,
name__iexact=str(record[field_name]),
).first()
if ident and ident.value:
record[field_name] = ident.value
record.pop("_attr_index", None)
return parse_json_form(record)
def import_row(run, i, record):
"""
Create actual report instance from parsed values.
"""
Serializer = run.get_serializer()
try:
serializer = Serializer(
data=record,
context={
"data_wizard": {
"run": run,
"row": i,
}
},
)
if serializer.is_valid():
with transaction.atomic():
obj = serializer.save()
error = None
else:
obj = None
error = json.dumps(serializer.errors)
except Exception as e:
logging.warning(
"{run}: Error In Row {row}".format(
run=run,
row=i,
)
)
logging.exception(e)
obj = None
error = repr(e)
return obj, error
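# Illustrative note (names as passed in the context above, usage assumed): inside
# the serializer, the originating run and row number are reachable as
# self.context['data_wizard']['run'] and self.context['data_wizard']['row'].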
def save_value(col, val, obj):
"""
For each cell in each row, use parsed col(umn) information to determine how
to apply the cell val(ue) to the obj(ect hash).
"""
# In some spreadsheets (i.e. "horizontal" tables), multiple columns
# indicate attribute names and each row contains result values. In others
# (i.e. "vertical" tables), each row lists both the attribute name and the
# value.
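    # Illustrative layouts (hypothetical data, not from the source):
    #
    #   "horizontal":  site | temp | humidity     -> temp/humidity are attribute
    #                  A    | 20   | 55              columns; each row holds values
    #
    #   "vertical":    site | parameter | value   -> parameter is a meta column
    #                  A    | temp      | 20         naming the attribute per row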
if col["type"] == "attribute":
# Attribute value in a "horizontal" table
save_attribute_value(col, val, obj)
elif col["type"] == "meta":
# Metadata value in either a "horizontal" or "vertical" table
set_value(obj, col["field_name"], val)
def save_attribute_value(col, val, obj):
"""
This column was identified as an EAV attribute; update nested array with
the cell value from this row.
"""
if "attr_field" not in col:
raise Exception("Unexpected EAV value!")
if "_attr_index" not in obj:
obj["_attr_index"] = {col["attr_id"]: 0}
else:
obj["_attr_index"].setdefault(
col["attr_id"], (max(obj["_attr_index"].values()) or 0) + 1
)
index = obj["_attr_index"][col["attr_id"]]
value_field = col["field_name"].replace("[]", "[%s]" % index)
attr_field = col["attr_field"].replace("[]", "[%s]" % index)
set_value(obj, value_field, val)
obj[attr_field] = col["attr_id"]
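# Illustrative expansion (hypothetical column spec): with field_name
# "results[][value]", attr_field "results[][type_id]" and index 1, this row's
# cell ends up as obj["results[1][value]"] = val and
# obj["results[1][type_id]"] = col["attr_id"], ready for parse_json_form().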
def set_value(obj, field_name, val):
if field_name in obj and val is not None:
if obj[field_name] is None:
obj[field_name] = val
else:
val = "%s %s" % (obj[field_name], val)
obj[field_name] = val
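# Note on set_value: when the target key already holds a value, the new cell is
# appended after a space, so e.g. "dry" followed by "windy" becomes "dry windy"
# (illustrative values only); None cells and keys not already present in the
# record are left untouched.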
# --------------------------------------------------------
from django.utils import timezone
from django.shortcuts import render, render_to_response
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib import messages
from .forms import ReportForm, AttachmentForm
import re
from django.db.models import Q
from django.template import RequestContext
from .models import Report, Folder, Attachment
from django.contrib.auth.models import Group, User
from Crypto.Hash import MD5
from Crypto.Cipher import DES
from Crypto.Hash import SHA256
from django.core.mail import send_mail
@login_required
def reports(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
f = ReportForm(request.POST)
# check whether it's valid:
if f.is_valid():
# Save the form data to the database.
            # But don't yet commit, we still have some data to add.
report = f.save(commit=False)
report.create_date = timezone.now()
report.creator = request.user
# NOW we can save
            report.save()
folder = request.user.folder_set.get(label="Uncategorized")
folder.reports.add(report)
folder.save()
# redirect to a new URL:
messages.success(request, 'Report Created.')
return HttpResponseRedirect('/')
else:
messages.warning(request, 'Report Not Created!')
return render(request, 'reports/reports.html', {'report_form': f})
# if a GET (or any other method) we'll create a blank form
else:
report_form = ReportForm()
return render(request, 'reports/reports.html', {'report_form': report_form})
return render(request, 'reports/reports.html')
@login_required
def folders(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
if not Folder.objects.filter(owner=request.user, label=request.POST['folder_name']):
newfolder = Folder()
newfolder.label= request.POST['folder_name']
newfolder.owner = request.user
newfolder.save()
messages.success(request, 'Folder Created!')
return HttpResponseRedirect('/')
else:
messages.warning(request, 'You already have a folder with that name!')
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect('/')
@login_required
def attachments(request, report_id):
if request.method == "POST":
report = Report.objects.get(id=report_id)
f = AttachmentForm(request.POST, request.FILES)
# check whether it's valid:
if f.is_valid():
# Save the form data to the database.
            # But don't yet commit, we still have some data to add.
attachment = f.save(commit=False)
attachment.upload_date = timezone.now()
attachment.report = report
# NOW we can save
            attachment.save()
            # Fingerprint the uploaded file with a chunked MD5 digest so large
            # files are never read into memory all at once.
            h = MD5.new()
            chunk_size = 8192
            with open(attachment.upload.path, 'rb') as stored:
                while True:
                    chunk = stored.read(chunk_size)
                    if len(chunk) == 0:
                        break
                    h.update(chunk)
attachment.key = h.hexdigest()
attachment.save()
messages.success(request, 'Attachment added!')
return render(request, 'reports/read_report.html', {'report': report, "attachment_form": AttachmentForm()})
else:
messages.warning(request, 'Attachment failed to add!')
return render(request, 'reports/read_report.html', {'report': report, "attachment_form": f})
@login_required
def delete_report(request, report_id):
    # Only the report's creator (or a superuser) may delete it.
    if Report.objects.filter(id=report_id, creator=request.user).exists() or request.user.is_superuser:
Report.objects.get(id=report_id).delete()
messages.success(request, 'Report destroyed')
if request.user.is_superuser:
return HttpResponseRedirect('/accounts/sitemanager/')
else:
return HttpResponseRedirect('/')
else:
messages.warning(request, "Your report was not deleted.")
if request.user.is_superuser:
return HttpResponseRedirect('/accounts/sitemanager/')
else:
return HttpResponseRedirect('/')
@login_required
def edit_report(request, report_id):
report = Report.objects.get(id=report_id)
if request.method == 'POST':
form = ReportForm(request.POST, instance=report)
if form.is_valid():
report = form.save(commit=True)
messages.success(request, 'Report Edited.')
return HttpResponseRedirect('/')
else:
form = ReportForm(instance=report)
messages.warning(request, 'Report Not Edited!')
#return HttpResponseRedirect('/')
else:
form = ReportForm(instance=report)
return render(request, 'reports/edit_report.html', {'report_form': form,'report_id':report_id})
@login_required
def edit_folder(request, folder_id):
if request.method == 'POST': #rename folder
if not Folder.objects.filter(owner=request.user, label=request.POST['folder_name']):
Folder.objects.filter(pk=folder_id).update(label=request.POST['folder_name'])
messages.success(request, 'Folder Renamed!')
return HttpResponseRedirect('/')
else:
messages.warning(request, 'You already have a folder with that name!')
return render(request, 'reports/rename_folder.html')
if(request.GET.get('dlt')): #button pressed, delete folder
        # Only delete the folder if it actually belongs to the requesting user.
        if Folder.objects.filter(id=folder_id, owner=request.user).exists():
Folder.objects.get(id=folder_id).delete()
messages.success(request, 'Folder destroyed')
return HttpResponseRedirect('/')
else:
messages.warning(request, "Your folder is still among us.")
return HttpResponseRedirect('/')
f = Folder.objects.get(pk=folder_id)
return render(request, 'reports/rename_folder.html', {'folder_id':folder_id, 'labeled':f.label})
@login_required
def move(request):
if request.method == 'POST':
rep = Report.objects.get(id = request.POST['currep'])
try:
Folder.objects.get(id = request.POST['move_to']).reports.add(rep)
Folder.objects.get(id = request.POST['move_from']).reports.remove(rep)
messages.success(request, 'Folder successfully moved!')
        except (KeyError, Folder.DoesNotExist):
            # No valid source folder was supplied; just add the report to the target.
            Folder.objects.get(id = request.POST['move_to']).reports.add(rep)
            messages.success(request, 'Report moved to folder!')
# f = rep.folder_set.all()
# move_from = f.filter(owner = request.user)
# Folder.objects.get(id = request.POST['move_to']).reports.add(rep)
# move_from.reports.remove(rep)
# messages.success(request, 'Folder successfully moved!')
return HttpResponseRedirect('/')
@login_required
def read_report(request, report_id):
if request.method == "POST":
report = Report.objects.get(id=report_id)
f = AttachmentForm(request.POST, request.FILES)
# check whether it's valid:
# print('upload' in request.FILES)
# if f.is_valid():
# # Save the form data to the database.
# # But dont yet commit, we still have some data to add.
# attachment = f.save(commit=False)
# attachment.upload_date = timezone.now()
# attachment.report = report
# # NOW we can save
# attachment.save();
# messages.success(request, 'Attachment added!')
# return render(request, 'reports/read_report.html', {'report': report, "attachment_form": AttachmentForm()})
# else:
# messages.warning(request, 'Attachment failed to add!')
# return render(request, 'reports/read_report.html', {'report': report, "attachment_form": f})
else:
report = Report.objects.get(id=report_id)
        # Allow the creator, anyone the report is shared with (via a folder), or
        # the public to read it.
        if report.creator == request.user or report.is_public() or request.user.folder_set.filter(reports=report).exists():
report.save()
attachment_form = AttachmentForm()
return render(request, 'reports/read_report.html', {'report': report, "attachment_form": attachment_form})
else:
messages.warning(request, "Couldn't read your report")
return HttpResponseRedirect('/')
@login_required
def delete_attachment(request, attachment_id):
try:
attachment = Attachment.objects.get(id=attachment_id)
os.remove(attachment.upload.path)
report = attachment.report
attachment.delete()
messages.success(request, "Attachment Deleted")
return render(request, 'reports/read_report.html', {'report': report, "attachment_form": AttachmentForm()})
    except (Attachment.DoesNotExist, OSError):
messages.warning(request, "ERROR: Attachment Not Deleted!")
return HttpResponseRedirect(request.META.get('HTTP_REFERER','/'))
@login_required
def search(request):
query_string = ''
    found_entries = None
    found_entries_two = None
    all_entries = None
    if ('q' in request.GET) and request.GET['q'].strip():
        query_string = request.GET['q']
        entry_query = get_query(query_string, ['title', 'description'])
        found_entries = Report.objects.filter(entry_query)
        # Default to the plain keyword results; refined below when AND/OR is used.
        all_entries = found_entries
if "AND" in query_string:
query_string = request.GET['q'].replace('AND','')
entry_query = get_query(query_string, ['title', 'description', ])
all_entries = Report.objects.filter(entry_query)
if "OR" in query_string:
query_string = request.GET['q'].split('OR')
query_string_one = query_string[0]
query_string_two = query_string[1]
entry_query = get_query(query_string_one, ['title', 'description', ])
entry_query_two = get_query(query_string_two, ['title', 'description', ])
found_entries = Report.objects.filter(entry_query)
found_entries_two = Report.objects.filter(entry_query_two)
all_entries = found_entries | found_entries_two
    return render(request, 'reports/search_results.html',
                  {'query_string': query_string, 'found_entries': all_entries})
def normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]
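# Example (illustrative input): normalize_query(' some random  words "with   quotes" and   spaces')
# returns ['some', 'random', 'words', 'with quotes', 'and', 'spaces'] --
# quoted phrases stay together and runs of whitespace are collapsed.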
def get_query(query_string, search_fields):
''' Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
'''
query = None # Query to search for every search term
terms = normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query
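# Example (illustrative input): get_query('annual report', ['title', 'description'])
# builds (Q(title__icontains='annual') | Q(description__icontains='annual')) &
#        (Q(title__icontains='report') | Q(description__icontains='report')),
# i.e. every search term must match at least one of the given fields.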
def public(request):
reports = Report.objects.filter(public=True)
return render(request, 'reports/public.html', {'reports': reports})
@login_required
def contributors(request, report_id):
if request.method == 'POST':
group_name = request.POST.get('group_name')
group = Group.objects.get(name=group_name)
report = Report.objects.get(id=report_id)
users = group.user_set.all()
author = request.user
for user in users:
if author != user:
folder = user.folder_set.get(label="Shared With Me")
folder.reports.add(report)
folder.save()
messages.success(request, "Added group members as contributors!")
return HttpResponseRedirect(request.META.get('HTTP_REFERER','/'))
@login_required
def encrypt_attachment(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
        f = AttachmentForm(request.POST, request.FILES)
contributor = request.POST.get('contributor_name')
        # check whether it's valid before saving anything:
        if f.is_valid():
            # Save the form data to the database.
            # But don't yet commit, we still have some data to add.
            report = f.save(commit=False)
            report.creator = request.user
            report.save()
attachment = Attachment()
attachment.upload = f.cleaned_data['upload']
if attachment.encrypted:
thehash = SHA256.new(os.urandom(4))
hashstring = thehash.digest()[0:8]
attachment.key = str(hashstring)
des = DES.new(hashstring, DES.MODE_ECB)
text = str.encode(str(f.cleaned_data['upload']))
while len(text) % 8 != 0:
text += b'\0'
attachment.content = str(des.encrypt(text))
send_mail('[SecureShare] New Encrypted Attachment Key',
'The DES key for this message is '+attachment.key+'.',
'secureshare21@yahoo.com', [report.creator.email], fail_silently=False)
                messages.success(request, 'email with key sent to ' + report.creator.email)
return HttpResponseRedirect('/')
############################################## API VIEWS ARE BELOW ##
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from .serializers import FolderSerializer, ReportSerializer, AttachmentInfoSerializer
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from django.core.files import File
from django.http import StreamingHttpResponse
from django.core.servers.basehttp import FileWrapper
import mimetypes
import os
@api_view(['GET'])
@authentication_classes((SessionAuthentication, BasicAuthentication))
@permission_classes((IsAuthenticated,))
def api_available_reports(request):
if request.method == 'GET':
folders = request.user.folder_set
serializer = FolderSerializer(folders, many=True)
return Response(serializer.data)
@api_view(['GET'])
def api_public_reports(request):
if request.method == 'GET':
reports = Report.objects.filter(public=True)
serializer = ReportSerializer(reports, many=True)
return Response(serializer.data)
@api_view(['GET'])
@authentication_classes((SessionAuthentication, BasicAuthentication))
def api_download_attachment(request, attachment_id):
thefile = Attachment.objects.get(id=attachment_id)
    file_extension = os.path.splitext(thefile.upload.path)[1]
if not thefile.has_access(request.user):
return HttpResponse("denied")
wrapper = FileWrapper(open(thefile.upload.path, 'rb'))
thetype = ""
try:
thetype = mimetypes.guess_type(thefile.filename())[0] + "; charset=binary"
except:
thetype = "application/bin; charset=binary"
response = HttpResponse(wrapper, content_type=thetype)
response['Content-Disposition'] = "attachment; filename=download"+file_extension
return response
@api_view(['POST'])
@authentication_classes((SessionAuthentication, BasicAuthentication))
@permission_classes((IsAuthenticated,))
def api_add_report(request):
if request.method == 'POST':
serializer = AttachmentInfoSerializer(data=request.data)
if serializer.is_valid():
a = serializer.save()
return Response(serializer.data)
return Response(serializer.errors)
# --------------------------------------------------------
# Copyright 2014 Google Inc. All Rights Reserved.
"""Convenience functions for dealing with instances and instance templates."""
from googlecloudapis.compute.v1 import compute_v1_messages
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.compute.lib import constants
from googlecloudsdk.compute.lib import image_utils
MIGRATION_OPTIONS = sorted(
compute_v1_messages.Scheduling
.OnHostMaintenanceValueValuesEnum.to_dict().keys())
LOCAL_SSD_INTERFACES = sorted(
compute_v1_messages.AttachedDisk
.InterfaceValueValuesEnum.to_dict().keys())
DEFAULT_LOCAL_SSD_INTERFACE = str(
compute_v1_messages.AttachedDisk.InterfaceValueValuesEnum.SCSI)
def AddImageArgs(parser):
"""Adds arguments related to images for instances and instance-templates."""
def AddImageHelp():
return """
Specifies the boot image for the instances. For each
instance, a new boot disk will be created from the given
image. Each boot disk will have the same name as the
instance.
{alias_table}
When using this option, ``--boot-disk-device-name'' and
``--boot-disk-size'' can be used to override the boot disk's
device name and size, respectively.
By default, ``{default_image}'' is assumed for this flag.
""".format(
alias_table=image_utils.GetImageAliasTable(),
default_image=constants.DEFAULT_IMAGE)
image_choices = ['IMAGE'] + sorted(constants.IMAGE_ALIASES.keys())
image = parser.add_argument(
'--image',
help='The image that the boot disk will be initialized with.',
metavar=' | '.join(image_choices))
image.detailed_help = AddImageHelp
image_utils.AddImageProjectFlag(parser)
def AddCanIpForwardArgs(parser):
parser.add_argument(
'--can-ip-forward',
action='store_true',
help=('If provided, allows the instances to send and receive packets '
'with non-matching destination or source IP addresses.'))
def AddLocalSsdArgs(parser):
"""Adds local SSD argument for instances and instance-templates."""
local_ssd = parser.add_argument(
'--local-ssd',
action=arg_parsers.AssociativeList(spec={
'device-name': str,
'interface': (lambda x: x.upper()),
}, append=True),
help='(BETA) Specifies instances with attached local SSDs.',
metavar='PROPERTY=VALUE',
nargs='*')
local_ssd.detailed_help = """
Attaches a local SSD to the instances.
This flag is currently in BETA and may change without notice.
*device-name*::: Optional. A name that indicates the disk name
the guest operating system will see. If omitted, a device name
of the form ``local-ssd-N'' will be used.
*interface*::: Optional. The kind of disk interface exposed to the VM
for this SSD. Valid values are ``SCSI'' and ``NVME''. SCSI is
the default and is supported by more guest operating systems. NVME
may provide higher performance.
"""
def AddDiskArgs(parser):
"""Adds arguments related to disks for instances and instance-templates."""
boot_disk_device_name = parser.add_argument(
'--boot-disk-device-name',
help='The name the guest operating system will see the boot disk as.')
boot_disk_device_name.detailed_help = """\
      The name the guest operating system will see for the boot disk. This
option can only be specified if a new boot disk is being created (as
opposed to mounting an existing persistent disk).
"""
boot_disk_size = parser.add_argument(
'--boot-disk-size',
type=arg_parsers.BinarySize(lower_bound='10GB'),
help='The size of the boot disk.')
boot_disk_size.detailed_help = """\
The size of the boot disk. This option can only be specified if a new
boot disk is being created (as opposed to mounting an existing
persistent disk). The value must be a whole number followed by a size
unit of ``KB'' for kilobyte, ``MB'' for megabyte, ``GB'' for gigabyte,
or ``TB'' for terabyte. For example, ``10GB'' will produce a 10 gigabyte
disk. If omitted, a default size of 200 GB is used. The minimum size a
boot disk can have is 10 GB. Disk size must be a multiple of 1 GB.
"""
boot_disk_type = parser.add_argument(
'--boot-disk-type',
help='The type of the boot disk.')
boot_disk_type.detailed_help = """\
The type of the boot disk. This option can only be specified if a new boot
disk is being created (as opposed to mounting an existing persistent
disk). To get a list of available disk types, run
`$ gcloud compute disk-types list`.
"""
parser.add_argument(
'--no-boot-disk-auto-delete',
action='store_true',
help=('If provided, boot disks will not be automatically deleted '
'when their instances are deleted.'))
disk = parser.add_argument(
'--disk',
action=arg_parsers.AssociativeList(spec={
'name': str,
'mode': str,
'boot': str,
'device-name': str,
'auto-delete': str,
}, append=True),
help='Attaches persistent disks to the instances.',
metavar='PROPERTY=VALUE',
nargs='+')
disk.detailed_help = """
Attaches persistent disks to the instances. The disks
specified must already exist.
*name*::: The disk to attach to the instances. When creating
more than one instance and using this property, the only valid
mode for attaching the disk is read-only (see *mode* below).
*mode*::: Specifies the mode of the disk. Supported options
are ``ro'' for read-only and ``rw'' for read-write. If
omitted, ``rw'' is used as a default. It is an error for mode
to be ``rw'' when creating more than one instance because
read-write disks can only be attached to a single instance.
*boot*::: If ``yes'', indicates that this is a boot disk. The
virtual machines will use the first partition of the disk for
their root file systems. The default value for this is ``no''.
*device-name*::: An optional name that indicates the disk name
the guest operating system will see. If omitted, a device name
of the form ``persistent-disk-N'' will be used.
*auto-delete*::: If ``yes'', this persistent disk will be
automatically deleted when the instance is deleted. However,
if the disk is later detached from the instance, this option
won't apply. The default value for this is ``no''.
"""
def AddAddressArgs(parser, instances=True):
"""Adds address arguments for instances and instance-templates."""
addresses = parser.add_mutually_exclusive_group()
addresses.add_argument(
'--no-address',
action='store_true',
help=('If provided, the instances will not be assigned external IP '
'addresses.'))
address = addresses.add_argument(
'--address',
help='Assigns the given external address to the instance that is '
'created.')
if instances:
address.detailed_help = """\
Assigns the given external address to the instance that is created.
The address may be an IP address or the name or URI of an address
resource. This option can only be used when creating a single instance.
"""
else:
address.detailed_help = """\
Assigns the given external IP address to the instance that is created.
This option can only be used when creating a single instance.
"""
def AddMachineTypeArgs(parser):
machine_type = parser.add_argument(
'--machine-type',
help='Specifies the machine type used for the instances.',
default=constants.DEFAULT_MACHINE_TYPE)
machine_type.detailed_help = """\
Specifies the machine type used for the instances. To get a
list of available machine types, run 'gcloud compute
machine-types list'.
"""
def AddNetworkArgs(parser):
network = parser.add_argument(
'--network',
default=constants.DEFAULT_NETWORK,
help='Specifies the network that the instances will be part of.')
network.detailed_help = """\
Specifies the network that the instances will be part of. If
omitted, the ``default'' network is used.
"""
def AddScopeArgs(parser):
"""Adds scope arguments for instances and instance-templates."""
scopes_group = parser.add_mutually_exclusive_group()
def AddScopesHelp():
return """\
Specifies service accounts and scopes for the
instances. Service accounts generate access tokens that can be
accessed through the instance metadata server and used to
authenticate applications on the instance. The account can be
either an email address or an alias corresponding to a
service account. If account is omitted, the project's default
service account is used. The default service account can be
specified explicitly by using the alias ``default''. Example:
$ {{command}} example-instance --scopes compute-rw me@project.gserviceaccount.com=storage-rw
If this flag is not provided, the following scopes are used:
{default_scopes}. To create instances with no scopes, use
``--no-scopes'':
$ {{command}} example-instance --no-scopes
SCOPE can be either the full URI of the scope or an
alias. Available aliases are:
[options="header",format="csv",grid="none",frame="none"]
|========
Alias,URI
{aliases}
|========
""".format(
default_scopes=', '.join(constants.DEFAULT_SCOPES),
aliases='\n '.join(
','.join(value) for value in
sorted(constants.SCOPES.iteritems())))
scopes = scopes_group.add_argument(
'--scopes',
help='Specifies service accounts and scopes for the instances.',
metavar='[ACCOUNT=]SCOPE',
nargs='+')
scopes.detailed_help = AddScopesHelp
scopes_group.add_argument(
'--no-scopes',
action='store_true',
help=('If provided, the default scopes ({scopes}) are not added to the '
'instances.'.format(scopes=', '.join(constants.DEFAULT_SCOPES))))
def AddTagsArgs(parser):
tags = parser.add_argument(
'--tags',
help='A list of tags to apply to the instances.',
metavar='TAG',
nargs='+')
tags.detailed_help = """\
Specifies a list of tags to apply to the instances for
identifying the instances to which network firewall rules will
apply. See gcloud_compute_firewall-rules_create(1) for more
details.
"""
def AddNoRestartOnFailureArgs(parser):
no_restart_on_failure = parser.add_argument(
'--no-restart-on-failure',
action='store_true',
default=False,
help=('If provided, the instances are not restarted if they are '
'terminated by Compute Engine.'))
no_restart_on_failure.detailed_help = """\
If provided, the instances will not be restarted if they are
terminated by Compute Engine. By default, failed instances will be
restarted. This does not affect terminations performed by the user.
"""
def AddMaintenancePolicyArgs(parser):
maintenance_policy = parser.add_argument(
'--maintenance-policy',
choices=MIGRATION_OPTIONS,
type=lambda x: x.upper(),
help=('Specifies the behavior of the instances when their host '
'machines undergo maintenance.'))
maintenance_policy.detailed_help = """\
Specifies the behavior of the instances when their host machines
undergo maintenance. ``TERMINATE'' indicates that the instances
should be terminated. ``MIGRATE'' indicates that the instances
should be migrated to a new host. Choosing ``MIGRATE'' will
temporarily impact the performance of instances during a
migration event. If omitted, ``MIGRATE'' is assumed.
"""
def ValidateLocalSsdFlags(args):
for local_ssd in args.local_ssd or []:
interface = local_ssd.get('interface')
if interface and interface not in LOCAL_SSD_INTERFACES:
raise exceptions.ToolException(
'Unexpected local SSD interface: [{given}]. '
'Legal values are [{ok}].'
.format(given=interface,
ok=', '.join(LOCAL_SSD_INTERFACES)))
def CreateLocalSsdMessage(command, device_name, interface, zone=None):
"""Create a message representing a local ssd."""
if zone:
disk_type_ref = command.CreateZonalReference('local-ssd', zone,
resource_type='diskTypes')
disk_type = disk_type_ref.SelfLink()
else:
disk_type = 'local-ssd'
maybe_interface_enum = (
command.messages.AttachedDisk.InterfaceValueValuesEnum(interface)
if interface else None)
return command.messages.AttachedDisk(
type=command.messages.AttachedDisk.TypeValueValuesEnum.SCRATCH,
autoDelete=True,
deviceName=device_name,
interface=maybe_interface_enum,
mode=command.messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,
initializeParams=command.messages.AttachedDiskInitializeParams(
diskType=disk_type),
)
# --------------------------------------------------------
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import gevent
sys.path.append("../common/tests")
from testtools.matchers import Equals, Contains, Not
from test_utils import *
import test_common
import test_case
from vnc_api.vnc_api import *
try:
import to_bgp
except ImportError:
from schema_transformer import to_bgp
from gevent import sleep
def retry_exc_handler(tries_remaining, exception, delay):
print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % (exception, tries_remaining, delay)
def retries(max_tries, delay=1, backoff=1, exceptions=(Exception,), hook=None):
def dec(func):
def f2(*args, **kwargs):
mydelay = delay
tries = range(max_tries)
tries.reverse()
for tries_remaining in tries:
try:
return func(*args, **kwargs)
except exceptions as e:
if tries_remaining > 0:
if hook is not None:
hook(tries_remaining, e, mydelay)
sleep(mydelay)
mydelay = mydelay * backoff
else:
raise
else:
break
return f2
return dec
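# Illustrative usage of the decorator above (names from this file):
#
#   @retries(5, delay=1, backoff=2, hook=retry_exc_handler)
#   def check_something():
#       ...  # re-invoked up to 5 times, sleeping 1, 2, 4 and 8 seconds between tries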
class TestPolicy(test_case.STTestCase):
@retries(5, hook=retry_exc_handler)
def check_ri_asn(self, fq_name, rt_target):
ri = self._vnc_lib.routing_instance_read(fq_name)
rt_refs = ri.get_route_target_refs()
if not rt_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs is None for %s' % fq_name)
for rt_ref in rt_refs:
if rt_ref['to'][0] == rt_target:
return
raise Exception('rt_target %s not found in ri %s' % (rt_target, fq_name))
@retries(5, hook=retry_exc_handler)
def check_bgp_asn(self, fq_name, asn):
router = self._vnc_lib.bgp_router_read(fq_name)
params = router.get_bgp_router_parameters()
if not params:
print "retrying ... ", test_common.lineno()
raise Exception('bgp params is None for %s' % fq_name)
self.assertEqual(params.get_autonomous_system(), asn)
@retries(5, hook=retry_exc_handler)
def check_lr_asn(self, fq_name, rt_target):
router = self._vnc_lib.logical_router_read(fq_name)
rt_refs = router.get_route_target_refs()
if not rt_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs is None for %s' % fq_name)
self.assertEqual(rt_refs[0]['to'][0], rt_target)
@retries(5, hook=retry_exc_handler)
def check_service_chain_prefix_match(self, fq_name, prefix):
ri = self._vnc_lib.routing_instance_read(fq_name)
sci = ri.get_service_chain_information()
if sci is None:
print "retrying ... ", test_common.lineno()
raise Exception('Service chain info not found for %s' % fq_name)
self.assertEqual(sci.prefix[0], prefix)
@retries(5, hook=retry_exc_handler)
def check_service_chain_info(self, fq_name, ri_fq, si, src_ri):
ri = self._vnc_lib.routing_instance_read(fq_name)
sci = ri.get_service_chain_information()
if sci is None:
print "retrying ... ", test_common.lineno()
raise Exception('Service chain info not found for %s' % fq_name)
self.assertEqual(sci.routing_instance, ri_fq)
self.assertEqual(sci.source_routing_instance, src_ri)
self.assertEqual(sci.service_instance, si)
@retries(5, hook=retry_exc_handler)
def check_service_chain_pbf_rules(self, service_fq_name, vmi_fq_name, macs):
vmi = self._vnc_lib.virtual_machine_interface_read(vmi_fq_name)
ri_refs = vmi.get_routing_instance_refs()
for ri_ref in ri_refs:
sc_name = ri_ref['to']
if sc_name == service_fq_name:
pbf_rule = ri_ref['attr']
self.assertTrue(pbf_rule.service_chain_address != None)
self.assertTrue(pbf_rule.vlan_tag != None)
self.assertTrue(pbf_rule.direction == 'both')
self.assertTrue(pbf_rule.src_mac == macs[0])
self.assertTrue(pbf_rule.dst_mac == macs[1])
return
raise Exception('Service chain pbf rules not found for %s' % service_fq_name)
@retries(5, hook=retry_exc_handler)
def check_service_chain_ip(self, sc_name):
_SC_IP_CF = 'service_chain_ip_address_table'
cf = CassandraCFs.get_cf(_SC_IP_CF)
ip = cf.get(sc_name)['ip_address']
@retries(5, hook=retry_exc_handler)
def check_ri_rt_state_vn_policy(self, fq_name, to_fq_name, expect_to_find):
ri = self._vnc_lib.routing_instance_read(fq_name)
rt_refs = ri.get_route_target_refs()
if not rt_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs is None for %s' % fq_name)
found = False
for rt_ref in rt_refs:
rt_obj = self._vnc_lib.route_target_read(id=rt_ref['uuid'])
ri_refs = rt_obj.get_routing_instance_back_refs()
for ri_ref in ri_refs:
if ri_ref['to'] == to_fq_name:
found = True
break
if found == True:
break
self.assertTrue(found == expect_to_find)
@retries(5, hook=retry_exc_handler)
def check_ri_state_vn_policy(self, fq_name, to_fq_name):
ri = self._vnc_lib.routing_instance_read(fq_name)
ri_refs = ri.get_routing_instance_refs()
if not ri_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs is None for %s' % fq_name)
found = False
for ri_ref in ri_refs:
if ri_ref['to'] == to_fq_name:
found = True
break
self.assertTrue(found)
@retries(5, hook=retry_exc_handler)
def check_ri_refs_are_deleted(self, fq_name):
ri = self._vnc_lib.routing_instance_read(fq_name)
ri_refs = ri.get_routing_instance_refs()
if ri_refs:
print "retrying ... ", test_common.lineno()
raise Exception('ri_refs still exist for %s' % fq_name)
@retries(5, hook=retry_exc_handler)
def delete_vn(self, fq_name):
try:
self._vnc_lib.virtual_network_delete(fq_name=fq_name)
print 'vn deleted'
except RefsExistError:
print "retrying ... ", test_common.lineno()
raise Exception('virtual network %s still exists' % str(fq_name))
@retries(5, hook=retry_exc_handler)
def check_vn_is_deleted(self, uuid):
try:
self._vnc_lib.virtual_network_read(id=uuid)
print "retrying ... ", test_common.lineno()
raise Exception('virtual network %s still exists' % uuid)
except NoIdError:
print 'vn deleted'
@retries(5, hook=retry_exc_handler)
def check_ri_is_deleted(self, fq_name):
try:
self._vnc_lib.routing_instance_read(fq_name)
print "retrying ... ", test_common.lineno()
raise Exception('routing instance %s still exists' % fq_name)
except NoIdError:
print 'ri deleted'
@retries(5, hook=retry_exc_handler)
def check_ri_is_present(self, fq_name):
self._vnc_lib.routing_instance_read(fq_name)
@retries(5, hook=retry_exc_handler)
def check_link_in_ifmap_graph(self, fq_name_str, links):
        # Verify that the given link appears on this identifier in the fake IF-MAP graph.
        self.assertThat(str(FakeIfmapClient._graph[fq_name_str]['links']), Contains(links))
@retries(5, hook=retry_exc_handler)
def wait_to_get_sc(self):
sc = [x for x in to_bgp.ServiceChain]
if len(sc) == 0:
print "retrying ... ", test_common.lineno()
raise Exception('Service chain not found')
return sc
@retries(5, hook=retry_exc_handler)
def wait_to_get_link(self, ident_name, link_fq_name):
self.assertThat(str(FakeIfmapClient._graph[ident_name]['links']), Contains(link_fq_name))
@retries(5, hook=retry_exc_handler)
def wait_to_remove_link(self, ident_name, link_fq_name):
self.assertThat(str(FakeIfmapClient._graph[ident_name]['links']), Not(Contains(link_fq_name)))
@retries(5, hook=retry_exc_handler)
def wait_to_get_sg_id(self, sg_fq_name):
sg_obj = self._vnc_lib.security_group_read(sg_fq_name)
if sg_obj.get_security_group_id() is None:
raise Exception('Security Group Id is none %s' % str(sg_fq_name))
@retries(5, hook=retry_exc_handler)
def check_acl_match_dst_cidr(self, fq_name, ip_prefix, ip_len):
acl = self._vnc_lib.access_control_list_read(fq_name)
for rule in acl.access_control_list_entries.acl_rule:
if (rule.match_condition.dst_address.subnet is not None and
rule.match_condition.dst_address.subnet.ip_prefix == ip_prefix and
rule.match_condition.dst_address.subnet.ip_prefix_len == ip_len):
return
raise Exception('prefix %s/%d not found in ACL rules for %s' %
(ip_prefix, ip_len, fq_name))
@retries(5, hook=retry_exc_handler)
def check_acl_match_nets(self, fq_name, vn1_fq_name, vn2_fq_name):
acl = self._vnc_lib.access_control_list_read(fq_name)
for rule in acl.access_control_list_entries.acl_rule:
if (rule.match_condition.src_address.virtual_network == vn1_fq_name and
rule.match_condition.dst_address.virtual_network == vn2_fq_name):
return
raise Exception('nets %s/%s not found in ACL rules for %s' %
(vn1_fq_name, vn2_fq_name, fq_name))
@retries(5, hook=retry_exc_handler)
def check_acl_match_sg(self, fq_name, acl_name, sg_id, is_all_rules = False):
sg_obj = self._vnc_lib.security_group_read(fq_name)
acls = sg_obj.get_access_control_lists()
acl = None
for acl_to in acls or []:
if (acl_to['to'][-1] == acl_name):
acl = self._vnc_lib.access_control_list_read(id=acl_to['uuid'])
break
self.assertTrue(acl != None)
match = False
for rule in acl.access_control_list_entries.acl_rule:
if acl_name == 'egress-access-control-list':
if rule.match_condition.dst_address.security_group != sg_id:
if is_all_rules:
raise Exception('sg %s/%s not found in %s - for some rule' %
(str(fq_name), str(sg_id), acl_name))
else:
match = True
break
if acl_name == 'ingress-access-control-list':
if rule.match_condition.src_address.security_group != sg_id:
if is_all_rules:
raise Exception('sg %s/%s not found in %s - for some rule' %
(str(fq_name), str(sg_id), acl_name))
else:
match = True
break
if match == False:
raise Exception('sg %s/%s not found in %s' %
(str(fq_name), str(sg_id), acl_name))
return
@retries(5, hook=retry_exc_handler)
def check_no_policies_for_sg(self, fq_name):
try:
sg_obj = self._vnc_lib.security_group_read(fq_name)
sg_entries = sg_obj.get_security_group_entries()
if sg_entries.get_policy_rule():
raise Exception('sg %s found policies' % (str(fq_name)))
except NoIdError:
pass
@retries(5, hook=retry_exc_handler)
def check_acl_not_match_sg(self, fq_name, acl_name, sg_id):
try:
sg_obj = self._vnc_lib.security_group_read(fq_name)
acls = sg_obj.get_access_control_lists()
acl = None
for acl_to in acls or []:
if (acl_to['to'][-1] != acl_name):
continue
acl = self._vnc_lib.access_control_list_read(id=acl_to['uuid'])
if acl == None:
return
for rule in acl.access_control_list_entries.acl_rule:
if acl_name == 'egress-access-control-list':
if rule.match_condition.dst_address.security_group == sg_id:
raise Exception('sg %s/%s found in %s - for some rule' %
(str(fq_name), str(sg_id), acl_name))
if acl_name == 'ingress-access-control-list':
if rule.match_condition.src_address.security_group == sg_id:
raise Exception('sg %s/%s found in %s - for some rule' %
(str(fq_name), str(sg_id), acl_name))
except NoIdError:
pass
@retries(5, hook=retry_exc_handler)
def check_acl_not_match_nets(self, fq_name, vn1_fq_name, vn2_fq_name):
acl = None
try:
acl = self._vnc_lib.access_control_list_read(fq_name)
except NoIdError:
return
found = False
for rule in acl.access_control_list_entries.acl_rule:
if (rule.match_condition.src_address.virtual_network == vn1_fq_name and
rule.match_condition.dst_address.virtual_network == vn2_fq_name):
found = True
if found == True:
raise Exception('nets %s/%s found in ACL rules for %s' %
(vn1_fq_name, vn2_fq_name, fq_name))
return
@retries(5, hook=retry_exc_handler)
def check_acl_not_match_mirror_to_ip(self, fq_name):
acl = None
try:
acl = self._vnc_lib.access_control_list_read(fq_name)
except NoIdError:
return
for rule in acl.access_control_list_entries.acl_rule:
if (rule.action_list.mirror_to.analyzer_ip_address is not None):
raise Exception('mirror to ip %s found in ACL rules for %s' % (fq_name))
return
@retries(5, hook=retry_exc_handler)
def check_acl_match_mirror_to_ip(self, fq_name):
acl = self._vnc_lib.access_control_list_read(fq_name)
for rule in acl.access_control_list_entries.acl_rule:
if (rule.action_list.mirror_to.analyzer_ip_address is not None):
return
raise Exception('mirror to ip not found in ACL rules for %s' % (fq_name))
@retries(5, hook=retry_exc_handler)
def check_route_target_in_routing_instance(self, ri_name, rt_list):
ri_obj = self._vnc_lib.routing_instance_read(fq_name=ri_name)
ri_rt_refs = set([ref['to'][0] for ref in ri_obj.get_route_target_refs() or []])
self.assertTrue(set(rt_list) <= ri_rt_refs)
def get_ri_name(self, vn, ri_name=None):
return vn.get_fq_name() + [ri_name or vn.name]
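    # Example (illustrative): for a VirtualNetwork named 'vn1' under the default
    # project, get_ri_name(vn) returns
    # ['default-domain', 'default-project', 'vn1', 'vn1'] and
    # get_ri_name(vn, 'svc-ri') returns ['default-domain', 'default-project', 'vn1', 'svc-ri'].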
def test_basic_policy(self):
vn1_name = self.id() + 'vn1'
vn2_name = self.id() + 'vn2'
vn1_obj = VirtualNetwork(vn1_name)
vn2_obj = VirtualNetwork(vn2_name)
np = self.create_network_policy(vn1_obj, vn2_obj)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
vn1_uuid = self._vnc_lib.virtual_network_create(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_create(vn2_obj)
for obj in [vn1_obj, vn2_obj]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn2_obj))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj),
self.get_ri_name(vn1_obj))
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn2_obj))
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
# end test_basic_policy
def test_multiple_policy(self):
vn1_name = self.id() + 'vn1'
vn2_name = self.id() + 'vn2'
vn1_obj = VirtualNetwork(vn1_name)
vn2_obj = VirtualNetwork(vn2_name)
np1 = self.create_network_policy(vn1_obj, vn2_obj)
np2 = self.create_network_policy(vn2_obj, vn1_obj)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np1, vnp)
vn2_obj.set_network_policy(np2, vnp)
vn1_uuid = self._vnc_lib.virtual_network_create(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_create(vn2_obj)
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn2_obj))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj),
self.get_ri_name(vn1_obj))
np1.network_policy_entries.policy_rule[0].action_list.simple_action = 'deny'
np1.set_network_policy_entries(np1.network_policy_entries)
self._vnc_lib.network_policy_update(np1)
expr =("('contrail:connection contrail:routing-instance:%s' in FakeIfmapClient._graph['contrail:routing-instance:%s']['links'])"
% (':'.join(self.get_ri_name(vn2_obj)),
':'.join(self.get_ri_name(vn1_obj))))
self.assertTill(expr)
np1.network_policy_entries.policy_rule[0].action_list.simple_action = 'pass'
np1.set_network_policy_entries(np1.network_policy_entries)
self._vnc_lib.network_policy_update(np1)
np2.network_policy_entries.policy_rule[0].action_list.simple_action = 'deny'
np2.set_network_policy_entries(np2.network_policy_entries)
self._vnc_lib.network_policy_update(np2)
expr = ("('contrail:connection contrail:routing-instance:%s' in FakeIfmapClient._graph['contrail:routing-instance:%s']['links'])"
% (':'.join(self.get_ri_name(vn1_obj)),
':'.join(self.get_ri_name(vn2_obj))))
self.assertTill(expr)
vn1_obj.del_network_policy(np1)
vn2_obj.del_network_policy(np2)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn2_obj))
self.delete_network_policy(np1)
self.delete_network_policy(np2)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
# end test_multiple_policy
def test_policy_in_policy(self):
vn1_name = self.id() + 'vn1'
vn2_name = self.id() + 'vn2'
vn3_name = self.id() + 'vn3'
vn1_obj = VirtualNetwork(vn1_name)
vn2_obj = VirtualNetwork(vn2_name)
np1 = self.create_network_policy(vn1_obj, vn2_obj)
np2 = self.create_network_policy(vn2_obj, vn1_obj)
np1.network_policy_entries.policy_rule[0].dst_addresses[0].virtual_network = None
np1.network_policy_entries.policy_rule[0].dst_addresses[0].network_policy = np2.get_fq_name_str()
np1.set_network_policy_entries(np1.network_policy_entries)
self._vnc_lib.network_policy_update(np1)
np2.network_policy_entries.policy_rule[0].src_addresses[0].virtual_network = 'local'
np2.set_network_policy_entries(np1.network_policy_entries)
self._vnc_lib.network_policy_update(np2)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np1, vnp)
vn2_obj.set_network_policy(np2, vnp)
vn1_uuid = self._vnc_lib.virtual_network_create(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_create(vn2_obj)
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn2_obj))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj),
self.get_ri_name(vn1_obj))
vn3_obj = VirtualNetwork(vn3_name)
vn3_obj.set_network_policy(np2, vnp)
vn3_uuid = self._vnc_lib.virtual_network_create(vn3_obj)
self.check_ri_state_vn_policy(self.get_ri_name(vn3_obj),
self.get_ri_name(vn1_obj))
vn3_obj.del_network_policy(np2)
self._vnc_lib.virtual_network_update(vn3_obj)
@retries(5, hook=retry_exc_handler)
def _match_acl_rule():
acl = self._vnc_lib.access_control_list_read(
fq_name=self.get_ri_name(vn1_obj))
for rule in acl.get_access_control_list_entries().get_acl_rule():
if rule.match_condition.dst_address.virtual_network == vn3_obj.get_fq_name_str():
raise Exception("ACL rule still present")
_match_acl_rule()
vn1_obj.del_network_policy(np1)
vn2_obj.del_network_policy(np2)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.delete_network_policy(np1)
self.delete_network_policy(np2)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn3_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
    # end test_policy_in_policy
def test_service_policy(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name])
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
sc = self.wait_to_get_sc()
sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_' + service_name
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_name),
self.get_ri_name(vn2_obj))
self.check_service_chain_prefix_match(fq_name=self.get_ri_name(vn2_obj, sc_ri_name),
prefix='10.0.0.0/24')
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
# end test_service_policy
def test_service_policy_no_vm(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
np.network_policy_entries.policy_rule[0].action_list.apply_service = ["default-domain:default-project:"+service_name]
np.set_network_policy_entries(np.network_policy_entries)
self._vnc_lib.network_policy_update(np)
sc = self.wait_to_get_sc()
sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_' + service_name
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_name))
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
np.network_policy_entries.policy_rule[0].action_list.apply_service = []
np.set_network_policy_entries(np.network_policy_entries)
self._vnc_lib.network_policy_update(np)
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
# end test_service_policy_no_vm
def test_multi_service_in_policy(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_names = [self.id() + 's1', self.id() + 's2', self.id() + 's3']
np = self.create_network_policy(vn1_obj, vn2_obj, service_names, "in-network", auto_policy=False)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
vn1_uuid = self._vnc_lib.virtual_network_update(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_update(vn2_obj)
for obj in [vn1_obj, vn2_obj]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
sc = self.wait_to_get_sc()
sc_ri_names = ['service-'+sc[0]+'-default-domain_default-project_' + s for s in service_names]
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_names[0]))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_names[2]),
self.get_ri_name(vn2_obj))
self.check_service_chain_prefix_match(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[2]),
prefix='20.0.0.0/24')
si_name = 'default-domain:default-project:test.test_service.TestPolicy.test_multi_service_in_policys3'
self.check_service_chain_info(self.get_ri_name(vn1_obj, sc_ri_names[2]),
':'.join(self.get_ri_name(vn2_obj)), si_name, ':'.join(self.get_ri_name(vn1_obj)))
self.check_service_chain_info(self.get_ri_name(vn2_obj, sc_ri_names[2]),
':'.join(self.get_ri_name(vn1_obj)), si_name, ':'.join(self.get_ri_name(vn2_obj)))
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[0]))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj, sc_ri_names[0]))
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
self.delete_network_policy(np)
self.delete_vn(fq_name=vn1_obj.get_fq_name())
self.delete_vn(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
# end test_multi_service_in_policy
def test_multi_service_policy(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_names = [self.id() + 's1', self.id() + 's2', self.id() + 's3']
np = self.create_network_policy(vn1_obj, vn2_obj, service_names)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
sc = self.wait_to_get_sc()
sc_ri_names = ['service-'+sc[0]+'-default-domain_default-project_' + s for s in service_names]
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_names[0]))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_names[-1]),
self.get_ri_name(vn2_obj))
self.check_service_chain_prefix_match(fq_name=self.get_ri_name(vn2_obj, sc_ri_names[0]),
prefix='10.0.0.0/24')
self.check_service_chain_ip(sc_ri_names[0])
self.check_service_chain_ip(sc_ri_names[1])
self.check_service_chain_ip(sc_ri_names[2])
sc_fq_names = [
self.get_ri_name(vn1_obj, sc_ri_names[0]),
self.get_ri_name(vn2_obj, sc_ri_names[0]),
self.get_ri_name(vn1_obj, sc_ri_names[1]),
self.get_ri_name(vn2_obj, sc_ri_names[1]),
self.get_ri_name(vn1_obj, sc_ri_names[2]),
self.get_ri_name(vn2_obj, sc_ri_names[2])
]
vmi_fq_names = [
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys1__1__left__1'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys1__1__right__2'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys2__1__left__1'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys2__1__right__2'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys3__1__left__1'],
['default-domain', 'default-project',
'default-domain__default-project__test.test_service.TestPolicy.test_multi_service_policys3__1__right__2']
]
mac1 = '02:00:00:00:00:01'
mac2 = '02:00:00:00:00:02'
self.check_service_chain_pbf_rules(sc_fq_names[0], vmi_fq_names[0], [mac1, mac2])
self.check_service_chain_pbf_rules(sc_fq_names[1], vmi_fq_names[1], [mac2, mac1])
self.check_service_chain_pbf_rules(sc_fq_names[2], vmi_fq_names[2], [mac1, mac2])
self.check_service_chain_pbf_rules(sc_fq_names[3], vmi_fq_names[3], [mac2, mac1])
self.check_service_chain_pbf_rules(sc_fq_names[4], vmi_fq_names[4], [mac1, mac2])
self.check_service_chain_pbf_rules(sc_fq_names[5], vmi_fq_names[5], [mac2, mac1])
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[0]))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[1]))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_names[2]))
vn1_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
    # end test_multi_service_policy
# end class TestPolicy
#class TestRouteTable(test_case.STTestCase):
def test_add_delete_route(self):
lvn_name = self.id() + 'lvn'
rvn_name = self.id() + 'rvn'
lvn = self.create_virtual_network(lvn_name, "10.0.0.0/24")
rvn = self.create_virtual_network(rvn_name, "20.0.0.0/24")
service_name = self.id() + 's1'
np = self.create_network_policy(lvn, rvn, [service_name], "in-network")
vn_name = self.id() + 'vn100'
vn = self.create_virtual_network(vn_name, "1.0.0.0/24")
rtgt_list = RouteTargetList(route_target=['target:1:1'])
vn.set_route_target_list(rtgt_list)
self._vnc_lib.virtual_network_update(vn)
rt = RouteTable("rt1")
self._vnc_lib.route_table_create(rt)
vn.add_route_table(rt)
self._vnc_lib.virtual_network_update(vn)
routes = RouteTableType()
route = RouteType(prefix="0.0.0.0/0",
next_hop="default-domain:default-project:"+service_name)
routes.add_route(route)
rt.set_routes(routes)
self._vnc_lib.route_table_update(rt)
@retries(5, hook=retry_exc_handler)
def _match_route_table(rtgt_list):
sc = [x for x in to_bgp.ServiceChain]
if len(sc) == 0:
raise Exception("sc has 0 len")
sc_ri_name = ('service-'+sc[0] +
'-default-domain_default-project_' + service_name)
lri = self._vnc_lib.routing_instance_read(
fq_name=self.get_ri_name(lvn, sc_ri_name))
sr = lri.get_static_route_entries()
if sr is None:
raise Exception("sr is None")
route = sr.route[0]
self.assertEqual(route.prefix, "0.0.0.0/0")
self.assertEqual(route.next_hop, "10.0.0.252")
for rtgt in rtgt_list:
self.assertIn(rtgt, route.route_target)
ri100 = self._vnc_lib.routing_instance_read(
fq_name=self.get_ri_name(vn))
rt100 = ri100.get_route_target_refs()[0]['to']
for rt_ref in lri.get_route_target_refs() or []:
if rt100 == rt_ref['to']:
return sc_ri_name, rt100
raise Exception("rt100 route-target ref not found")
sc_ri_name, rt100 = _match_route_table(rtgt_list.get_route_target())
rtgt_list.add_route_target('target:1:2')
vn.set_route_target_list(rtgt_list)
self._vnc_lib.virtual_network_update(vn)
_match_route_table(rtgt_list.get_route_target())
rtgt_list.delete_route_target('target:1:1')
vn.set_route_target_list(rtgt_list)
self._vnc_lib.virtual_network_update(vn)
_match_route_table(rtgt_list.get_route_target())
routes.set_route([])
rt.set_routes(routes)
self._vnc_lib.route_table_update(rt)
@retries(5, hook=retry_exc_handler)
def _match_route_table_cleanup(sc_ri_name, rt100):
lri = self._vnc_lib.routing_instance_read(
fq_name=self.get_ri_name(lvn, sc_ri_name))
sr = lri.get_static_route_entries()
if sr and sr.route:
raise Exception("sr has route")
ri = self._vnc_lib.routing_instance_read(
fq_name=self.get_ri_name(lvn))
rt_refs = ri.get_route_target_refs()
for rt_ref in rt_refs or []:
if rt100 == rt_ref['to']:
raise Exception("rt100 route-target ref found")
_match_route_table_cleanup(sc_ri_name, rt100)
# add the route again, then delete the network without deleting the
# link to route table
routes.add_route(route)
rt.set_routes(routes)
self._vnc_lib.route_table_update(rt)
_match_route_table(rtgt_list.get_route_target())
self._vnc_lib.virtual_network_delete(fq_name=vn.get_fq_name())
_match_route_table_cleanup(sc_ri_name, rt100)
self._vnc_lib.route_table_delete(fq_name=rt.get_fq_name())
self.delete_network_policy(np, auto_policy=True)
gevent.sleep(2)
self._vnc_lib.virtual_network_delete(fq_name=lvn.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=rvn.get_fq_name())
# test_add_delete_route
def test_vn_delete(self):
vn_name = self.id() + 'vn'
vn = self.create_virtual_network(vn_name, "10.1.1.0/24")
gevent.sleep(2)
for obj in [vn]:
ident_name = self.get_obj_imid(obj)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.check_vn_ri_state(fq_name=self.get_ri_name(vn))
# stop st
self._st_greenlet.kill()
gevent.sleep(5)
# delete vn in api server
self._vnc_lib.virtual_network_delete(fq_name=vn.get_fq_name())
# start st on a free port
self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer,
self.id(), self._api_server_ip, self._api_server_port)
gevent.sleep(2)
# check if vn is deleted
self.check_vn_is_deleted(uuid=vn.uuid)
# check if ri is deleted
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn))
# test_vn_delete
@retries(5, hook=retry_exc_handler)
def check_vn_ri_state(self, fq_name):
ri = self._vnc_lib.routing_instance_read(fq_name)
def test_policy_with_cidr(self):
vn1_name = self.id() + 'vn1'
vn2_name = self.id() + 'vn2'
vn1 = self.create_virtual_network(vn1_name, "10.1.1.0/24")
vn2 = self.create_virtual_network(vn2_name, "10.2.1.0/24")
rules = []
rule1 = { "protocol": "icmp",
"direction": "<>",
"src-port": "any",
"src": {"type": "vn", "value": vn1},
"dst": {"type": "cidr", "value": "10.2.1.1/32"},
"dst-port": "any",
"action": "deny"
}
rule2 = { "protocol": "icmp",
"direction": "<>",
"src-port": "any",
"src": {"type": "vn", "value": vn1},
"dst": {"type": "cidr", "value": "10.2.1.2/32"},
"dst-port": "any",
"action": "deny"
}
rules.append(rule1)
rules.append(rule2)
np = self.create_network_policy_with_multiple_rules(rules)
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1)
for obj in [vn1]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.check_vn_ri_state(fq_name=self.get_ri_name(vn1))
self.check_acl_match_dst_cidr(fq_name=self.get_ri_name(vn1),
ip_prefix="10.2.1.1", ip_len=32)
self.check_acl_match_dst_cidr(fq_name=self.get_ri_name(vn1),
ip_prefix="10.2.1.2", ip_len=32)
#cleanup
self.delete_network_policy(np, auto_policy=True)
self._vnc_lib.virtual_network_delete(fq_name=vn1.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2.get_fq_name())
# check if vn is deleted
self.check_vn_is_deleted(uuid=vn1.uuid)
# test st restart while service chain is configured
def test_st_restart_service_chain_delete(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn1_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn1_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name])
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.clear_pending_updates()
vn2_obj.clear_pending_updates()
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
sc = self.wait_to_get_sc()
sc_ri_name = ('service-' + sc[0] + '-default-domain_default-project_'
+ service_name)
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_name),
self.get_ri_name(vn2_obj))
# stop st
test_common.kill_schema_transformer(self._st_greenlet)
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
gevent.sleep(3)
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
# start st on a free port
self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer,
self.id(), self._api_server_ip, self._api_server_port)
#check if all ri's are deleted
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj, sc_ri_name))
#end
# test service chain configuration while st is restarted
def test_st_restart_service_chain(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name])
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
# stop st and wait for sometime
test_common.kill_schema_transformer(self._st_greenlet)
gevent.sleep(5)
# start st on a free port
self._st_greenlet = gevent.spawn(test_common.launch_schema_transformer,
self.id(), self._api_server_ip, self._api_server_port)
#check service chain state
sc = self.wait_to_get_sc()
sc_ri_name = ('service-' + sc[0] + '-default-domain_default-project_'
+ service_name)
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_name),
self.get_ri_name(vn2_obj))
#cleanup
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_ri_refs_are_deleted(fq_name=self.get_ri_name(vn1_obj))
self.delete_network_policy(np)
self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())
self._vnc_lib.virtual_network_delete(fq_name=vn2_obj.get_fq_name())
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
#check if all ri's are deleted
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_is_deleted(fq_name=self.get_ri_name(vn2_obj, sc_ri_name))
#end
# test logical router functionality
def test_logical_router(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create virtual machine interface
vmi_name = self.id() + 'vmi1'
vmi = VirtualMachineInterface(vmi_name, parent_type='project', fq_name=['default-domain', 'default-project', vmi_name])
vmi.add_virtual_network(vn1_obj)
self._vnc_lib.virtual_machine_interface_create(vmi)
# create logical router
lr_name = self.id() + 'lr1'
lr = LogicalRouter(lr_name)
rtgt_list = RouteTargetList(route_target=['target:1:1'])
lr.set_configured_route_target_list(rtgt_list)
lr.add_virtual_machine_interface(vmi)
self._vnc_lib.logical_router_create(lr)
ri_name = self.get_ri_name(vn1_obj)
self.check_route_target_in_routing_instance(ri_name, rtgt_list.get_route_target())
rtgt_list.add_route_target('target:1:2')
lr.set_configured_route_target_list(rtgt_list)
self._vnc_lib.logical_router_update(lr)
self.check_route_target_in_routing_instance(ri_name, rtgt_list.get_route_target())
rtgt_list.delete_route_target('target:1:1')
lr.set_configured_route_target_list(rtgt_list)
self._vnc_lib.logical_router_update(lr)
self.check_route_target_in_routing_instance(ri_name, rtgt_list.get_route_target())
lr.del_virtual_machine_interface(vmi)
self._vnc_lib.logical_router_update(lr)
self._vnc_lib.virtual_machine_interface_delete(id=vmi.uuid)
self._vnc_lib.virtual_network_delete(id=vn1_obj.uuid)
self.check_vn_is_deleted(uuid=vn1_obj.uuid)
self._vnc_lib.logical_router_delete(id=lr.uuid)
@retries(5, hook=retry_exc_handler)
def check_bgp_peering(self, router1, router2, length):
r1 = self._vnc_lib.bgp_router_read(fq_name=router1.get_fq_name())
ref_names = [ref['to'] for ref in r1.get_bgp_router_refs() or []]
self.assertEqual(len(ref_names), length)
self.assertThat(ref_names, Contains(router2.get_fq_name()))
def create_bgp_router(self, name, vendor, asn=None):
ip_fabric_ri = self._vnc_lib.routing_instance_read(
fq_name=['default-domain', 'default-project', 'ip-fabric', '__default__'])
router = BgpRouter(name, parent_obj=ip_fabric_ri)
params = BgpRouterParams()
params.vendor = 'contrail'
params.autonomous_system = asn
router.set_bgp_router_parameters(params)
self._vnc_lib.bgp_router_create(router)
return router
def test_ibgp_auto_mesh(self):
# create router1
r1_name = self.id() + 'router1'
router1 = self.create_bgp_router(r1_name, 'contrail')
# create router2
r2_name = self.id() + 'router2'
router2 = self.create_bgp_router(r2_name, 'contrail')
self.check_bgp_peering(router1, router2, 1)
r3_name = self.id() + 'router3'
router3 = self.create_bgp_router(r3_name, 'juniper', 1)
self.check_bgp_peering(router1, router2, 1)
params = router3.get_bgp_router_parameters()
params.autonomous_system = 64512
router3.set_bgp_router_parameters(params)
self._vnc_lib.bgp_router_update(router3)
self.check_bgp_peering(router1, router3, 2)
r4_name = self.id() + 'router4'
router4 = self.create_bgp_router(r4_name, 'juniper', 1)
gsc = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
gsc.set_autonomous_system(1)
self.check_bgp_peering(router1, router4, 3)
self._vnc_lib.bgp_router_delete(id=router1.uuid)
self._vnc_lib.bgp_router_delete(id=router2.uuid)
self._vnc_lib.bgp_router_delete(id=router3.uuid)
self._vnc_lib.bgp_router_delete(id=router4.uuid)
gevent.sleep(1)
@retries(10, hook=retry_exc_handler)
def check_vrf_assign_table(self, vmi_fq_name, floating_ip, is_present = True):
vmi = self._vnc_lib.virtual_machine_interface_read(vmi_fq_name)
if is_present:
self.assertEqual(vmi.get_vrf_assign_table().vrf_assign_rule[1].match_condition.src_address.subnet.ip_prefix, floating_ip)
else:
try:
self.assertEqual(vmi.get_vrf_assign_table().vrf_assign_rule[1].match_condition.src_address.subnet.ip_prefix, floating_ip)
except Exception:
# floating ip reference is gone, as expected
pass
else:
raise Exception('floating ip is still present: ' + floating_ip)
def test_analyzer(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name], 'transparent', 'analyzer', action_type = 'mirror-to')
seq = SequenceType(1, 1)
vnp = VirtualNetworkPolicyType(seq)
vn1_obj.set_network_policy(np, vnp)
vn2_obj.set_network_policy(np, vnp)
vn1_uuid = self._vnc_lib.virtual_network_update(vn1_obj)
vn2_uuid = self._vnc_lib.virtual_network_update(vn2_obj)
for obj in [vn1_obj, vn2_obj]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
svc_ri_fq_name = 'default-domain:default-project:svc-vn-left:svc-vn-left'.split(':')
self.check_ri_state_vn_policy(svc_ri_fq_name, self.get_ri_name(vn1_obj))
self.check_ri_state_vn_policy(svc_ri_fq_name, self.get_ri_name(vn2_obj))
self.check_acl_match_mirror_to_ip(self.get_ri_name(vn1_obj))
self.check_acl_match_nets(self.get_ri_name(vn1_obj), ':'.join(vn1_obj.get_fq_name()), ':'.join(vn2_obj.get_fq_name()))
self.check_acl_match_nets(self.get_ri_name(vn2_obj), ':'.join(vn2_obj.get_fq_name()), ':'.join(vn1_obj.get_fq_name()))
vn1_obj.del_network_policy(np)
vn2_obj.del_network_policy(np)
self._vnc_lib.virtual_network_update(vn1_obj)
self._vnc_lib.virtual_network_update(vn2_obj)
self.check_acl_not_match_mirror_to_ip(self.get_ri_name(vn1_obj))
self.check_acl_not_match_nets(self.get_ri_name(vn1_obj), ':'.join(vn1_obj.get_fq_name()), ':'.join(vn2_obj.get_fq_name()))
self.check_acl_not_match_nets(self.get_ri_name(vn2_obj), ':'.join(vn2_obj.get_fq_name()), ':'.join(vn1_obj.get_fq_name()))
@retries(5, hook=retry_exc_handler)
def check_security_group_id(self, sg_fq_name, verify_sg_id = None):
sg = self._vnc_lib.security_group_read(sg_fq_name)
sg_id = sg.get_security_group_id()
if sg_id is None:
raise Exception('sg id is not present for %s' % sg_fq_name)
if verify_sg_id is not None and str(sg_id) != str(verify_sg_id):
raise Exception('sg id is not same as passed value (%s, %s)' % (str(sg_id), str(verify_sg_id)))
def _security_group_rule_build(self, rule_info, sg_uuid):
protocol = rule_info['protocol']
port_min = rule_info['port_min'] or 0
port_max = rule_info['port_max'] or 65535
direction = rule_info['direction'] or 'ingress'
ip_prefix = rule_info['ip_prefix']
ether_type = rule_info['ether_type']
sg_id = rule_info['sg_id']
if ip_prefix:
cidr = ip_prefix.split('/')
pfx = cidr[0]
pfx_len = int(cidr[1])
endpt = [AddressType(subnet=SubnetType(pfx, pfx_len))]
elif sg_id:
try:
sg_obj = self._vnc_lib.security_group_read(id=sg_uuid)
except NoIdError:
raise Exception('SecurityGroupNotFound %s' % sg_uuid)
endpt = [AddressType(security_group=sg_obj.get_fq_name_str())]
local = None
remote = None
if direction == 'ingress':
dir = '>'
local = endpt
remote = [AddressType(security_group='local')]
else:
dir = '>'
remote = endpt
local = [AddressType(security_group='local')]
if not protocol:
protocol = 'any'
if protocol.isdigit():
protocol = int(protocol)
if protocol < 0 or protocol > 255:
raise Exception('SecurityGroupRuleInvalidProtocol-%s' % protocol)
else:
if protocol not in ['any', 'tcp', 'udp', 'icmp']:
raise Exception('SecurityGroupRuleInvalidProtocol-%s' % protocol)
if not ip_prefix and not sg_id:
if not ether_type:
ether_type = 'IPv4'
sgr_uuid = str(uuid.uuid4())
rule = PolicyRuleType(rule_uuid=sgr_uuid, direction=dir,
protocol=protocol,
src_addresses=local,
src_ports=[PortType(0, 65535)],
dst_addresses=remote,
dst_ports=[PortType(port_min, port_max)],
ethertype=ether_type)
return rule
#end _security_group_rule_build
def _security_group_rule_append(self, sg_obj, sg_rule):
rules = sg_obj.get_security_group_entries()
if rules is None:
rules = PolicyEntriesType([sg_rule])
else:
for sgr in rules.get_policy_rule() or []:
sgr_copy = copy.copy(sgr)
sgr_copy.rule_uuid = sg_rule.rule_uuid
if sg_rule == sgr_copy:
raise Exception('SecurityGroupRuleExists %s' % sgr.rule_uuid)
rules.add_policy_rule(sg_rule)
sg_obj.set_security_group_entries(rules)
#end _security_group_rule_append
def _security_group_rule_remove(self, sg_obj, sg_rule):
rules = sg_obj.get_security_group_entries()
if rules is None:
raise Exception('SecurityGroupRuleNotExists %s' % sg_rule.rule_uuid)
else:
for sgr in rules.get_policy_rule() or []:
if sgr.rule_uuid == sg_rule.rule_uuid:
rules.delete_policy_rule(sgr)
sg_obj.set_security_group_entries(rules)
return
raise Exception('SecurityGroupRuleNotExists %s' % sg_rule.rule_uuid)
#end _security_group_rule_remove
def security_group_create(self, sg_name, project_fq_name):
project_obj = self._vnc_lib.project_read(project_fq_name)
sg_obj = SecurityGroup(name=sg_name, parent_obj=project_obj)
self._vnc_lib.security_group_create(sg_obj)
return sg_obj
#end security_group_create
def test_sg(self):
#create sg and associate egress rule and check acls
sg1_obj = self.security_group_create('sg-1', [u'default-domain', u'default-project'])
self.wait_to_get_sg_id(sg1_obj.get_fq_name())
sg1_obj = self._vnc_lib.security_group_read(sg1_obj.get_fq_name())
rule1 = {}
rule1['port_min'] = 0
rule1['port_max'] = 65535
rule1['direction'] = 'egress'
rule1['ip_prefix'] = None
rule1['protocol'] = 'any'
rule1['ether_type'] = 'IPv4'
rule1['sg_id'] = sg1_obj.get_security_group_id()
sg_rule1 = self._security_group_rule_build(rule1, sg1_obj.get_uuid())
self._security_group_rule_append(sg1_obj, sg_rule1)
self._vnc_lib.security_group_update(sg1_obj)
self.check_security_group_id(sg1_obj.get_fq_name())
sg1_obj = self._vnc_lib.security_group_read(sg1_obj.get_fq_name())
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
sg1_obj.set_configured_security_group_id(100)
self._vnc_lib.security_group_update(sg1_obj)
self.check_security_group_id(sg1_obj.get_fq_name(), 100)
sg1_obj = self._vnc_lib.security_group_read(sg1_obj.get_fq_name())
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
#create another sg and associate ingress rule and check acls
sg2_obj = self.security_group_create('sg-2', [u'default-domain', u'default-project'])
self.wait_to_get_sg_id(sg2_obj.get_fq_name())
sg2_obj = self._vnc_lib.security_group_read(sg2_obj.get_fq_name())
rule2 = {}
rule2['port_min'] = 0
rule2['port_max'] = 65535
rule2['direction'] = 'ingress'
rule2['ip_prefix'] = None
rule2['protocol'] = 'any'
rule2['ether_type'] = 'IPv4'
rule2['sg_id'] = sg2_obj.get_security_group_id()
sg_rule2 = self._security_group_rule_build(rule2, sg2_obj.get_uuid())
self._security_group_rule_append(sg2_obj, sg_rule2)
self._vnc_lib.security_group_update(sg2_obj)
self.check_security_group_id(sg2_obj.get_fq_name())
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'ingress-access-control-list',
sg2_obj.get_security_group_id())
#add ingress and egress rules to same sg and check for both
rule1['sg_id'] = sg2_obj.get_security_group_id()
sg_rule3 = self._security_group_rule_build(rule1, sg2_obj.get_uuid())
self._security_group_rule_append(sg2_obj, sg_rule3)
self._vnc_lib.security_group_update(sg2_obj)
self.check_security_group_id(sg2_obj.get_fq_name())
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'egress-access-control-list',
sg2_obj.get_security_group_id())
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'ingress-access-control-list',
sg2_obj.get_security_group_id())
#add one more ingress and egress
rule1['direction'] = 'ingress'
rule1['port_min'] = 1
rule1['port_max'] = 100
self._security_group_rule_append(sg2_obj, self._security_group_rule_build(rule1, sg2_obj.get_uuid()))
rule1['direction'] = 'egress'
rule1['port_min'] = 101
rule1['port_max'] = 200
self._security_group_rule_append(sg2_obj, self._security_group_rule_build(rule1, sg2_obj.get_uuid()))
self._vnc_lib.security_group_update(sg2_obj)
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'egress-access-control-list',
sg2_obj.get_security_group_id(), True)
self.check_acl_match_sg(sg2_obj.get_fq_name(), 'ingress-access-control-list',
sg2_obj.get_security_group_id(), True)
# duplicate security group id configured, vnc api allows
# isn't this a problem?
sg2_obj.set_configured_security_group_id(100)
self._vnc_lib.security_group_update(sg2_obj)
self.check_security_group_id(sg2_obj.get_fq_name(), 100)
#sg id '0' is not allowed, should not get modified
sg1_obj.set_configured_security_group_id(0)
self._vnc_lib.security_group_update(sg1_obj)
self.check_security_group_id(sg1_obj.get_fq_name(), 8000001)
# -ve security group id not allowed, should not get modified
sg1_obj.set_configured_security_group_id(-100)
self._vnc_lib.security_group_update(sg1_obj)
self.check_security_group_id(sg1_obj.get_fq_name(), -100)
#end test_sg
def test_delete_sg(self):
#create sg and associate egress rule and check acls
sg1_obj = self.security_group_create('sg-1', [u'default-domain', u'default-project'])
self.wait_to_get_sg_id(sg1_obj.get_fq_name())
sg1_obj = self._vnc_lib.security_group_read(sg1_obj.get_fq_name())
rule1 = {}
rule1['ip_prefix'] = None
rule1['protocol'] = 'any'
rule1['ether_type'] = 'IPv4'
rule1['sg_id'] = sg1_obj.get_security_group_id()
rule1['direction'] = 'ingress'
rule1['port_min'] = 1
rule1['port_max'] = 100
rule_in_obj = self._security_group_rule_build(rule1, sg1_obj.get_uuid())
rule1['direction'] = 'egress'
rule1['port_min'] = 101
rule1['port_max'] = 200
rule_eg_obj = self._security_group_rule_build(rule1, sg1_obj.get_uuid())
self._security_group_rule_append(sg1_obj, rule_in_obj)
self._security_group_rule_append(sg1_obj, rule_eg_obj)
self._vnc_lib.security_group_update(sg1_obj)
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'ingress-access-control-list',
sg1_obj.get_security_group_id())
self._security_group_rule_remove(sg1_obj, rule_in_obj)
self._vnc_lib.security_group_update(sg1_obj)
self.check_acl_not_match_sg(sg1_obj.get_fq_name(), 'ingress-access-control-list',
sg1_obj.get_security_group_id())
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
self._security_group_rule_append(sg1_obj, rule_in_obj)
self._security_group_rule_remove(sg1_obj, rule_eg_obj)
self._vnc_lib.security_group_update(sg1_obj)
self.check_acl_match_sg(sg1_obj.get_fq_name(), 'ingress-access-control-list',
sg1_obj.get_security_group_id())
self.check_acl_not_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
self._security_group_rule_remove(sg1_obj, rule_in_obj)
self._vnc_lib.security_group_update(sg1_obj)
self.check_acl_not_match_sg(sg1_obj.get_fq_name(), 'ingress-access-control-list',
sg1_obj.get_security_group_id())
self.check_acl_not_match_sg(sg1_obj.get_fq_name(), 'egress-access-control-list',
sg1_obj.get_security_group_id())
self.check_no_policies_for_sg(sg1_obj.get_fq_name())
self._vnc_lib.security_group_delete(fq_name=sg1_obj.get_fq_name())
#end test_delete_sg
def test_asn(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
for obj in [vn1_obj]:
ident_name = self.get_obj_imid(obj)
gevent.sleep(2)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.check_ri_asn(self.get_ri_name(vn1_obj), 'target:64512:8000001')
# create router1
r1_name = self.id() + 'router1'
router1 = self.create_bgp_router(r1_name, 'contrail')
self.check_bgp_asn(router1.get_fq_name(), 64512)
# create virtual machine interface
vmi_name = self.id() + 'vmi1'
vmi = VirtualMachineInterface(vmi_name, parent_type='project', fq_name=['default-domain', 'default-project', vmi_name])
vmi.add_virtual_network(vn1_obj)
self._vnc_lib.virtual_machine_interface_create(vmi)
# create logical router
lr_name = self.id() + 'lr1'
lr = LogicalRouter(lr_name)
lr.add_virtual_machine_interface(vmi)
self._vnc_lib.logical_router_create(lr)
self.check_lr_asn(lr.get_fq_name(), 'target:64512:8000002')
#update global system config but don't change asn value for equality path
gs = self._vnc_lib.global_system_config_read(fq_name=[u'default-global-system-config'])
gs.set_autonomous_system(64512)
self._vnc_lib.global_system_config_update(gs)
# check route targets
self.check_ri_asn(self.get_ri_name(vn1_obj), 'target:64512:8000001')
self.check_bgp_asn(router1.get_fq_name(), 64512)
self.check_lr_asn(lr.get_fq_name(), 'target:64512:8000002')
#update ASN value
gs = self._vnc_lib.global_system_config_read(fq_name=[u'default-global-system-config'])
gs.set_autonomous_system(50000)
self._vnc_lib.global_system_config_update(gs)
# check new route targets
self.check_ri_asn(self.get_ri_name(vn1_obj), 'target:50000:8000001')
self.check_bgp_asn(router1.get_fq_name(), 50000)
self.check_lr_asn(lr.get_fq_name(), 'target:50000:8000002')
#end test_asn
def test_fip(self):
# create vn1
vn1_name = self.id() + 'vn1'
vn1_obj = self.create_virtual_network(vn1_name, '10.0.0.0/24')
# create vn2
vn2_name = self.id() + 'vn2'
vn2_obj = self.create_virtual_network(vn2_name, '20.0.0.0/24')
service_name = self.id() + 's1'
np = self.create_network_policy(vn1_obj, vn2_obj, [service_name], 'in-network')
sc = self.wait_to_get_sc()
sc_ri_name = 'service-'+sc[0]+'-default-domain_default-project_' + service_name
self.check_ri_state_vn_policy(self.get_ri_name(vn1_obj),
self.get_ri_name(vn1_obj, sc_ri_name))
self.check_ri_state_vn_policy(self.get_ri_name(vn2_obj, sc_ri_name),
self.get_ri_name(vn2_obj))
vmi_fq_name = 'default-domain:default-project:default-domain__default-project__test.test_service.TestPolicy.test_fips1__1__left__1'
vmi = self._vnc_lib.virtual_machine_interface_read(vmi_fq_name.split(':'))
vn3_name = 'vn-public'
vn3_obj = VirtualNetwork(vn3_name)
vn3_obj.set_router_external(True)
ipam3_obj = NetworkIpam('ipam3')
self._vnc_lib.network_ipam_create(ipam3_obj)
vn3_obj.add_network_ipam(ipam3_obj, VnSubnetsType(
[IpamSubnetType(SubnetType("192.168.7.0", 24))]))
vn3_uuid = self._vnc_lib.virtual_network_create(vn3_obj)
fip_pool_name = 'vn_public_fip_pool'
fip_pool = FloatingIpPool(fip_pool_name, vn3_obj)
self._vnc_lib.floating_ip_pool_create(fip_pool)
fip_obj = FloatingIp("fip1", fip_pool)
default_project = self._vnc_lib.project_read(fq_name=[u'default-domain', u'default-project'])
fip_obj.set_project(default_project)
fip_uuid = self._vnc_lib.floating_ip_create(fip_obj)
fip_obj.set_virtual_machine_interface(vmi)
self._vnc_lib.floating_ip_update(fip_obj)
fip_obj = self._vnc_lib.floating_ip_read(fip_obj.get_fq_name())
for obj in [fip_obj]:
ident_name = self.get_obj_imid(obj)
ifmap_ident = self.assertThat(FakeIfmapClient._graph, Contains(ident_name))
self.wait_to_get_link(ident_name, vmi_fq_name)
fip = fip_obj.get_floating_ip_address()
self.check_vrf_assign_table(vmi.get_fq_name(), fip, True)
fip_fq_name = fip_obj.get_fq_name()
self._vnc_lib.floating_ip_delete(fip_fq_name)
self.wait_to_remove_link(self.get_obj_imid(vmi), fip_fq_name)
self.check_vrf_assign_table(vmi.get_fq_name(), fip, False)
# end class TestRouteTable
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# stdlib
import datetime
import inspect
import os
from pathlib import Path
import shutil
import sys
from typing import Any
from typing import Dict
dir_name = inspect.getfile(inspect.currentframe()) # type: ignore
__location__ = os.path.join(os.getcwd(), os.path.dirname(dir_name))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, "../src"))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
# third party
from sphinx.ext import apidoc
except ImportError:
# third party
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/syft")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
# third party
from pkg_resources import parse_version
import sphinx
# found this --module-first here shorturl.at/iDKNW
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir} --module-first"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if parse_version(sphinx.__version__) >= parse_version("1.7"):
args = args[1:]
apidoc.main(args)
except Exception as e:
print(f"Running `sphinx-apidoc` failed!\n{e}")
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
]
extensions.append("recommonmark")
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# override autodoc defaults to skip/not skip certain methods
def skip(
app: Any, what: Any, name: str, obj: Any, would_skip: bool, options: Any
) -> bool:
if name == "__init__":
return False
if name == "__hash__":
return False
if name == "__eq__":
return False
if name == "_proto2object":
return False
if name == "_object2proto":
return False
if name == "_serialize":
return False
if name == "_deserialize":
return False
return would_skip
# To configure AutoStructify
def setup(app: Any) -> None:
# third party
from recommonmark.transform import AutoStructify
app.add_config_value(
"recommonmark_config",
{
"auto_toc_tree_section": "Contents",
"enable_eval_rst": True,
"enable_math": True,
"enable_inline_math": True,
},
True,
)
app.add_transform(AutoStructify)
app.connect("autodoc-skip-member", skip)
# The suffix of source filenames.
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The main toctree document.
master_doc = "index"
# General information about the project.
now = datetime.datetime.now()
project = "syft"
copyright = f"{now.year}, OpenMined Core Contributors"
# The version info for the project you're documenting, acts as a replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
default_version = "" # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = "" # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to the source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, section_author and module_author directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'sphinx-theme-graphite'
# html_theme = "alabaster"
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"sidebar_width": "300px"}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes/"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
# syft absolute
from syft import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as the favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# sort methods by source order
autodoc_member_order = "bysource"
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "syft-doc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements: Dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "user_guide.tex", "syft Documentation", "Andrew Trask", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
"sphinx": ("http://www.sphinx-doc.org/en/stable", None),
"python": ("https://docs.python.org/" + python_version, None),
"matplotlib": ("https://matplotlib.org", None),
"numpy": ("https://docs.scipy.org/doc/numpy", None),
"sklearn": ("http://scikit-learn.org/stable", None),
"pandas": ("http://pandas.pydata.org/pandas-docs/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
}
#!/usr/bin/python3
# Halide tutorial lesson 8
# This lesson demonstrates how to schedule multi-stage pipelines.
# This lesson can be built by invoking the command:
# make tutorial_lesson_08_scheduling_2
# in a shell with the current directory at the top of the halide source tree.
# Otherwise, see the platform-specific compiler invocations below.
# On linux, you can compile and run it like so:
# g++ lesson_08*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide -lpthread -ldl -o lesson_08
# LD_LIBRARY_PATH=../bin ./lesson_08
# On os x:
# g++ lesson_08*.cpp -g -std=c++11 -I ../include -L ../bin -lHalide -o lesson_08
# DYLD_LIBRARY_PATH=../bin ./lesson_08
#include "Halide.h"
#include <stdio.h>
#using namespace Halide
import halide as hl
import numpy as np
import math
def main():
# First we'll declare some Vars to use below.
x, y = hl.Var("x"), hl.Var("y")
# Let's examine various scheduling options for a simple two stage
# pipeline. We'll start with the default schedule:
if True:
print("="*50)
producer, consumer = hl.Func("producer_default"), hl.Func("consumer_default")
# The first stage will be some simple pointwise math similar
# to our familiar gradient function. The value at position x,
# y is the sqrt of product of x and y.
producer[x, y] = hl.sqrt(x * y)
# Now we'll add a second stage which adds together multiple
# points in the first stage.
consumer[x, y] = (producer[x, y] +
producer[x, y+1] +
producer[x+1, y] +
producer[x+1, y+1])
# We'll turn on tracing for both functions.
consumer.trace_stores()
producer.trace_stores()
# And evaluate it over a 4x4 box.
print("\nEvaluating producer-consumer pipeline with default schedule")
consumer.realize(4, 4)
# There were no messages about computing values of the
# producer. This is because the default schedule fully
# inlines 'producer' into 'consumer'. It is as if we had
# written the following code instead:
# consumer[x, y] = (sqrt(x * y) +
# sqrt(x * (y + 1)) +
# sqrt((x + 1) * y) +
# sqrt((x + 1) * (y + 1)))
# All calls to 'producer' have been replaced with the body of
# 'producer', with the arguments substituted in for the
# variables.
# The equivalent C code is:
result = np.empty((4, 4), dtype=np.float32)
for yy in range(4):
for xx in range(4):
result[yy][xx] = (math.sqrt(xx*yy) +
math.sqrt(xx*(yy+1)) +
math.sqrt((xx+1)*yy) +
math.sqrt((xx+1)*(yy+1)))
print()
# If we look at the loop nest, the producer doesn't appear
# at all. It has been inlined into the consumer.
print("Pseudo-code for the schedule:")
consumer.print_loop_nest()
print()
# Next we'll examine the next simplest option - computing all
# values required in the producer before computing any of the
# consumer. We call this schedule "root".
if True:
print("="*50)
# Start with the same function definitions:
producer, consumer = hl.Func("producer_root"), hl.Func("consumer_root")
producer[x, y] = hl.sqrt(x * y)
consumer[x, y] = (producer[x, y] +
producer[x, y+1] +
producer[x+1, y] +
producer[x+1, y+1])
# Tell Halide to evaluate all of producer before any of consumer.
producer.compute_root()
# Turn on tracing.
consumer.trace_stores()
producer.trace_stores()
# Compile and run.
print("\nEvaluating producer.compute_root()")
consumer.realize(4, 4)
# Reading the output we can see that:
# A) There were stores to producer.
# B) They all happened before any stores to consumer.
# Equivalent C:
result = np.empty((4, 4), dtype=np.float32)
# Allocate some temporary storage for the producer.
producer_storage = np.empty((5, 5), dtype=np.float32)
# Compute the producer.
for yy in range(5):
for xx in range(5):
producer_storage[yy][xx] = math.sqrt(xx * yy)
# Compute the consumer. Skip the prints this time.
for yy in range(4):
for xx in range(4):
result[yy][xx] = (producer_storage[yy][xx] +
producer_storage[yy+1][xx] +
producer_storage[yy][xx+1] +
producer_storage[yy+1][xx+1])
# Note that consumer was evaluated over a 4x4 box, so Halide
# automatically inferred that producer was needed over a 5x5
# box. This is the same 'bounds inference' logic we saw in
# the previous lesson, where it was used to detect and avoid
# out-of-bounds reads from an input image.
# If we print the loop nest, we'll see something very
# similar to the C above.
print("Pseudo-code for the schedule:")
consumer.print_loop_nest()
print()
# Let's compare the two approaches above from a performance
# perspective.
# Full inlining (the default schedule):
# - Temporary memory allocated: 0
# - Loads: 0
# - Stores: 16
# - Calls to sqrt: 64
# producer.compute_root():
# - Temporary memory allocated: 25 floats
# - Loads: 64
# - Stores: 39
# - Calls to sqrt: 25
# There's a trade-off here. Full inlining used minimal temporary
# memory and memory bandwidth, but did a whole bunch of redundant
# expensive math (calling sqrt). It evaluated most points in
# 'producer' four times. The second schedule,
# producer.compute_root(), did the minimum number of calls to
# sqrt, but used more temporary memory and more memory bandwidth.
# In any given situation the correct choice can be difficult to
# make. If you're memory-bandwidth limited, or don't have much
# memory (e.g. because you're running on an old cell-phone), then
# it can make sense to do redundant math. On the other hand, sqrt
# is expensive, so if you're compute-limited then fewer calls to
# sqrt will make your program faster. Adding vectorization or
# multi-core parallelism tilts the scales in favor of doing
# redundant work, because firing up multiple cpu cores increases
# the amount of math you can do per second, but doesn't increase
# your system memory bandwidth or capacity.
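# (A quick sanity check on the sqrt counts above, added here and not part of
# the original lesson text: with full inlining, each of the 4x4 = 16 consumer
# pixels recomputes its 4 producer values, giving 16 * 4 = 64 calls; with
# producer.compute_root(), the producer is evaluated exactly once over the
# 5x5 region the consumer needs, giving 25 calls.)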
# We can make choices in between full inlining and
# compute_root. Next we'll alternate between computing the
# producer and consumer on a per-scanline basis:
if True:
print("="*50)
# Start with the same function definitions:
producer, consumer = hl.Func("producer_y"), hl.Func("consumer_y")
producer[x, y] = hl.sqrt(x * y)
consumer[x, y] = (producer[x, y] +
producer[x, y+1] +
producer[x+1, y] +
producer[x+1, y+1])
# Tell Halide to evaluate producer as needed per y coordinate
# of the consumer:
producer.compute_at(consumer, y)
# This places the code that computes the producer just
# *inside* the consumer's for loop over y, as in the
# equivalent C below.
# Turn on tracing.
producer.trace_stores()
consumer.trace_stores()
# Compile and run.
print("\nEvaluating producer.compute_at(consumer, y)")
consumer.realize(4, 4)
# Reading the log you should see that producer and consumer
# alternate on a per-scanline basis. Let's look at the
# equivalent C:
result = np.empty((4, 4), dtype=np.float32)
# There's an outer loop over scanlines of consumer:
for yy in range(4):
# Allocate space and compute enough of the producer to
# satisfy this single scanline of the consumer. This
# means a 5x2 box of the producer.
producer_storage = np.empty((2, 5), dtype=np.float32)
for py in range(yy, yy + 2):
for px in range(5):
producer_storage[py-yy][px] = math.sqrt(px * py)
# Compute a scanline of the consumer.
for xx in range(4):
result[yy][xx] = (producer_storage[0][xx] +
producer_storage[1][xx] +
producer_storage[0][xx+1] +
producer_storage[1][xx+1])
# Again, if we print the loop nest, we'll see something very
# similar to the C above.
print("Pseudo-code for the schedule:")
consumer.print_loop_nest()
print()
# The performance characteristics of this strategy are in
# between inlining and compute root. We still allocate some
# temporary memory, but less than compute_root, and with
# better locality (we load from it soon after writing to it,
# so for larger images, values should still be in cache). We
# still do some redundant work, but less than full inlining:
# producer.compute_at(consumer, y):
# - Temporary memory allocated: 10 floats
# - Loads: 64
# - Stores: 56
# - Calls to sqrt: 40
# We could also say producer.compute_at(consumer, x), but this
# would be very similar to full inlining (the default
# schedule). Instead let's distinguish between the loop level at
# which we allocate storage for producer, and the loop level at
# which we actually compute it. This unlocks a few optimizations.
if True:
print("="*50)
producer, consumer = hl.Func("producer_store_root_compute_y"), hl.Func("consumer_store_root_compute_y")
producer[x, y] = hl.sqrt(x * y)
consumer[x, y] = (producer[x, y] +
producer[x, y+1] +
producer[x+1, y] +
producer[x+1, y+1])
# Tell Halide to make a buffer to store all of producer at
# the outermost level:
producer.store_root()
# ... but compute it as needed per y coordinate of the
# consumer.
producer.compute_at(consumer, y)
producer.trace_stores()
consumer.trace_stores()
print("\nEvaluating producer.store_root().compute_at(consumer, y)")
consumer.realize(4, 4)
# Reading the log you should see that producer and consumer
# again alternate on a per-scanline basis. It computes a 5x2
# box of the producer to satisfy the first scanline of the
# consumer, but after that it only computes a 5x1 box of the
# output for each new scanline of the consumer!
#
# Halide has detected that for all scanlines except for the
# first, it can reuse the values already sitting in the
# buffer we've allocated for producer. Let's look at the
# equivalent C:
result = np.empty((4, 4), dtype=np.float32)
# producer.store_root() implies that storage goes here:
producer_storage = np.empty((5, 5), dtype=np.float32)
# There's an outer loop over scanlines of consumer:
for yy in range(4):
# Compute enough of the producer to satisfy this scanline
# of the consumer.
for py in range(yy, yy + 2):
# Skip over rows of producer that we've already
# computed in a previous iteration.
if yy > 0 and py == yy:
continue
for px in range(5):
producer_storage[py][px] = math.sqrt(px * py)
# Compute a scanline of the consumer.
for xx in range(4):
result[yy][xx] = (producer_storage[yy][xx] +
producer_storage[yy+1][xx] +
producer_storage[yy][xx+1] +
producer_storage[yy+1][xx+1])
print("Pseudo-code for the schedule:")
consumer.print_loop_nest()
print()
# The performance characteristics of this strategy are pretty
# good! The numbers are similar to compute_root, except locality
# is better. We're doing the minimum number of sqrt calls,
# and we load values soon after they are stored, so we're
# probably making good use of the cache:
# producer.store_root().compute_at(consumer, y):
# - Temporary memory allocated: 10 floats
# - Loads: 64
# - Stores: 39
# - Calls to sqrt: 25
# Note that my claimed amount of memory allocated doesn't
# match the reference C code. Halide is performing one more
# optimization under the hood. It folds the storage for the
# producer down into a circular buffer of two
# scanlines. Equivalent C would actually look like this:
if True:
# Actually store 2 scanlines instead of 5
producer_storage = np.empty((2, 5), dtype=np.float32)
for yy in range(4):
for py in range(yy, yy + 2):
if yy > 0 and py == yy:
continue
for px in range(5):
# Stores to producer_storage have their y coordinate bit-masked.
producer_storage[py & 1][px] = math.sqrt(px * py)
# Compute a scanline of the consumer.
for xx in range(4):
# Loads from producer_storage have their y coordinate bit-masked.
result[yy][xx] = (producer_storage[yy & 1][xx] +
producer_storage[(yy+1) & 1][xx] +
producer_storage[yy & 1][xx+1] +
producer_storage[(yy+1) & 1][xx+1])
# We can do even better, by leaving the storage outermost, but
# moving the computation into the innermost loop:
if True:
print("="*50)
producer, consumer = hl.Func("producer_store_root_compute_x"), hl.Func("consumer_store_root_compute_x")
producer[x, y] = hl.sqrt(x * y)
consumer[x, y] = (producer[x, y] +
producer[x, y+1] +
producer[x+1, y] +
producer[x+1, y+1])
# Store outermost, compute innermost.
producer.store_root().compute_at(consumer, x)
producer.trace_stores()
consumer.trace_stores()
print("\nEvaluating producer.store_root().compute_at(consumer, x)")
consumer.realize(4, 4)
# Reading the log, you should see that producer and consumer
# now alternate on a per-pixel basis. Here's the equivalent C:
result = np.empty((4, 4), dtype=np.float32)
# producer.store_root() implies that storage goes here, but
# we can fold it down into a circular buffer of two
# scanlines:
producer_storage = np.empty((2, 5), dtype=np.float32)
# For every pixel of the consumer:
for yy in range(4):
for xx in range(4):
# Compute enough of the producer to satisfy this
# pixel of the consumer, but skip values that we've
# already computed:
if (yy == 0) and (xx == 0):
producer_storage[yy & 1][xx] = math.sqrt(xx*yy)
if yy == 0:
producer_storage[yy & 1][xx+1] = math.sqrt((xx+1)*yy)
if xx == 0:
producer_storage[(yy+1) & 1][xx] = math.sqrt(xx*(yy+1))
producer_storage[(yy+1) & 1][xx+1] = math.sqrt((xx+1)*(yy+1))
result[yy][xx] = (producer_storage[yy & 1][xx] +
producer_storage[(yy+1) & 1][xx] +
producer_storage[yy & 1][xx+1] +
producer_storage[(yy+1) & 1][xx+1])
print("Pseudo-code for the schedule:")
consumer.print_loop_nest()
print()
# The performance characteristics of this strategy are the
# best so far. One of the four values of the producer we need
# is probably still sitting in a register, so I won't count
# it as a load:
# producer.store_root().compute_at(consumer, x):
# - Temporary memory allocated: 10 floats
# - Loads: 48
# - Stores: 56
# - Calls to sqrt: 40
# So what's the catch? Why not always do
# producer.store_root().compute_at(consumer, x) for this type of
# code?
#
# The answer is parallelism. In both of the previous two
# strategies we've assumed that values computed on previous
# iterations are lying around for us to reuse. This assumes that
# previous values of x or y happened earlier in time and have
# finished. This is not true if you parallelize or vectorize
# either loop. Darn. If you parallelize, Halide won't inject the
# optimizations that skip work already done if there's a parallel
# loop in between the store_at level and the compute_at level,
# and won't fold the storage down into a circular buffer either,
# which makes our store_root pointless.
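# As a sketch (not part of the original lesson): the problematic combination
# would look something like this:
#
#   producer.store_root().compute_at(consumer, x)
#   consumer.parallel(y)
#
# With scanlines of the consumer running on different threads in arbitrary
# order, Halide can no longer assume that earlier rows have finished, so it
# recomputes producer values instead of reusing them and keeps the full
# store_root buffer rather than folding it into two scanlines.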
# We're running out of options. We can make new ones by
# splitting. We can store_at or compute_at at the natural
# variables of the consumer (x and y), or we can split x or y
# into new inner and outer sub-variables and then schedule with
# respect to those. We'll use this to express fusion in tiles:
if True:
print("="*50)
producer, consumer = hl.Func("producer_tile"), hl.Func("consumer_tile")
producer[x, y] = hl.sqrt(x * y)
consumer[x, y] = (producer[x, y] +
producer[x, y+1] +
producer[x+1, y] +
producer[x+1, y+1])
# Tile the consumer using 2x2 tiles.
x_outer, y_outer = hl.Var("x_outer"), hl.Var("y_outer")
x_inner, y_inner = hl.Var("x_inner"), hl.Var("y_inner")
consumer.tile(x, y, x_outer, y_outer, x_inner, y_inner, 2, 2)
# Compute the producer per tile of the consumer
producer.compute_at(consumer, x_outer)
# Notice that I wrote my schedule starting from the end of
# the pipeline (the consumer). This is because the schedule
# for the producer refers to x_outer, which we introduced
# when we tiled the consumer. You can write it in the other
# order, but it tends to be harder to read.
# Turn on tracing.
producer.trace_stores()
consumer.trace_stores()
print("\nEvaluating:"
"consumer.tile(x, y, x_outer, y_outer, x_inner, y_inner, 2, 2)"
"producer.compute_at(consumer, x_outer)")
consumer.realize(4, 4)
# Reading the log, you should see that producer and consumer
# now alternate on a per-tile basis. Here's the equivalent C:
result = np.empty((4, 4), dtype=np.float32)
# For every tile of the consumer:
for y_outer in range(2):
for x_outer in range(2):
# Compute the x and y coords of the start of this tile.
x_base = x_outer*2
y_base = y_outer*2
# Compute enough of producer to satisfy this tile. A
# 2x2 tile of the consumer requires a 3x3 tile of the
# producer.
producer_storage = np.empty((3, 3), dtype=np.float32)
for py in range(y_base, y_base + 3):
for px in range(x_base, x_base + 3):
producer_storage[py-y_base][px-x_base] = math.sqrt(px * py)
# Compute this tile of the consumer
for y_inner in range(2):
for x_inner in range(2):
xx = x_base + x_inner
yy = y_base + y_inner
result[yy][xx] = (producer_storage[yy - y_base][xx - x_base] +
producer_storage[yy - y_base + 1][xx - x_base] +
producer_storage[yy - y_base][xx - x_base + 1] +
producer_storage[yy - y_base + 1][xx - x_base + 1])
print("Pseudo-code for the schedule:")
consumer.print_loop_nest()
print()
# Tiling can make sense for problems like this one with
# stencils that reach outwards in x and y. Each tile can be
# computed independently in parallel, and the redundant work
# done by each tile isn't so bad once the tiles get large
# enough.
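# (A sketch, not part of the original lesson: because the tiles are
# independent, the tiled schedule above could also be parallelized over the
# tile index, e.g.
#
#   consumer.tile(x, y, x_outer, y_outer, x_inner, y_inner, 2, 2)
#   consumer.parallel(y_outer)
#   producer.compute_at(consumer, x_outer)
#
# Each tile recomputes the producer values it needs, so there is no
# cross-tile reuse to break and the loop over y_outer can safely run on
# multiple threads.)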
# Let's try a mixed strategy that combines what we have done with
# splitting, parallelizing, and vectorizing. This is one that
# often works well in practice for large images. If you
# understand this schedule, then you understand 95% of scheduling
# in Halide.
if True:
print("="*50)
producer, consumer = hl.Func("producer_mixed"), hl.Func("consumer_mixed")
producer[x, y] = hl.sqrt(x * y)
consumer[x, y] = (producer[x, y] +
producer[x, y+1] +
producer[x+1, y] +
producer[x+1, y+1])
# Split the y coordinate of the consumer into strips of 16 scanlines:
yo, yi = hl.Var("yo"), hl.Var("yi")
consumer.split(y, yo, yi, 16)
# Compute the strips using a thread pool and a task queue.
consumer.parallel(yo)
# Vectorize across x by a factor of four.
consumer.vectorize(x, 4)
# Now store the producer per-strip. This will be 17 scanlines
# of the producer (16+1), but hopefully it will fold down
# into a circular buffer of two scanlines:
producer.store_at(consumer, yo)
# Within each strip, compute the producer per scanline of the
# consumer, skipping work done on previous scanlines.
producer.compute_at(consumer, yi)
# Also vectorize the producer (because sqrt is vectorizable on x86 using SSE).
producer.vectorize(x, 4)
# Let's leave tracing off this time, because we're going to
# evaluate over a larger image.
# consumer.trace_stores()
# producer.trace_stores()
halide_result = consumer.realize(800, 600)
# Here's the equivalent (serial) C:
c_result = np.empty((600, 800), dtype=np.float32)
# For every strip of 16 scanlines
for yo in range(600//16 + 1): # (this loop is parallel in the Halide version)
# 16 doesn't divide 600, so push the last slice upwards to fit within [0, 599] (see lesson 05).
y_base = yo * 16
if y_base > (600-16):
y_base = 600-16
# Allocate a two-scanline circular buffer for the producer
producer_storage = np.empty((2, 801), dtype=np.float32)
# For every scanline in the strip of 16:
for yi in range(16):
yy = y_base + yi
for py in range(yy, yy+2):
# Skip scanlines already computed *within this task*
if (yi > 0) and (py == yy):
continue
# Compute this scanline of the producer in 4-wide vectors
for x_vec in range(800//4 + 1):
x_base = x_vec*4
# 4 doesn't divide 801, so push the last vector left (see lesson 05).
if x_base > (801 - 4):
x_base = 801 - 4
# If you're on x86, Halide generates SSE code for this part:
xx = [x_base + 0, x_base + 1, x_base + 2, x_base + 3]
vec = [math.sqrt(xx[0] * py),
math.sqrt(xx[1] * py),
math.sqrt(xx[2] * py),
math.sqrt(xx[3] * py)]
producer_storage[py & 1][xx[0]] = vec[0]
producer_storage[py & 1][xx[1]] = vec[1]
producer_storage[py & 1][xx[2]] = vec[2]
producer_storage[py & 1][xx[3]] = vec[3]
# Now compute consumer for this scanline:
for x_vec in range(800//4):
x_base = x_vec * 4
# Again, Halide's equivalent here uses SSE.
xx = [x_base, x_base + 1, x_base + 2, x_base + 3]
vec = [
(producer_storage[yy & 1][xx[0]] +
producer_storage[(yy+1) & 1][xx[0]] +
producer_storage[yy & 1][xx[0]+1] +
producer_storage[(yy+1) & 1][xx[0]+1]),
(producer_storage[yy & 1][xx[1]] +
producer_storage[(yy+1) & 1][xx[1]] +
producer_storage[yy & 1][xx[1]+1] +
producer_storage[(yy+1) & 1][xx[1]+1]),
(producer_storage[yy & 1][xx[2]] +
producer_storage[(yy+1) & 1][xx[2]] +
producer_storage[yy & 1][xx[2]+1] +
producer_storage[(yy+1) & 1][xx[2]+1]),
(producer_storage[yy & 1][xx[3]] +
producer_storage[(yy+1) & 1][xx[3]] +
producer_storage[yy & 1][xx[3]+1] +
producer_storage[(yy+1) & 1][xx[3]+1])
]
c_result[yy][xx[0]] = vec[0]
c_result[yy][xx[1]] = vec[1]
c_result[yy][xx[2]] = vec[2]
c_result[yy][xx[3]] = vec[3]
print("Pseudo-code for the schedule:")
consumer.print_loop_nest()
print()
# Look on my code, ye mighty, and despair!
# Let's check the C result against the Halide result. Doing
# this I found several bugs in my C implementation, which
# should tell you something.
for yy in range(600):
for xx in range(800):
error = halide_result(xx, yy) - c_result[yy][xx]
# It's floating-point math, so we'll allow some slop:
if (error < -0.001) or (error > 0.001):
raise Exception("halide_result(%d, %d) = %f instead of %f" % (
xx, yy, halide_result(xx, yy), c_result[yy][xx]))
# This stuff is hard. We ended up in a three-way trade-off
# between memory bandwidth, redundant work, and
# parallelism. Halide can't make the correct choice for you
# automatically (sorry). Instead it tries to make it easier for
# you to explore various options, without messing up your
# program. In fact, Halide promises that scheduling calls like
# compute_root won't change the meaning of your algorithm -- you
# should get the same bits back no matter how you schedule
# things.
# So be empirical! Experiment with various schedules and keep a
# log of performance. Form hypotheses and then try to prove
# yourself wrong. Don't assume that you just need to vectorize
# your code by a factor of four and run it on eight cores and
# you'll get 32x faster. This almost never works. Modern systems
# are complex enough that you can't predict performance reliably
# without running your code.
# We suggest you start by scheduling all of your non-trivial
# stages compute_root, and then work from the end of the pipeline
# upwards, inlining, parallelizing, and vectorizing each stage in
# turn until you reach the top.
# Halide is not just about vectorizing and parallelizing your
# code. That's not enough to get you very far. Halide is about
# giving you tools that help you quickly explore different
# trade-offs between locality, redundant work, and parallelism,
# without messing up the actual result you're trying to compute.
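    # A hedged sketch of that workflow, reusing the producer/consumer pipeline
    # from this lesson (the split factor and vector width below are assumptions,
    # not tuned values):
    #
    #   # Step 1: schedule every non-trivial stage compute_root as a slow baseline.
    #   producer.compute_root()
    #   consumer.compute_root()
    #   # Step 2: work upwards from the output stage.
    #   consumer.split(y, yo, yi, 16)
    #   consumer.parallel(yo)
    #   consumer.vectorize(x, 4)
    #   # Step 3: then decide where each earlier stage is stored and computed.
    #   producer.store_at(consumer, yo)
    #   producer.compute_at(consumer, yi)
    #   producer.vectorize(x, 4)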
print("="*50)
print("Success!")
return 0
if __name__ == "__main__":
main()
|
|
from __future__ import print_function
import argparse
import itertools
import os
import random
import re
import shlex
import string
import sys
import warnings
from collections import OrderedDict
from fnmatch import fnmatchcase
from subprocess import list2cmdline
import pkg_resources
import pluggy
import py
import toml
import tox
from tox.constants import INFO
from tox.interpreters import Interpreters, NoInterpreterInfo
hookimpl = tox.hookimpl
"""DEPRECATED - REMOVE - this is left for compatibility with plugins importing this from here.
Instead create a hookimpl in your code with:
import pluggy
hookimpl = pluggy.HookimplMarker("tox")
"""
default_factors = tox.PYTHON.DEFAULT_FACTORS
"""DEPRECATED MOVE - please update to new location."""
def get_plugin_manager(plugins=()):
# initialize plugin manager
import tox.venv
pm = pluggy.PluginManager("tox")
pm.add_hookspecs(tox.hookspecs)
pm.register(tox.config)
pm.register(tox.interpreters)
pm.register(tox.venv)
pm.register(tox.session)
from tox import package
pm.register(package)
pm.load_setuptools_entrypoints("tox")
for plugin in plugins:
pm.register(plugin)
pm.check_pending()
return pm
class Parser:
"""Command line and ini-parser control object."""
def __init__(self):
self.argparser = argparse.ArgumentParser(description="tox options", add_help=False)
self._testenv_attr = []
def add_argument(self, *args, **kwargs):
""" add argument to command line parser. This takes the
same arguments that ``argparse.ArgumentParser.add_argument``.
"""
return self.argparser.add_argument(*args, **kwargs)
def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
""" add an ini-file variable for "testenv" section.
Types are specified as strings like "bool", "line-list", "string", "argv", "path",
"argvlist".
The ``postprocess`` function will be called for each testenv
like ``postprocess(testenv_config=testenv_config, value=value)``
where ``value`` is the value as read from the ini (or the default value)
and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
which will receive all ini-variables as object attributes.
Any postprocess function must return a value which will then be set
as the final value in the testenv section.
"""
self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess))
def add_testenv_attribute_obj(self, obj):
""" add an ini-file variable as an object.
This works as the ``add_testenv_attribute`` function but expects
"name", "type", "help", and "postprocess" attributes on the object.
"""
assert hasattr(obj, "name")
assert hasattr(obj, "type")
assert hasattr(obj, "help")
assert hasattr(obj, "postprocess")
self._testenv_attr.append(obj)
def parse_cli(self, args):
return self.argparser.parse_args(args)
def _format_help(self):
return self.argparser.format_help()
class VenvAttribute:
def __init__(self, name, type, default, help, postprocess):
self.name = name
self.type = type
self.default = default
self.help = help
self.postprocess = postprocess
class DepOption:
name = "deps"
type = "line-list"
help = "each line specifies a dependency in pip/setuptools format."
default = ()
def postprocess(self, testenv_config, value):
deps = []
config = testenv_config.config
for depline in value:
m = re.match(r":(\w+):\s*(\S+)", depline)
if m:
iname, name = m.groups()
ixserver = config.indexserver[iname]
else:
name = depline.strip()
ixserver = None
# we need to process options, in case they contain a space,
# as the subprocess call to pip install will otherwise fail.
# in case of a short option, we remove the space
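            # Illustrative transformations, assuming "-r" / "--requirement" are
            # among the recognized pip install options:
            #   "-r requirements.txt"            -> "-rrequirements.txt"
            #   "--requirement requirements.txt" -> "--requirement=requirements.txt"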
for option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT:
if name.startswith(option):
name = "{}{}".format(option, name[len(option) :].strip())
# in case of a long option, we add an equal sign
for option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT:
name_start = "{} ".format(option)
if name.startswith(name_start):
name = "{}={}".format(option, name[len(option) :].strip())
name = self._replace_forced_dep(name, config)
deps.append(DepConfig(name, ixserver))
return deps
def _replace_forced_dep(self, name, config):
"""Override given dependency config name. Take ``--force-dep-version`` option into account.
        :param name: a single dependency spec, for example "pkg==1.0".
:param config: ``Config`` instance
:return: the new dependency that should be used for virtual environments
"""
if not config.option.force_dep:
return name
for forced_dep in config.option.force_dep:
if self._is_same_dep(forced_dep, name):
return forced_dep
return name
@classmethod
def _is_same_dep(cls, dep1, dep2):
"""Definitions are the same if they refer to the same package, even if versions differ."""
dep1_name = pkg_resources.Requirement.parse(dep1).project_name
try:
dep2_name = pkg_resources.Requirement.parse(dep2).project_name
except pkg_resources.RequirementParseError:
# we couldn't parse a version, probably a URL
return False
return dep1_name == dep2_name
class PosargsOption:
name = "args_are_paths"
type = "bool"
default = True
help = "treat positional args in commands as paths"
def postprocess(self, testenv_config, value):
config = testenv_config.config
args = config.option.args
if args:
if value:
args = []
for arg in config.option.args:
if arg:
origpath = config.invocationcwd.join(arg, abs=True)
if origpath.check():
arg = testenv_config.changedir.bestrelpath(origpath)
args.append(arg)
testenv_config._reader.addsubstitutions(args)
return value
class InstallcmdOption:
name = "install_command"
type = "argv"
default = "python -m pip install {opts} {packages}"
help = "install command for dependencies and package under test."
def postprocess(self, testenv_config, value):
if "{packages}" not in value:
raise tox.exception.ConfigError(
"'install_command' must contain '{packages}' substitution"
)
return value
def parseconfig(args, plugins=()):
"""Parse the configuration file and create a Config object.
    :param plugins: sequence of plugin objects to register with the plugin manager
        in addition to the setuptools-discovered ones.
    :param list[str] args: list of arguments.
    :rtype: :class:`Config`
    :raise SystemExit: tox ini file is not found
"""
pm = get_plugin_manager(plugins)
config, option = parse_cli(args, pm)
for config_file in propose_configs(option.configfile):
config_type = config_file.basename
content = None
if config_type == "pyproject.toml":
toml_content = get_py_project_toml(config_file)
try:
content = toml_content["tool"]["tox"]["legacy_tox_ini"]
except KeyError:
continue
ParseIni(config, config_file, content)
pm.hook.tox_configure(config=config) # post process config object
break
else:
msg = "tox config file (either {}) not found"
candidates = ", ".join(INFO.CONFIG_CANDIDATES)
feedback(msg.format(candidates), sysexit=not (option.help or option.helpini))
return config
def get_py_project_toml(path):
with open(str(path)) as file_handler:
config_data = toml.load(file_handler)
return config_data
def propose_configs(cli_config_file):
from_folder = py.path.local()
if cli_config_file is not None:
if os.path.isfile(cli_config_file):
yield py.path.local(cli_config_file)
return
if os.path.isdir(cli_config_file):
from_folder = py.path.local(cli_config_file)
else:
print(
"ERROR: {} is neither file or directory".format(cli_config_file), file=sys.stderr
)
return
for basename in INFO.CONFIG_CANDIDATES:
if from_folder.join(basename).isfile():
yield from_folder.join(basename)
for path in from_folder.parts(reverse=True):
ini_path = path.join(basename)
if ini_path.check():
yield ini_path
def parse_cli(args, pm):
parser = Parser()
pm.hook.tox_addoption(parser=parser)
option = parser.parse_cli(args)
if option.version:
print(get_version_info(pm))
raise SystemExit(0)
interpreters = Interpreters(hook=pm.hook)
config = Config(pluginmanager=pm, option=option, interpreters=interpreters, parser=parser)
return config, option
def feedback(msg, sysexit=False):
print("ERROR: {}".format(msg), file=sys.stderr)
if sysexit:
raise SystemExit(1)
def get_version_info(pm):
out = ["{} imported from {}".format(tox.__version__, tox.__file__)]
plugin_dist_info = pm.list_plugin_distinfo()
if plugin_dist_info:
out.append("registered plugins:")
for mod, egg_info in plugin_dist_info:
source = getattr(mod, "__file__", repr(mod))
out.append(" {}-{} at {}".format(egg_info.project_name, egg_info.version, source))
return "\n".join(out)
class SetenvDict(object):
_DUMMY = object()
def __init__(self, definitions, reader):
self.definitions = definitions
self.reader = reader
self.resolved = {}
self._lookupstack = []
def __repr__(self):
return "{}: {}".format(self.__class__.__name__, self.definitions)
def __contains__(self, name):
return name in self.definitions
def get(self, name, default=None):
try:
return self.resolved[name]
except KeyError:
try:
if name in self._lookupstack:
raise KeyError(name)
val = self.definitions[name]
except KeyError:
return os.environ.get(name, default)
self._lookupstack.append(name)
try:
self.resolved[name] = res = self.reader._replace(val)
finally:
self._lookupstack.pop()
return res
def __getitem__(self, name):
x = self.get(name, self._DUMMY)
if x is self._DUMMY:
raise KeyError(name)
return x
def keys(self):
return self.definitions.keys()
def __setitem__(self, name, value):
self.definitions[name] = value
self.resolved[name] = value
@tox.hookimpl
def tox_addoption(parser):
parser.add_argument(
"--version",
action="store_true",
dest="version",
help="report version information to stdout.",
)
parser.add_argument(
"-h", "--help", action="store_true", dest="help", help="show help about options"
)
parser.add_argument(
"--help-ini", "--hi", action="store_true", dest="helpini", help="show help about ini-names"
)
parser.add_argument(
"-v",
action="count",
dest="verbose_level",
default=0,
help="increase verbosity of reporting output."
"-vv mode turns off output redirection for package installation, "
"above level two verbosity flags are passed through to pip (with two less level)",
)
parser.add_argument(
"-q",
action="count",
dest="quiet_level",
default=0,
help="progressively silence reporting output.",
)
parser.add_argument(
"--showconfig",
action="store_true",
help="show configuration information for all environments. ",
)
parser.add_argument(
"-l",
"--listenvs",
action="store_true",
dest="listenvs",
help="show list of test environments (with description if verbose)",
)
parser.add_argument(
"-a",
"--listenvs-all",
action="store_true",
dest="listenvs_all",
help="show list of all defined environments (with description if verbose)",
)
parser.add_argument(
"-c",
action="store",
default=None,
dest="configfile",
help="config file name or directory with 'tox.ini' file.",
)
parser.add_argument(
"-e",
action="append",
dest="env",
metavar="envlist",
help="work against specified environments (ALL selects all).",
)
parser.add_argument(
"--notest", action="store_true", dest="notest", help="skip invoking test commands."
)
parser.add_argument(
"--sdistonly",
action="store_true",
dest="sdistonly",
help="only perform the sdist packaging activity.",
)
parser.add_argument(
"--parallel--safe-build",
action="store_true",
dest="parallel_safe_build",
help="(deprecated) ensure two tox builds can run in parallel "
"(uses a lock file in the tox workdir with .lock extension)",
)
parser.add_argument(
"--installpkg",
action="store",
default=None,
metavar="PATH",
help="use specified package for installation into venv, instead of creating an sdist.",
)
parser.add_argument(
"--develop",
action="store_true",
dest="develop",
help="install package in the venv using 'setup.py develop' via 'pip -e .'",
)
parser.add_argument(
"-i",
"--index-url",
action="append",
dest="indexurl",
metavar="URL",
help="set indexserver url (if URL is of form name=url set the "
"url for the 'name' indexserver, specifically)",
)
parser.add_argument(
"--pre",
action="store_true",
dest="pre",
help="install pre-releases and development versions of dependencies. "
"This will pass the --pre option to install_command "
"(pip by default).",
)
parser.add_argument(
"-r",
"--recreate",
action="store_true",
dest="recreate",
help="force recreation of virtual environments",
)
parser.add_argument(
"--result-json",
action="store",
dest="resultjson",
metavar="PATH",
help="write a json file with detailed information "
"about all commands and results involved.",
)
# We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
parser.add_argument(
"--hashseed",
action="store",
metavar="SEED",
default=None,
help="set PYTHONHASHSEED to SEED before running commands. "
"Defaults to a random integer in the range [1, 4294967295] "
"([1, 1024] on Windows). "
"Passing 'noset' suppresses this behavior.",
)
parser.add_argument(
"--force-dep",
action="append",
metavar="REQ",
default=None,
help="Forces a certain version of one of the dependencies "
"when configuring the virtual environment. REQ Examples "
"'pytest<2.7' or 'django>=1.6'.",
)
parser.add_argument(
"--sitepackages",
action="store_true",
help="override sitepackages setting to True in all envs",
)
parser.add_argument(
"--alwayscopy", action="store_true", help="override alwayscopy setting to True in all envs"
)
cli_skip_missing_interpreter(parser)
parser.add_argument(
"--workdir",
action="store",
dest="workdir",
metavar="PATH",
default=None,
help="tox working directory",
)
parser.add_argument(
"args", nargs="*", help="additional arguments available to command positional substitution"
)
parser.add_testenv_attribute(
name="envdir",
type="path",
default="{toxworkdir}/{envname}",
help="set venv directory -- be very careful when changing this as tox "
"will remove this directory when recreating an environment",
)
# add various core venv interpreter attributes
def setenv(testenv_config, value):
setenv = value
config = testenv_config.config
if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
setenv["PYTHONHASHSEED"] = config.hashseed
setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
return setenv
parser.add_testenv_attribute(
name="setenv",
type="dict_setenv",
postprocess=setenv,
help="list of X=Y lines with environment variable settings",
)
def basepython_default(testenv_config, value):
"""either user set or proposed from the factor name
in both cases we check that the factor name implied python version and the resolved
python interpreter version match up; if they don't we warn, unless ignore base
python conflict is set in which case the factor name implied version if forced
"""
for factor in testenv_config.factors:
if factor in tox.PYTHON.DEFAULT_FACTORS:
implied_python = tox.PYTHON.DEFAULT_FACTORS[factor]
break
else:
implied_python, factor = None, None
if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
return implied_python
proposed_python = (implied_python or sys.executable) if value is None else str(value)
if implied_python is not None and implied_python != proposed_python:
testenv_config.basepython = proposed_python
implied_version = tox.PYTHON.PY_FACTORS_RE.match(factor).group(2)
python_info_for_proposed = testenv_config.python_info
if not isinstance(python_info_for_proposed, NoInterpreterInfo):
proposed_version = "".join(
str(i) for i in python_info_for_proposed.version_info[0:2]
)
if implied_version != proposed_version:
# TODO(stephenfin): Raise an exception here in tox 4.0
warnings.warn(
"conflicting basepython version (set {}, should be {}) for env '{}';"
"resolve conflict or set ignore_basepython_conflict".format(
proposed_version, implied_version, testenv_config.envname
)
)
return proposed_python
parser.add_testenv_attribute(
name="basepython",
type="string",
default=None,
postprocess=basepython_default,
help="executable name or path of interpreter used to create a virtual test environment.",
)
def merge_description(testenv_config, value):
"""the reader by default joins generated description with new line,
replace new line with space"""
return value.replace("\n", " ")
parser.add_testenv_attribute(
name="description",
type="string",
default="",
postprocess=merge_description,
help="short description of this environment",
)
parser.add_testenv_attribute(
name="envtmpdir", type="path", default="{envdir}/tmp", help="venv temporary directory"
)
parser.add_testenv_attribute(
name="envlogdir", type="path", default="{envdir}/log", help="venv log directory"
)
parser.add_testenv_attribute(
name="downloadcache",
type="string",
default=None,
help="(ignored) has no effect anymore, pip-8 uses local caching by default",
)
parser.add_testenv_attribute(
name="changedir",
type="path",
default="{toxinidir}",
help="directory to change to when running commands",
)
parser.add_testenv_attribute_obj(PosargsOption())
parser.add_testenv_attribute(
name="skip_install",
type="bool",
default=False,
help="Do not install the current package. This can be used when you need the virtualenv "
"management but do not want to install the current package",
)
parser.add_testenv_attribute(
name="ignore_errors",
type="bool",
default=False,
help="if set to True all commands will be executed irrespective of their result error "
"status.",
)
def recreate(testenv_config, value):
if testenv_config.config.option.recreate:
return True
return value
parser.add_testenv_attribute(
name="recreate",
type="bool",
default=False,
postprocess=recreate,
help="always recreate this test environment.",
)
def passenv(testenv_config, value):
# Flatten the list to deal with space-separated values.
value = list(itertools.chain.from_iterable([x.split(" ") for x in value]))
passenv = {"PATH", "PIP_INDEX_URL", "LANG", "LANGUAGE", "LD_LIBRARY_PATH", "TOX_WORK_DIR"}
# read in global passenv settings
p = os.environ.get("TOX_TESTENV_PASSENV", None)
if p is not None:
env_values = [x for x in p.split() if x]
value.extend(env_values)
# we ensure that tmp directory settings are passed on
# we could also set it to the per-venv "envtmpdir"
# but this leads to very long paths when run with jenkins
# so we just pass it on by default for now.
if tox.INFO.IS_WIN:
passenv.add("SYSTEMDRIVE") # needed for pip6
passenv.add("SYSTEMROOT") # needed for python's crypto module
passenv.add("PATHEXT") # needed for discovering executables
passenv.add("COMSPEC") # needed for distutils cygwincompiler
passenv.add("TEMP")
passenv.add("TMP")
# for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4).
passenv.add("NUMBER_OF_PROCESSORS")
passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine()
passenv.add("USERPROFILE") # needed for `os.path.expanduser()`
passenv.add("MSYSTEM") # fixes #429
else:
passenv.add("TMPDIR")
for spec in value:
for name in os.environ:
if fnmatchcase(name.upper(), spec.upper()):
passenv.add(name)
return passenv
parser.add_testenv_attribute(
name="passenv",
type="line-list",
postprocess=passenv,
help="environment variables needed during executing test commands (taken from invocation "
"environment). Note that tox always passes through some basic environment variables "
"which are needed for basic functioning of the Python system. See --showconfig for the "
"eventual passenv setting.",
)
parser.add_testenv_attribute(
name="whitelist_externals",
type="line-list",
help="each lines specifies a path or basename for which tox will not warn "
"about it coming from outside the test environment.",
)
parser.add_testenv_attribute(
name="platform",
type="string",
default=".*",
help="regular expression which must match against ``sys.platform``. "
"otherwise testenv will be skipped.",
)
def sitepackages(testenv_config, value):
return testenv_config.config.option.sitepackages or value
def alwayscopy(testenv_config, value):
return testenv_config.config.option.alwayscopy or value
parser.add_testenv_attribute(
name="sitepackages",
type="bool",
default=False,
postprocess=sitepackages,
help="Set to ``True`` if you want to create virtual environments that also "
"have access to globally installed packages.",
)
parser.add_testenv_attribute(
name="alwayscopy",
type="bool",
default=False,
postprocess=alwayscopy,
help="Set to ``True`` if you want virtualenv to always copy files rather "
"than symlinking.",
)
def pip_pre(testenv_config, value):
return testenv_config.config.option.pre or value
parser.add_testenv_attribute(
name="pip_pre",
type="bool",
default=False,
postprocess=pip_pre,
help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ",
)
def develop(testenv_config, value):
option = testenv_config.config.option
return not option.installpkg and (value or option.develop)
parser.add_testenv_attribute(
name="usedevelop",
type="bool",
postprocess=develop,
default=False,
help="install package in develop/editable mode",
)
parser.add_testenv_attribute_obj(InstallcmdOption())
parser.add_testenv_attribute(
name="list_dependencies_command",
type="argv",
default="python -m pip freeze",
help="list dependencies for a virtual environment",
)
parser.add_testenv_attribute_obj(DepOption())
parser.add_testenv_attribute(
name="commands",
type="argvlist",
default="",
help="each line specifies a test command and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_pre",
type="argvlist",
default="",
help="each line specifies a setup command action and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_post",
type="argvlist",
default="",
help="each line specifies a teardown command and can use substitution.",
)
parser.add_testenv_attribute(
"ignore_outcome",
type="bool",
default=False,
help="if set to True a failing result of this testenv will not make "
"tox fail, only a warning will be produced",
)
parser.add_testenv_attribute(
"extras",
type="line-list",
help="list of extras to install with the source distribution or develop install",
)
def cli_skip_missing_interpreter(parser):
class SkipMissingInterpreterAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
value = "true" if values is None else values
if value not in ("config", "true", "false"):
raise argparse.ArgumentTypeError("value must be config, true or false")
setattr(namespace, self.dest, value)
parser.add_argument(
"--skip-missing-interpreters",
default="config",
metavar="val",
nargs="?",
action=SkipMissingInterpreterAction,
help="don't fail tests for missing interpreters: {config,true,false} choice",
)
class Config(object):
"""Global Tox config object."""
def __init__(self, pluginmanager, option, interpreters, parser):
self.envconfigs = OrderedDict()
"""Mapping envname -> envconfig"""
self.invocationcwd = py.path.local()
self.interpreters = interpreters
self.pluginmanager = pluginmanager
self.option = option
self._parser = parser
self._testenv_attr = parser._testenv_attr
"""option namespace containing all parsed command line options"""
@property
def homedir(self):
homedir = get_homedir()
if homedir is None:
homedir = self.toxinidir # FIXME XXX good idea?
return homedir
class TestenvConfig:
"""Testenv Configuration object.
In addition to some core attributes/properties this config object holds all
per-testenv ini attributes as attributes, see "tox --help-ini" for an overview.
"""
def __init__(self, envname, config, factors, reader):
#: test environment name
self.envname = envname
#: global tox config object
self.config = config
#: set of factors
self.factors = factors
self._reader = reader
self.missing_subs = []
"""Holds substitutions that could not be resolved.
        Prior to 2.8.1, missing substitutions crashed with a ConfigError even when the env was
        not part of the current test run. So we remember them here and only raise later, when
        the testenv is actually run.
"""
def get_envbindir(self):
"""Path to directory where scripts/binaries reside."""
if tox.INFO.IS_WIN and "jython" not in self.basepython and "pypy" not in self.basepython:
return self.envdir.join("Scripts")
else:
return self.envdir.join("bin")
@property
def envbindir(self):
return self.get_envbindir()
@property
def envpython(self):
"""Path to python executable."""
return self.get_envpython()
def get_envpython(self):
""" path to python/jython executable. """
if "jython" in str(self.basepython):
name = "jython"
else:
name = "python"
return self.envbindir.join(name)
def get_envsitepackagesdir(self):
"""Return sitepackagesdir of the virtualenv environment.
NOTE: Only available during execution, not during parsing.
"""
x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir)
return x
@property
def python_info(self):
"""Return sitepackagesdir of the virtualenv environment."""
return self.config.interpreters.get_info(envconfig=self)
def getsupportedinterpreter(self):
if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython:
raise tox.exception.UnsupportedInterpreter(
"Jython/Windows does not support installing scripts"
)
info = self.config.interpreters.get_info(envconfig=self)
if not info.executable:
raise tox.exception.InterpreterNotFound(self.basepython)
if not info.version_info:
raise tox.exception.InvocationError(
"Failed to get version_info for {}: {}".format(info.name, info.err)
)
return info.executable
testenvprefix = "testenv:"
def get_homedir():
try:
return py.path.local._gethomedir()
except Exception:
return None
def make_hashseed():
max_seed = 4294967295
if tox.INFO.IS_WIN:
max_seed = 1024
return str(random.randint(1, max_seed))
class ParseIni(object):
def __init__(self, config, ini_path, ini_data): # noqa
config.toxinipath = ini_path
config.toxinidir = config.toxinipath.dirpath()
self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data)
config._cfg = self._cfg
self.config = config
prefix = "tox" if ini_path.basename == "setup.cfg" else None
context_name = getcontextname()
if context_name == "jenkins":
reader = SectionReader(
"tox:jenkins", self._cfg, prefix=prefix, fallbacksections=["tox"]
)
dist_share_default = "{toxworkdir}/distshare"
elif not context_name:
reader = SectionReader("tox", self._cfg, prefix=prefix)
dist_share_default = "{homedir}/.tox/distshare"
else:
raise ValueError("invalid context")
if config.option.hashseed is None:
hash_seed = make_hashseed()
elif config.option.hashseed == "noset":
hash_seed = None
else:
hash_seed = config.option.hashseed
config.hashseed = hash_seed
reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)
# As older versions of tox may have bugs or incompatibilities that
# prevent parsing of tox.ini this must be the first thing checked.
config.minversion = reader.getstring("minversion", None)
if config.minversion:
tox_version = pkg_resources.parse_version(tox.__version__)
config_min_version = pkg_resources.parse_version(self.config.minversion)
if config_min_version > tox_version:
raise tox.exception.MinVersionError(
"tox version is {}, required is at least {}".format(
tox.__version__, self.config.minversion
)
)
self.ensure_requires_satisfied(reader.getlist("requires"))
if config.option.workdir is None:
config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
else:
config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True)
if config.option.skip_missing_interpreters == "config":
val = reader.getbool("skip_missing_interpreters", False)
config.option.skip_missing_interpreters = "true" if val else "false"
config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False)
# determine indexserver dictionary
config.indexserver = {"default": IndexServerConfig("default")}
prefix = "indexserver"
for line in reader.getlist(prefix):
name, url = map(lambda x: x.strip(), line.split("=", 1))
config.indexserver[name] = IndexServerConfig(name, url)
override = False
if config.option.indexurl:
for url_def in config.option.indexurl:
m = re.match(r"\W*(\w+)=(\S+)", url_def)
if m is None:
url = url_def
name = "default"
else:
name, url = m.groups()
if not url:
url = None
if name != "ALL":
config.indexserver[name].url = url
else:
override = url
# let ALL override all existing entries
if override:
for name in config.indexserver:
config.indexserver[name] = IndexServerConfig(name, override)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", dist_share_default)
config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp")
reader.addsubstitutions(distshare=config.distshare)
config.sdistsrc = reader.getpath("sdistsrc", None)
config.setupdir = reader.getpath("setupdir", "{toxinidir}")
config.logdir = config.toxworkdir.join("log")
self.parse_build_isolation(config, reader)
config.envlist, all_envs = self._getenvdata(reader, config)
# factors used in config or predefined
known_factors = self._list_section_factors("testenv")
known_factors.update({"py", "python"})
# factors stated in config envlist
stated_envlist = reader.getstring("envlist", replace=False)
if stated_envlist:
for env in _split_env(stated_envlist):
known_factors.update(env.split("-"))
# configure testenvs
for name in all_envs:
section = "{}{}".format(testenvprefix, name)
factors = set(name.split("-"))
if (
section in self._cfg
or factors <= known_factors
or all(
tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors
)
):
config.envconfigs[name] = self.make_envconfig(name, section, reader._subs, config)
all_develop = all(
name in config.envconfigs and config.envconfigs[name].usedevelop
for name in config.envlist
)
config.skipsdist = reader.getbool("skipsdist", all_develop)
def parse_build_isolation(self, config, reader):
config.isolated_build = reader.getbool("isolated_build", False)
config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
if config.isolated_build is True:
name = config.isolated_build_env
if name not in config.envconfigs:
config.envconfigs[name] = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config
)
@staticmethod
def ensure_requires_satisfied(specified):
missing_requirements = []
for s in specified:
try:
pkg_resources.get_distribution(s)
except pkg_resources.RequirementParseError:
raise
except Exception:
missing_requirements.append(str(pkg_resources.Requirement(s)))
if missing_requirements:
raise tox.exception.MissingRequirement(
"Packages {} need to be installed alongside tox in {}".format(
", ".join(missing_requirements), sys.executable
)
)
def _list_section_factors(self, section):
factors = set()
if section in self._cfg:
for _, value in self._cfg[section].items():
exprs = re.findall(r"^([\w{}\.!,-]+)\:\s+", value, re.M)
factors.update(*mapcat(_split_factor_expr_all, exprs))
return factors
def make_envconfig(self, name, section, subs, config, replace=True):
factors = set(name.split("-"))
reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors)
tc = TestenvConfig(name, config, factors, reader)
reader.addsubstitutions(
envname=name,
envbindir=tc.get_envbindir,
envsitepackagesdir=tc.get_envsitepackagesdir,
envpython=tc.get_envpython,
**subs
)
for env_attr in config._testenv_attr:
atype = env_attr.type
try:
if atype in ("bool", "path", "string", "dict", "dict_setenv", "argv", "argvlist"):
meth = getattr(reader, "get{}".format(atype))
res = meth(env_attr.name, env_attr.default, replace=replace)
elif atype == "space-separated-list":
res = reader.getlist(env_attr.name, sep=" ")
elif atype == "line-list":
res = reader.getlist(env_attr.name, sep="\n")
else:
raise ValueError("unknown type {!r}".format(atype))
if env_attr.postprocess:
res = env_attr.postprocess(testenv_config=tc, value=res)
except tox.exception.MissingSubstitution as e:
tc.missing_subs.append(e.name)
res = e.FLAG
setattr(tc, env_attr.name, res)
if atype in ("path", "string"):
reader.addsubstitutions(**{env_attr.name: res})
return tc
def _getenvdata(self, reader, config):
candidates = (
self.config.option.env,
os.environ.get("TOXENV"),
reader.getstring("envlist", replace=False),
)
env_str = next((i for i in candidates if i), [])
env_list = _split_env(env_str)
# collect section envs
all_envs = OrderedDict((i, None) for i in env_list)
if "ALL" in all_envs:
all_envs.pop("ALL")
for section in self._cfg:
if section.name.startswith(testenvprefix):
all_envs[section.name[len(testenvprefix) :]] = None
if not all_envs:
all_envs["python"] = None
package_env = config.isolated_build_env
if config.isolated_build is True and package_env in all_envs:
all_envs.pop(package_env)
if not env_list or "ALL" in env_list:
env_list = list(all_envs.keys())
if config.isolated_build is True and package_env in env_list:
msg = "isolated_build_env {} cannot be part of envlist".format(package_env)
raise tox.exception.ConfigError(msg)
all_env_list = list(all_envs.keys())
return env_list, all_env_list
def _split_env(env):
"""if handed a list, action="append" was used for -e """
if not isinstance(env, list):
env = [e.split("#", 1)[0].strip() for e in env.split("\n")]
env = ",".join([e for e in env if e])
env = [env]
return mapcat(_expand_envstr, env)
def _is_negated_factor(factor):
return factor.startswith("!")
def _base_factor_name(factor):
return factor[1:] if _is_negated_factor(factor) else factor
def _split_factor_expr(expr):
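    # e.g. "py36-!cov" -> [({"py36"}, {"cov"})], i.e. (included, excluded) factor sets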
def split_single(e):
raw = e.split("-")
included = {_base_factor_name(factor) for factor in raw if not _is_negated_factor(factor)}
excluded = {_base_factor_name(factor) for factor in raw if _is_negated_factor(factor)}
return included, excluded
partial_envs = _expand_envstr(expr)
return [split_single(e) for e in partial_envs]
def _split_factor_expr_all(expr):
partial_envs = _expand_envstr(expr)
return [{_base_factor_name(factor) for factor in e.split("-")} for e in partial_envs]
def _expand_envstr(envstr):
# split by commas not in groups
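    # e.g. "py{36,37}-django, lint" -> ["py36-django", "py37-django", "lint"]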
tokens = re.split(r"((?:\{[^}]+\})+)|,", envstr)
envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k]
def expand(env):
tokens = re.split(r"\{([^}]+)\}", env)
parts = [re.sub(r"\s+", "", token).split(",") for token in tokens]
return ["".join(variant) for variant in itertools.product(*parts)]
return mapcat(expand, envlist)
def mapcat(f, seq):
return list(itertools.chain.from_iterable(map(f, seq)))
class DepConfig:
def __init__(self, name, indexserver=None):
self.name = name
self.indexserver = indexserver
def __str__(self):
if self.indexserver:
if self.indexserver.name == "default":
return self.name
return ":{}:{}".format(self.indexserver.name, self.name)
return str(self.name)
__repr__ = __str__
class IndexServerConfig:
def __init__(self, name, url=None):
self.name = name
self.url = url
is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
"""Check value matches substitution form of referencing value from other section.
E.g. {[base]commands}
"""
class SectionReader:
def __init__(self, section_name, cfgparser, fallbacksections=None, factors=(), prefix=None):
if prefix is None:
self.section_name = section_name
else:
self.section_name = "{}:{}".format(prefix, section_name)
self._cfg = cfgparser
self.fallbacksections = fallbacksections or []
self.factors = factors
self._subs = {}
self._subststack = []
self._setenv = None
def get_environ_value(self, name):
if self._setenv is None:
return os.environ.get(name)
return self._setenv.get(name)
def addsubstitutions(self, _posargs=None, **kw):
self._subs.update(kw)
if _posargs:
self.posargs = _posargs
def getpath(self, name, defaultpath, replace=True):
path = self.getstring(name, defaultpath, replace=replace)
if path is not None:
toxinidir = self._subs["toxinidir"]
return toxinidir.join(path, abs=True)
def getlist(self, name, sep="\n"):
s = self.getstring(name, None)
if s is None:
return []
return [x.strip() for x in s.split(sep) if x.strip()]
def getdict(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace)
return self._getdict(value, default=default, sep=sep, replace=replace)
def getdict_setenv(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace, crossonly=True)
definitions = self._getdict(value, default=default, sep=sep, replace=replace)
self._setenv = SetenvDict(definitions, reader=self)
return self._setenv
def _getdict(self, value, default, sep, replace=True):
if value is None or not replace:
return default or {}
d = {}
for line in value.split(sep):
if line.strip():
name, rest = line.split("=", 1)
d[name.strip()] = rest.strip()
return d
def getbool(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, bool):
if s.lower() == "true":
s = True
elif s.lower() == "false":
s = False
else:
raise tox.exception.ConfigError(
"{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s)
)
return s
def getargvlist(self, name, default="", replace=True):
s = self.getstring(name, default, replace=False)
return _ArgvlistReader.getargvlist(self, s, replace=replace)
def getargv(self, name, default="", replace=True):
return self.getargvlist(name, default, replace=replace)[0]
def getstring(self, name, default=None, replace=True, crossonly=False):
x = None
for s in [self.section_name] + self.fallbacksections:
try:
x = self._cfg[s][name]
break
except KeyError:
continue
if x is None:
x = default
else:
x = self._apply_factors(x)
if replace and x and hasattr(x, "replace"):
x = self._replace(x, name=name, crossonly=crossonly)
# print "getstring", self.section_name, name, "returned", repr(x)
return x
def _apply_factors(self, s):
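        # e.g. the line "py36: coverage" becomes "coverage" when "py36" is among
        # self.factors and is dropped otherwise; unconditional lines pass through.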
def factor_line(line):
m = re.search(r"^([\w{}\.!,-]+)\:\s+(.+)", line)
if not m:
return line
expr, line = m.groups()
if any(
included <= self.factors and not any(x in self.factors for x in excluded)
for included, excluded in _split_factor_expr(expr)
):
return line
lines = s.strip().splitlines()
return "\n".join(filter(None, map(factor_line, lines)))
def _replace(self, value, name=None, section_name=None, crossonly=False):
if "{" not in value:
return value
section_name = section_name if section_name else self.section_name
self._subststack.append((section_name, name))
try:
replaced = Replacer(self, crossonly=crossonly).do_replace(value)
assert self._subststack.pop() == (section_name, name)
except tox.exception.MissingSubstitution:
if not section_name.startswith(testenvprefix):
raise tox.exception.ConfigError(
"substitution env:{!r}: unknown or recursive definition in"
" section {!r}.".format(value, section_name)
)
raise
return replaced
class Replacer:
RE_ITEM_REF = re.compile(
r"""
(?<!\\)[{]
(?:(?P<sub_type>[^[:{}]+):)? # optional sub_type for special rules
(?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*) # substitution key
(?::(?P<default_value>[^{}]*))? # default value
[}]
""",
re.VERBOSE,
)
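    # Illustrative substitution forms matched by the pattern above:
    #   "{toxinidir}", "{env:HOME}", "{env:HOME:/tmp}", "{[base]commands}"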
def __init__(self, reader, crossonly=False):
self.reader = reader
self.crossonly = crossonly
def do_replace(self, value):
"""
Recursively expand substitutions starting from the innermost expression
"""
def substitute_once(x):
return self.RE_ITEM_REF.sub(self._replace_match, x)
expanded = substitute_once(value)
while expanded != value: # substitution found
value = expanded
expanded = substitute_once(value)
return expanded
def _replace_match(self, match):
g = match.groupdict()
sub_value = g["substitution_value"]
if self.crossonly:
if sub_value.startswith("["):
return self._substitute_from_other_section(sub_value)
# in crossonly we return all other hits verbatim
start, end = match.span()
return match.string[start:end]
# special case: all empty values means ":" which is os.pathsep
if not any(g.values()):
return os.pathsep
# special case: opts and packages. Leave {opts} and
# {packages} intact, they are replaced manually in
# _venv.VirtualEnv.run_install_command.
if sub_value in ("opts", "packages"):
return "{{{}}}".format(sub_value)
try:
sub_type = g["sub_type"]
except KeyError:
raise tox.exception.ConfigError(
"Malformed substitution; no substitution type provided"
)
if sub_type == "env":
return self._replace_env(match)
if sub_type == "tty":
if is_interactive():
return match.group("substitution_value")
return match.group("default_value")
if sub_type is not None:
raise tox.exception.ConfigError(
"No support for the {} substitution type".format(sub_type)
)
return self._replace_substitution(match)
def _replace_env(self, match):
key = match.group("substitution_value")
if not key:
raise tox.exception.ConfigError("env: requires an environment variable name")
default = match.group("default_value")
value = self.reader.get_environ_value(key)
if value is not None:
return value
if default is not None:
return default
raise tox.exception.MissingSubstitution(key)
def _substitute_from_other_section(self, key):
if key.startswith("[") and "]" in key:
i = key.find("]")
section, item = key[1:i], key[i + 1 :]
cfg = self.reader._cfg
if section in cfg and item in cfg[section]:
if (section, item) in self.reader._subststack:
raise ValueError(
"{} already in {}".format((section, item), self.reader._subststack)
)
x = str(cfg[section][item])
return self.reader._replace(
x, name=item, section_name=section, crossonly=self.crossonly
)
raise tox.exception.ConfigError("substitution key {!r} not found".format(key))
def _replace_substitution(self, match):
sub_key = match.group("substitution_value")
val = self.reader._subs.get(sub_key, None)
if val is None:
val = self._substitute_from_other_section(sub_key)
if callable(val):
val = val()
return str(val)
def is_interactive():
return sys.stdin.isatty()
class _ArgvlistReader:
@classmethod
def getargvlist(cls, reader, value, replace=True):
"""Parse ``commands`` argvlist multiline string.
:param SectionReader reader: reader to be used.
:param str value: Content stored by key.
:rtype: list[list[str]]
:raise :class:`tox.exception.ConfigError`:
line-continuation ends nowhere while resolving for specified section
"""
commands = []
current_command = ""
for line in value.splitlines():
line = line.rstrip()
if not line:
continue
if line.endswith("\\"):
current_command += " {}".format(line[:-1])
continue
current_command += line
if is_section_substitution(current_command):
replaced = reader._replace(current_command, crossonly=True)
commands.extend(cls.getargvlist(reader, replaced))
else:
commands.append(cls.processcommand(reader, current_command, replace))
current_command = ""
else:
if current_command:
raise tox.exception.ConfigError(
"line-continuation ends nowhere while resolving for [{}] {}".format(
reader.section_name, "commands"
)
)
return commands
@classmethod
def processcommand(cls, reader, command, replace=True):
posargs = getattr(reader, "posargs", "")
posargs_string = list2cmdline([x for x in posargs if x])
# Iterate through each word of the command substituting as
# appropriate to construct the new command string. This
# string is then broken up into exec argv components using
# shlex.
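        # e.g. "pytest {posargs}" with posargs ["-x", "tests"] becomes
        # ["pytest", "-x", "tests"]; with empty posargs it becomes ["pytest"].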
if replace:
newcommand = ""
for word in CommandParser(command).words():
if word == "{posargs}" or word == "[]":
newcommand += posargs_string
continue
elif word.startswith("{posargs:") and word.endswith("}"):
if posargs:
newcommand += posargs_string
continue
else:
word = word[9:-1]
new_arg = ""
new_word = reader._replace(word)
new_word = reader._replace(new_word)
new_word = new_word.replace("\\{", "{").replace("\\}", "}")
new_arg += new_word
newcommand += new_arg
else:
newcommand = command
# Construct shlex object that will not escape any values,
# use all values as is in argv.
shlexer = shlex.shlex(newcommand, posix=True)
shlexer.whitespace_split = True
shlexer.escape = ""
return list(shlexer)
class CommandParser(object):
class State(object):
def __init__(self):
self.word = ""
self.depth = 0
self.yield_words = []
def __init__(self, command):
self.command = command
def words(self):
ps = CommandParser.State()
def word_has_ended():
return (
(
cur_char in string.whitespace
and ps.word
and ps.word[-1] not in string.whitespace
)
or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
)
def yield_this_word():
yieldword = ps.word
ps.word = ""
if yieldword:
ps.yield_words.append(yieldword)
def yield_if_word_ended():
if word_has_ended():
yield_this_word()
def accumulate():
ps.word += cur_char
def push_substitution():
ps.depth += 1
def pop_substitution():
ps.depth -= 1
for cur_char in self.command:
if cur_char in string.whitespace:
if ps.depth == 0:
yield_if_word_ended()
accumulate()
elif cur_char == "{":
yield_if_word_ended()
accumulate()
push_substitution()
elif cur_char == "}":
accumulate()
pop_substitution()
else:
yield_if_word_ended()
accumulate()
if ps.word.strip():
yield_this_word()
return ps.yield_words
def getcontextname():
if any(env in os.environ for env in ["JENKINS_URL", "HUDSON_URL"]):
return "jenkins"
return None
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
This script loads a number of .db or .rdb files and displays their bits in a
readable visualization.
When more than one file is loaded, the difference between the files is shown
and differing bits are highlighted.
'''
import argparse
import re
import sys
import itertools
# =============================================================================
def tagmap(tag):
"""
Maps a specific tag name to its generic name
"""
tag = tag.replace("CLBLL_L", "CLB")
tag = tag.replace("CLBLL_M", "CLB")
tag = tag.replace("CLBLM_L", "CLB")
tag = tag.replace("CLBLM_M", "CLB")
tag = tag.replace("SLICEL", "SLICE")
tag = tag.replace("SLICEM", "SLICE")
tag = tag.replace("LIOB33", "IOB33")
tag = tag.replace("RIOB33", "IOB33")
tag = tag.replace("LIOI3", "IOI3")
tag = tag.replace("RIOI3", "IOI3")
# TODO: Add other tag mappings
return tag
def parse_bit(bit):
"""
Decodes string describing a bit. Returns a tuple (frame, bit, value)
"""
match = re.match("^(!?)([0-9]+)_([0-9]+)$", bit)
    assert match is not None, bit
val = int(match.group(1) != "!")
frm = int(match.group(2))
bit = int(match.group(3))
return frm, bit, val
def load_and_sort_segbits(file_name, tagmap=lambda tag: tag):
"""
Loads a segbit file (.db or .rdb). Skips bits containing '<' or '>'
"""
# Load segbits
segbits = {}
with open(file_name, "r") as fp:
lines = fp.readlines()
# Parse lines
for line in lines:
line = line.strip()
fields = line.split()
if len(fields) < 2:
print("Malformed line: '%s'" % line)
continue
# Map name
feature = tagmap(fields[0])
# Decode bits
bits = []
for bit in fields[1:]:
if "<" in bit or ">" in bit:
continue
bits.append(parse_bit(bit))
# Sort bits
bits.sort(key=lambda bit: (bit[0], bit[1],))
segbits[feature] = bits
return segbits
# =============================================================================
def make_header_lines(all_bits):
"""
Formats header lines
"""
lines = []
# Bit names
bit_names = ["%d_%d" % (b[0], b[1]) for b in all_bits]
bit_len = 6
for i in range(bit_len):
line = ""
for j in range(len(all_bits)):
bstr = bit_names[j].ljust(bit_len).replace("_", "|")
line += bstr[i]
lines.append(line)
return lines
def make_data_lines(all_tags, all_bits, segbits):
"""
Formats data lines
"""
lines = []
def map_f(b):
if b < 0: return "0"
if b > 0: return "1"
return "-"
# Bit data
for tag in all_tags:
if tag in segbits.keys():
lines.append("".join(map(map_f, segbits[tag])))
else:
lines.append(" " * len(all_bits))
return lines
def main():
# Colors for TTY
if sys.stdout.isatty():
colors = {
"NONE": "\033[0m",
"DIFF": "\033[1m",
}
# Colors for pipe
else:
colors = {
"NONE": "",
"DIFF": "",
}
# ........................................................................
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("files", nargs="*", type=str, help="Input files")
args = parser.parse_args()
# Load segbits
all_segbits = []
for f in args.files:
all_segbits.append(load_and_sort_segbits(f, tagmap))
# List of all features and all bits
all_tags = set()
all_bits = set()
for segbits in all_segbits:
all_tags |= set(segbits.keys())
for bits in segbits.values():
all_bits |= set([(b[0], b[1]) for b in bits])
all_tags = sorted(list(all_tags))
all_bits = sorted(list(all_bits))
# Convert bit lists to bit vectors
for segbits in all_segbits:
for tag in segbits.keys():
vec = list([0] * len(all_bits))
for i, bit in enumerate(all_bits):
if (bit[0], bit[1], 0) in segbits[tag]:
vec[i] = -1
if (bit[0], bit[1], 1) in segbits[tag]:
vec[i] = +1
segbits[tag] = vec
# Make header and data lines
header_lines = make_header_lines(all_bits)
data_lines = [
make_data_lines(all_tags, all_bits, segbits) for segbits in all_segbits
]
# Display
max_tag_len = max([len(tag) for tag in all_tags])
for l in header_lines:
line = " " * max_tag_len + " "
for i in range(len(all_segbits)):
line += l + " "
print(line)
data_len = len(all_bits)
for i, tag in enumerate(all_tags):
line = tag.ljust(max_tag_len) + " "
diff = bytearray(data_len)
for l1, l2 in itertools.combinations(data_lines, 2):
for j in range(data_len):
if l1[i][j] != l2[i][j]:
diff[j] = 1
for l in data_lines:
for j in range(data_len):
if diff[j]:
line += colors["DIFF"] + l[i][j] + colors["NONE"]
else:
line += l[i][j]
line += " "
print(line)
# =============================================================================
if __name__ == "__main__":
main()
|
|
import json
import logging
import os
from threading import RLock
from uuid import uuid4
from azure.common.client_factory import get_client_from_cli_profile
from msrestazure.azure_active_directory import MSIAuthentication
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentMode
from knack.util import CLIError
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME, TAG_RAY_NODE_NAME
from ray.autoscaler._private.azure.config import bootstrap_azure
VM_NAME_MAX_LEN = 64
VM_NAME_UUID_LEN = 8
logger = logging.getLogger(__name__)
def synchronized(f):
def wrapper(self, *args, **kwargs):
self.lock.acquire()
try:
return f(self, *args, **kwargs)
finally:
self.lock.release()
return wrapper
class AzureNodeProvider(NodeProvider):
"""Node Provider for Azure
This provider assumes Azure credentials are set by running ``az login``
and the default subscription is configured through ``az account``
or set in the ``provider`` field of the autoscaler configuration.
Nodes may be in one of three states: {pending, running, terminated}. Nodes
appear immediately once started by ``create_node``, and transition
immediately to terminated when ``terminate_node`` is called.
"""
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
kwargs = {}
if "subscription_id" in provider_config:
kwargs["subscription_id"] = provider_config["subscription_id"]
try:
self.compute_client = get_client_from_cli_profile(
client_class=ComputeManagementClient, **kwargs)
self.network_client = get_client_from_cli_profile(
client_class=NetworkManagementClient, **kwargs)
self.resource_client = get_client_from_cli_profile(
client_class=ResourceManagementClient, **kwargs)
except CLIError as e:
if str(e) != "Please run 'az login' to setup account.":
raise
else:
logger.info("CLI profile authentication failed. Trying MSI")
credentials = MSIAuthentication()
self.compute_client = ComputeManagementClient(
credentials=credentials, **kwargs)
self.network_client = NetworkManagementClient(
credentials=credentials, **kwargs)
self.resource_client = ResourceManagementClient(
credentials=credentials, **kwargs)
self.lock = RLock()
# cache node objects
self.cached_nodes = {}
@synchronized
def _get_filtered_nodes(self, tag_filters):
def match_tags(vm):
for k, v in tag_filters.items():
if vm.tags.get(k) != v:
return False
return True
vms = self.compute_client.virtual_machines.list(
resource_group_name=self.provider_config["resource_group"])
nodes = [self._extract_metadata(vm) for vm in filter(match_tags, vms)]
self.cached_nodes = {node["name"]: node for node in nodes}
return self.cached_nodes
def _extract_metadata(self, vm):
# get tags
metadata = {"name": vm.name, "tags": vm.tags, "status": ""}
# get status
resource_group = self.provider_config["resource_group"]
instance = self.compute_client.virtual_machines.instance_view(
resource_group_name=resource_group, vm_name=vm.name).as_dict()
for status in instance["statuses"]:
code, state = status["code"].split("/")
# skip provisioning status
if code == "PowerState":
metadata["status"] = state
break
# get ip data
nic_id = vm.network_profile.network_interfaces[0].id
metadata["nic_name"] = nic_id.split("/")[-1]
nic = self.network_client.network_interfaces.get(
resource_group_name=resource_group,
network_interface_name=metadata["nic_name"])
ip_config = nic.ip_configurations[0]
if not self.provider_config.get("use_internal_ips", False):
public_ip_id = ip_config.public_ip_address.id
metadata["public_ip_name"] = public_ip_id.split("/")[-1]
public_ip = self.network_client.public_ip_addresses.get(
resource_group_name=resource_group,
public_ip_address_name=metadata["public_ip_name"])
metadata["external_ip"] = public_ip.ip_address
metadata["internal_ip"] = ip_config.private_ip_address
return metadata
def non_terminated_nodes(self, tag_filters):
"""Return a list of node ids filtered by the specified tags dict.
This list must not include terminated nodes. For performance reasons,
providers are allowed to cache the result of a call to nodes() to
serve single-node queries (e.g. is_running(node_id)). This means that
nodes() must be called again to refresh results.
Examples:
>>> provider.non_terminated_nodes({TAG_RAY_NODE_KIND: "worker"})
["node-1", "node-2"]
"""
nodes = self._get_filtered_nodes(tag_filters=tag_filters)
return [
k for k, v in nodes.items()
if not v["status"].startswith("deallocat")
]
def is_running(self, node_id):
"""Return whether the specified node is running."""
# always get current status
node = self._get_node(node_id=node_id)
return node["status"] == "running"
def is_terminated(self, node_id):
"""Return whether the specified node is terminated."""
# always get current status
node = self._get_node(node_id=node_id)
return node["status"].startswith("deallocat")
def node_tags(self, node_id):
"""Returns the tags of the given node (string dict)."""
return self._get_cached_node(node_id=node_id)["tags"]
def external_ip(self, node_id):
"""Returns the external ip of the given node."""
ip = (self._get_cached_node(node_id=node_id)["external_ip"]
or self._get_node(node_id=node_id)["external_ip"])
return ip
def internal_ip(self, node_id):
"""Returns the internal ip (Ray ip) of the given node."""
ip = (self._get_cached_node(node_id=node_id)["internal_ip"]
or self._get_node(node_id=node_id)["internal_ip"])
return ip
def create_node(self, node_config, tags, count):
"""Creates a number of nodes within the namespace."""
# TODO: restart deallocated nodes if possible
resource_group = self.provider_config["resource_group"]
# load the template file
current_path = os.path.dirname(os.path.abspath(__file__))
template_path = os.path.join(current_path, "azure-vm-template.json")
with open(template_path, "r") as template_fp:
template = json.load(template_fp)
# get the tags
config_tags = node_config.get("tags", {}).copy()
config_tags.update(tags)
config_tags[TAG_RAY_CLUSTER_NAME] = self.cluster_name
name_tag = config_tags.get(TAG_RAY_NODE_NAME, "node")
unique_id = uuid4().hex[:VM_NAME_UUID_LEN]
vm_name = "{name}-{id}".format(name=name_tag, id=unique_id)
use_internal_ips = self.provider_config.get("use_internal_ips", False)
template_params = node_config["azure_arm_parameters"].copy()
template_params["vmName"] = vm_name
template_params["provisionPublicIp"] = not use_internal_ips
template_params["vmTags"] = config_tags
template_params["vmCount"] = count
parameters = {
"properties": {
"mode": DeploymentMode.incremental,
"template": template,
"parameters": {
key: {
"value": value
}
for key, value in template_params.items()
}
}
}
# TODO: we could get the private/public ips back directly
self.resource_client.deployments.create_or_update(
resource_group_name=resource_group,
deployment_name="ray-vm-{}".format(name_tag),
parameters=parameters).wait()
@synchronized
def set_node_tags(self, node_id, tags):
"""Sets the tag values (string dict) for the specified node."""
node_tags = self._get_cached_node(node_id)["tags"]
node_tags.update(tags)
self.compute_client.virtual_machines.update(
resource_group_name=self.provider_config["resource_group"],
vm_name=node_id,
parameters={"tags": node_tags})
self.cached_nodes[node_id]["tags"] = node_tags
def terminate_node(self, node_id):
"""Terminates the specified node. This will delete the VM and
associated resources (NIC, IP, Storage) for the specified node."""
resource_group = self.provider_config["resource_group"]
try:
# get metadata for node
metadata = self._get_node(node_id)
except KeyError:
# node no longer exists
return
# TODO: deallocate instead of delete to allow possible reuse
# self.compute_client.virtual_machines.deallocate(
# resource_group_name=resource_group,
# vm_name=node_id)
# gather disks to delete later
vm = self.compute_client.virtual_machines.get(
resource_group_name=resource_group, vm_name=node_id)
disks = {d.name for d in vm.storage_profile.data_disks}
disks.add(vm.storage_profile.os_disk.name)
try:
# delete machine, must wait for this to complete
self.compute_client.virtual_machines.delete(
resource_group_name=resource_group, vm_name=node_id).wait()
except Exception as e:
logger.warning("Failed to delete VM: {}".format(e))
try:
# delete nic
self.network_client.network_interfaces.delete(
resource_group_name=resource_group,
network_interface_name=metadata["nic_name"])
except Exception as e:
logger.warning("Failed to delete nic: {}".format(e))
# delete ip address
if "public_ip_name" in metadata:
try:
self.network_client.public_ip_addresses.delete(
resource_group_name=resource_group,
public_ip_address_name=metadata["public_ip_name"])
except Exception as e:
logger.warning("Failed to delete public ip: {}".format(e))
# delete disks
for disk in disks:
try:
self.compute_client.disks.delete(
resource_group_name=resource_group, disk_name=disk)
except Exception as e:
logger.warning("Failed to delete disk: {}".format(e))
def _get_node(self, node_id):
self._get_filtered_nodes({}) # Side effect: updates cache
return self.cached_nodes[node_id]
def _get_cached_node(self, node_id):
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id=node_id)
@staticmethod
def bootstrap_config(cluster_config):
return bootstrap_azure(cluster_config)
|
|
# Copyright (C) 2014 MediaMath, Inc. <http://www.mediamath.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlite_backend as sql_backend
import table_merger
import util
import qasino_table
import logging
import time
import re
import yaml
import sys
import thread
from twisted.internet import threads
from twisted.internet import task
from twisted.internet import reactor
class DataManager(object):
def __init__(self, use_dbfile, db_dir=None, signal_channel=None, archive_db_dir=None,
generation_duration_s=30):
self.saved_tables = {}
self.query_id = 0
self.views = {}
self.thread_id = thread.get_ident()
self.stats = {}
self.generation_duration_s = generation_duration_s
self.signal_channel = signal_channel
self.archive_db_dir = archive_db_dir
self.static_db_filepath = db_dir + '/qasino_table_store_static.db'
# Start with zero because we'll call rotate_dbs instantly below.
self.db_generation_number = 0
# use_dbfile can be:
#   None            -> use the default filename template (new db every generation)
#   "memory"        -> use a single in-memory db
#   contains '%d'   -> use it as a filename template (new db every generation)
#   no '%d'         -> use the filename as-is (same db every generation)
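# Illustrative calls (values are hypothetical; note db_dir is used
# unconditionally above to build static_db_filepath):
#   DataManager("memory", db_dir="/tmp")              -> one in-memory db
#   DataManager("qasino_store_%d.db", db_dir="/tmp")  -> new db file per generation
#   DataManager("qasino_store.db", db_dir="/tmp")     -> same db file every generation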
self.one_db = False
self.db_name = use_dbfile
if use_dbfile == None:
self.db_name = "qasino_table_store_%d.db"
elif use_dbfile == "memory":
self.db_name = ":memory:"
self.one_db = True
elif use_dbfile.find('%d') == -1:
self.one_db = True
# Initialize some things
self.table_merger = table_merger.TableMerger(self)
# Add db_dir path
if db_dir != None and self.db_name != ":memory:":
self.db_name = db_dir + '/' + self.db_name
# Open the writer backend db.
db_file_name = self.db_name
if not self.one_db:
db_file_name = self.db_name % self.db_generation_number
self.sql_backend_reader = None
self.sql_backend_writer = sql_backend.SqlConnections(db_file_name,
self,
self.archive_db_dir,
self.thread_id,
self.static_db_filepath)
self.sql_backend_writer_static = sql_backend.SqlConnections(self.static_db_filepath,
self,
self.archive_db_dir,
self.thread_id,
None)
# Make the data manager db rotation run at fixed intervals.
# The first call fires immediately, which promotes the writer we
# just opened to be the reader and opens a new writer.
self.rotate_task = task.LoopingCall(self.async_rotate_dbs)
self.rotate_task.start(self.generation_duration_s)
def read_views(self, filename):
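# The views file is expected to be a YAML list of mappings, each with a
# "viewname" and a "view" entry, e.g. (hypothetical):
#   - viewname: my_view
#     view: <SQL text defining the view>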
# Reset views
self.views = {}
try:
fh = open(filename, "r")
except Exception as e:
logging.info("Failed to open views file '%s': %s", filename, e)
return
try:
view_conf_obj = yaml.load(fh)
except Exception as e:
logging.info("Failed to parse view conf yaml file '%s': %s", filename, e)
return
for view in view_conf_obj:
try:
viewname = view["viewname"]
view = view["view"]
self.views[viewname] = { 'view' : view, 'loaded' : False, 'error' : '' }
except Exception as e:
logging.info("Failure getting view '%s': %s", view["viewname"] if "viewname" in view else 'unknown', e)
def get_query_id(self):
self.query_id += 1
return self.query_id
def shutdown(self):
self.rotate_task = None
self.sql_backend_reader = None
self.sql_backend_writer = None
def async_validate_and_route_query(self, sql, query_id, use_write_db=False):
if use_write_db:
return self.sql_backend_writer.run_interaction(sql_backend.SqlConnections.WRITER_INTERACTION,
self.validate_and_route_query, sql, query_id, self.sql_backend_writer)
else:
return self.sql_backend_reader.run_interaction(sql_backend.SqlConnections.READER_INTERACTION,
self.validate_and_route_query, sql, query_id, self.sql_backend_reader)
def validate_and_route_query(self, txn, sql, query_id, sql_backend):
# When dbs rotate we force a shutdown of the backend after a
# certain amount of time to prevent hung or long-running work in
# this code path from holding dbs open. That may/will invalidate
# references we might have in here, so wrap it all in a try/except...
try:
m = re.search(r"^\s*select\s+", sql, flags=re.IGNORECASE)
if m == None:
# Process a non-select statement.
return self.process_non_select(txn, sql, query_id, sql_backend)
# Process a select statement.
return sql_backend.do_select(txn, sql)
except Exception as e:
msg = "Exception in validate_and_route_query: {}".format(str(e))
logging.info(msg)
return { "retval" : 0, "error_message" : msg }
def process_non_select(self, txn, sql, query_id, sql_backend):
"""
Called for non-select statements like show tables and desc.
"""
# DESC?
m = re.search(r"^\s*desc\s+(\S+)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
(retval, error_message, table) = sql_backend.do_desc(txn, m.group(1))
result = { "retval" : retval }
if error_message:
result["error_message"] = error_message
if table:
result["data"] = table
return result
# DESC VIEW?
m = re.search(r"^\s*desc\s+view\s+(\S+)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT view FROM qasino_server_views WHERE viewname = '%s';" % m.group(1))
# SHOW tables?
m = re.search(r"^\s*show\s+tables\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_tables ORDER BY tablename;")
# SHOW tables with LIKE?
m = re.search(r"^\s*show\s+tables\s+like\s+('\S+')\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_tables WHERE tablename LIKE {} ORDER BY tablename;".format(m.group(1)) )
# SHOW connections?
m = re.search(r"^\s*show\s+connections\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', last_update_epoch, 'unixepoch') last_update_datetime FROM qasino_server_connections ORDER BY identity;")
# SHOW info?
m = re.search(r"^\s*show\s+info\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT *, strftime('%Y-%m-%d %H:%M:%f UTC', generation_start_epoch, 'unixepoch') generation_start_datetime FROM qasino_server_info;")
# SHOW views?
m = re.search(r"^\s*show\s+views\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return sql_backend.do_select(txn, "SELECT viewname, loaded, errormsg FROM qasino_server_views ORDER BY viewname;")
# Exit?
m = re.search(r"^\s*(quit|logout|exit)\s*;$", sql, flags=re.IGNORECASE)
if m != None:
return { "retval" : 0, "error_message" : "Bye!" }
return { "retval" : 1, "error_message" : "ERROR: Unrecognized statement: %s" % sql }
def get_table_list(self):
return self.sql_backend_reader.tables
def insert_tables_table(self, txn, sql_backend_writer, sql_backend_writer_static):
table = qasino_table.QasinoTable("qasino_server_tables")
table.add_column("tablename", "varchar")
table.add_column("nr_rows", "int")
table.add_column("nr_updates", "int")
table.add_column("last_update_epoch", "int")
table.add_column("static", "int")
sql_backend_writer.add_tables_table_rows(table)
sql_backend_writer_static.add_tables_table_rows(table)
# the chicken or the egg - how do we add ourselves?
table.add_row( [ "qasino_server_tables",
table.get_nr_rows() + 1,
1,
time.time(),
0 ] )
return sql_backend_writer.add_table_data(txn, table, util.Identity.get_identity())
# This hack ensures all the internal tables are inserted
# using the same sql_backend_writer and that the
# "tables" table is added last (after all the other internal
# tables).
def insert_internal_tables(self, txn, sql_backend_writer, sql_backend_reader, db_generation_number, time, generation_duration_s, views):
sql_backend_writer.insert_info_table(txn, db_generation_number, time, generation_duration_s)
sql_backend_writer.insert_connections_table(txn)
if sql_backend_reader != None:
sql_backend_writer.insert_sql_stats_table(txn, sql_backend_reader)
sql_backend_writer.insert_update_stats_table(txn)
# This should be second to last so views can be created over any of the
# tables added above (but not over any tables added below).
sql_backend_writer.add_views(txn, views)
sql_backend_writer.insert_views_table(txn, views)
# this should be last to include all the above tables
self.insert_tables_table(txn, sql_backend_writer, self.sql_backend_writer_static)
def async_rotate_dbs(self):
"""
Kick off the rotate in a sqlconnection context because we have
some internal tables and views to add before we rotate dbs.
"""
self.sql_backend_writer.run_interaction(sql_backend.SqlConnections.WRITER_INTERACTION, self.rotate_dbs)
def rotate_dbs(self, txn):
"""
Make the db being written to be the reader db.
Open a new writer db for all new updates.
"""
logging.info("**** DataManager: Starting generation %d", self.db_generation_number)
# Before making the write db the read db,
# add various internal info tables and views.
self.insert_internal_tables(txn,
self.sql_backend_writer,
self.sql_backend_reader,
self.db_generation_number,
time.time(),
self.generation_duration_s,
self.views)
# Increment the generation number.
self.db_generation_number = int(time.time())
# Set the writer to a new db
save_sql_backend_writer = self.sql_backend_writer
# If specified put the generation number in the db name.
db_file_name = self.db_name
if not self.one_db:
db_file_name = self.db_name % self.db_generation_number
self.sql_backend_writer = sql_backend.SqlConnections(db_file_name,
self,
self.archive_db_dir,
self.thread_id,
self.static_db_filepath)
# Set the reader to what was the writer.
# Note the old reader will (should) be destroyed here.
# Just in case something else is holding a reference to it
# (indefinitely!?), force a shutdown of this backend after a
# certain amount of time.
if self.sql_backend_reader:
reactor.callLater(self.generation_duration_s * 3,
sql_backend.SqlConnections.shutdown,
self.sql_backend_reader.writer_dbpool,
self.sql_backend_reader.filename,
None)
reactor.callLater(self.generation_duration_s * 3,
sql_backend.SqlConnections.shutdown,
self.sql_backend_reader.reader_dbpool,
self.sql_backend_reader.filename,
self.sql_backend_reader.archive_db_dir)
self.sql_backend_reader = save_sql_backend_writer
# Load saved tables.
self.async_add_saved_tables()
# Lastly blast out the generation number.
if self.signal_channel != None:
self.signal_channel.send_generation_signal(self.db_generation_number, self.generation_duration_s)
def check_save_table(self, table, identity):
tablename = table.get_tablename()
key = tablename + identity
if table.get_property('persist'):
self.saved_tables[key] = { "table" : table, "tablename" : tablename, "identity" : identity }
else:
# Be sure to remove a table that is no longer persisting.
if key in self.saved_tables:
del self.saved_tables[key]
def async_add_saved_tables(self):
for key, table_data in self.saved_tables.iteritems():
logging.info("DataManager: Adding saved table '%s' from '%s'", table_data["tablename"], table_data["identity"])
self.sql_backend_writer.async_add_table_data(table_data["table"], table_data["identity"])
|
|
# Copyright 2013 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the conductor RPC API."""
from oslo.config import cfg
from oslo import messaging
from nova.objects import base as objects_base
from nova.openstack.common import jsonutils
from nova import rpc
CONF = cfg.CONF
rpcapi_cap_opt = cfg.StrOpt('conductor',
help='Set a version cap for messages sent to conductor services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ConductorAPI(object):
"""Client side of the conductor RPC API
API version history:
1.0 - Initial version.
1.1 - Added migration_update
1.2 - Added instance_get_by_uuid and instance_get_all_by_host
1.3 - Added aggregate_host_add and aggregate_host_delete
1.4 - Added migration_get
1.5 - Added bw_usage_update
1.6 - Added get_backdoor_port()
1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
and aggregate_metadata_delete
1.8 - Added security_group_get_by_instance and
security_group_rule_get_by_security_group
1.9 - Added provider_fw_rule_get_all
1.10 - Added agent_build_get_by_triple
1.11 - Added aggregate_get
1.12 - Added block_device_mapping_update_or_create
1.13 - Added block_device_mapping_get_all_by_instance
1.14 - Added block_device_mapping_destroy
1.15 - Added instance_get_all_by_filters and
instance_get_all_hung_in_rebooting and
instance_get_active_by_window
Deprecated instance_get_all_by_host
1.16 - Added instance_destroy
1.17 - Added instance_info_cache_delete
1.18 - Added instance_type_get
1.19 - Added vol_get_usage_by_time and vol_usage_update
1.20 - Added migration_get_unconfirmed_by_dest_compute
1.21 - Added service_get_all_by
1.22 - Added ping
1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
1.24 - Added instance_get
1.25 - Added action_event_start and action_event_finish
1.26 - Added instance_info_cache_update
1.27 - Added service_create
1.28 - Added binary arg to service_get_all_by
1.29 - Added service_destroy
1.30 - Added migration_create
1.31 - Added migration_get_in_progress_by_host_and_node
1.32 - Added optional node to instance_get_all_by_host
1.33 - Added compute_node_create and compute_node_update
1.34 - Added service_update
1.35 - Added instance_get_active_by_window_joined
1.36 - Added instance_fault_create
1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
1.38 - Added service name to instance_update
1.39 - Added notify_usage_exists
1.40 - Added security_groups_trigger_handler and
security_groups_trigger_members_refresh
Remove instance_get_active_by_window
1.41 - Added fixed_ip_get_by_instance, network_get,
instance_floating_address_get_all, quota_commit,
quota_rollback
1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
1.43 - Added compute_stop
1.44 - Added compute_node_delete
1.45 - Added project_id to quota_commit and quota_rollback
1.46 - Added compute_confirm_resize
1.47 - Added columns_to_join to instance_get_all_by_host and
instance_get_all_by_filters
1.48 - Added compute_unrescue
... Grizzly supports message version 1.48. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.48.
1.49 - Added columns_to_join to instance_get_by_uuid
1.50 - Added object_action() and object_class_action()
1.51 - Added the 'legacy' argument to
block_device_mapping_get_all_by_instance
1.52 - Pass instance objects for compute_confirm_resize
1.53 - Added compute_reboot
1.54 - Added 'update_cells' argument to bw_usage_update
1.55 - Pass instance objects for compute_stop
1.56 - Remove compute_confirm_resize and
migration_get_unconfirmed_by_dest_compute
1.57 - Remove migration_create()
1.58 - Remove migration_get()
... Havana supports message version 1.58. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.58.
1.59 - Remove instance_info_cache_update()
1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete()
... - Remove security_group_get_by_instance() and
security_group_rule_get_by_security_group()
1.61 - Return deleted instance from instance_destroy()
1.62 - Added object_backport()
1.63 - Changed the format of values['stats'] from a dict to a JSON string
in compute_node_update()
1.64 - Added use_slave to instance_get_all_filters()
... - Remove instance_type_get()
... - Remove aggregate_get()
... - Remove aggregate_get_by_host()
... - Remove instance_get()
... - Remove migration_update()
... - Remove block_device_mapping_destroy()
2.0 - Drop backwards compatibility
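Illustrative example (not part of this module): a deployment can cap this
interface during a rolling upgrade by setting, in nova.conf::
[upgrade_levels]
conductor = havana
which __init__ below resolves through VERSION_ALIASES to '1.58'.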
"""
VERSION_ALIASES = {
'grizzly': '1.48',
'havana': '1.58',
'icehouse': '2.0',
}
def __init__(self):
super(ConductorAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic, version='2.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.conductor,
CONF.upgrade_levels.conductor)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def instance_update(self, context, instance_uuid, updates,
service=None):
updates_p = jsonutils.to_primitive(updates)
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_update',
instance_uuid=instance_uuid,
updates=updates_p,
service=service)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
kwargs = {'instance_uuid': instance_uuid,
'columns_to_join': columns_to_join}
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_get_by_uuid', **kwargs)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
cctxt = self.client.prepare()
return cctxt.call(context,
'migration_get_in_progress_by_host_and_node',
host=host, node=node)
def aggregate_host_add(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
cctxt = self.client.prepare()
return cctxt.call(context, 'aggregate_host_add',
aggregate=aggregate_p,
host=host)
def aggregate_host_delete(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
cctxt = self.client.prepare()
return cctxt.call(context, 'aggregate_host_delete',
aggregate=aggregate_p,
host=host)
def aggregate_metadata_get_by_host(self, context, host, key):
cctxt = self.client.prepare()
return cctxt.call(context, 'aggregate_metadata_get_by_host',
host=host,
key=key)
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in=None, bw_out=None,
last_ctr_in=None, last_ctr_out=None,
last_refreshed=None, update_cells=True):
msg_kwargs = dict(uuid=uuid, mac=mac, start_period=start_period,
bw_in=bw_in, bw_out=bw_out, last_ctr_in=last_ctr_in,
last_ctr_out=last_ctr_out,
last_refreshed=last_refreshed,
update_cells=update_cells)
cctxt = self.client.prepare()
return cctxt.call(context, 'bw_usage_update', **msg_kwargs)
def provider_fw_rule_get_all(self, context):
cctxt = self.client.prepare()
return cctxt.call(context, 'provider_fw_rule_get_all')
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
cctxt = self.client.prepare()
return cctxt.call(context, 'agent_build_get_by_triple',
hypervisor=hypervisor, os=os,
architecture=architecture)
def block_device_mapping_update_or_create(self, context, values,
create=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'block_device_mapping_update_or_create',
values=values, create=create)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'block_device_mapping_get_all_by_instance',
instance=instance_p, legacy=legacy)
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join=None,
use_slave=False):
msg_kwargs = dict(filters=filters, sort_key=sort_key,
sort_dir=sort_dir, columns_to_join=columns_to_join,
use_slave=use_slave)
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_get_all_by_filters', **msg_kwargs)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_get_active_by_window_joined',
begin=begin, end=end, project_id=project_id,
host=host)
def instance_destroy(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_destroy', instance=instance_p)
def instance_info_cache_delete(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
cctxt.call(context, 'instance_info_cache_delete', instance=instance_p)
def vol_get_usage_by_time(self, context, start_time):
start_time_p = jsonutils.to_primitive(start_time)
cctxt = self.client.prepare()
return cctxt.call(context, 'vol_get_usage_by_time',
start_time=start_time_p)
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'vol_usage_update',
vol_id=vol_id, rd_req=rd_req,
rd_bytes=rd_bytes, wr_req=wr_req,
wr_bytes=wr_bytes,
instance=instance_p, last_refreshed=last_refreshed,
update_totals=update_totals)
def service_get_all_by(self, context, topic=None, host=None, binary=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'service_get_all_by',
topic=topic, host=host, binary=binary)
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_get_all_by_host',
host=host, node=node,
columns_to_join=columns_to_join)
def instance_fault_create(self, context, values):
cctxt = self.client.prepare()
return cctxt.call(context, 'instance_fault_create', values=values)
def action_event_start(self, context, values):
values_p = jsonutils.to_primitive(values)
cctxt = self.client.prepare()
return cctxt.call(context, 'action_event_start', values=values_p)
def action_event_finish(self, context, values):
values_p = jsonutils.to_primitive(values)
cctxt = self.client.prepare()
return cctxt.call(context, 'action_event_finish', values=values_p)
def service_create(self, context, values):
cctxt = self.client.prepare()
return cctxt.call(context, 'service_create', values=values)
def service_destroy(self, context, service_id):
cctxt = self.client.prepare()
return cctxt.call(context, 'service_destroy', service_id=service_id)
def compute_node_create(self, context, values):
cctxt = self.client.prepare()
return cctxt.call(context, 'compute_node_create', values=values)
def compute_node_update(self, context, node, values):
node_p = jsonutils.to_primitive(node)
cctxt = self.client.prepare()
return cctxt.call(context, 'compute_node_update',
node=node_p, values=values)
def compute_node_delete(self, context, node):
node_p = jsonutils.to_primitive(node)
cctxt = self.client.prepare()
return cctxt.call(context, 'compute_node_delete', node=node_p)
def service_update(self, context, service, values):
service_p = jsonutils.to_primitive(service)
cctxt = self.client.prepare()
return cctxt.call(context, 'service_update',
service=service_p, values=values)
def task_log_get(self, context, task_name, begin, end, host, state=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'task_log_get',
task_name=task_name, begin=begin, end=end,
host=host, state=state)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'task_log_begin_task',
task_name=task_name,
begin=begin, end=end, host=host,
task_items=task_items, message=message)
def task_log_end_task(self, context, task_name, begin, end, host, errors,
message=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'task_log_end_task',
task_name=task_name, begin=begin, end=end,
host=host, errors=errors, message=message)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
instance_p = jsonutils.to_primitive(instance)
system_metadata_p = jsonutils.to_primitive(system_metadata)
extra_usage_info_p = jsonutils.to_primitive(extra_usage_info)
cctxt = self.client.prepare()
return cctxt.call(
context, 'notify_usage_exists',
instance=instance_p,
current_period=current_period,
ignore_missing_network_data=ignore_missing_network_data,
system_metadata=system_metadata_p,
extra_usage_info=extra_usage_info_p)
def security_groups_trigger_handler(self, context, event, args):
args_p = jsonutils.to_primitive(args)
cctxt = self.client.prepare()
return cctxt.call(context, 'security_groups_trigger_handler',
event=event, args=args_p)
def security_groups_trigger_members_refresh(self, context, group_ids):
cctxt = self.client.prepare()
return cctxt.call(context, 'security_groups_trigger_members_refresh',
group_ids=group_ids)
def network_migrate_instance_start(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
cctxt = self.client.prepare()
return cctxt.call(context, 'network_migrate_instance_start',
instance=instance_p, migration=migration_p)
def network_migrate_instance_finish(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
cctxt = self.client.prepare()
return cctxt.call(context, 'network_migrate_instance_finish',
instance=instance_p, migration=migration_p)
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
reservations_p = jsonutils.to_primitive(reservations)
cctxt = self.client.prepare()
return cctxt.call(context, 'quota_commit',
reservations=reservations_p,
project_id=project_id, user_id=user_id)
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
reservations_p = jsonutils.to_primitive(reservations)
cctxt = self.client.prepare()
return cctxt.call(context, 'quota_rollback',
reservations=reservations_p,
project_id=project_id, user_id=user_id)
def get_ec2_ids(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'get_ec2_ids',
instance=instance_p)
def compute_unrescue(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare()
return cctxt.call(context, 'compute_unrescue', instance=instance_p)
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_class_action',
objname=objname, objmethod=objmethod,
objver=objver, args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport(self, context, objinst, target_version):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_backport', objinst=objinst,
target_version=target_version)
class ComputeTaskAPI(object):
"""Client side of the conductor 'compute' namespaced RPC API
API version history:
1.0 - Initial version (empty).
1.1 - Added unified migrate_server call.
1.2 - Added build_instances
1.3 - Added unshelve_instance
1.4 - Added reservations to migrate_server.
1.5 - Added the legacy_bdm parameter to build_instances
1.6 - Made migrate_server use instance objects
"""
def __init__(self):
super(ComputeTaskAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic,
namespace='compute_task',
version='1.0')
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, serializer=serializer)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit,
reservations=None):
if self.client.can_send_version('1.6'):
version = '1.6'
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '1.4'
flavor_p = jsonutils.to_primitive(flavor)
cctxt = self.client.prepare(version=version)
return cctxt.call(context, 'migrate_server',
instance=instance, scheduler_hint=scheduler_hint,
live=live, rebuild=rebuild, flavor=flavor_p,
block_migration=block_migration,
disk_over_commit=disk_over_commit,
reservations=reservations)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
image_p = jsonutils.to_primitive(image)
cctxt = self.client.prepare(version='1.5')
cctxt.cast(context, 'build_instances',
instances=instances, image=image_p,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
def unshelve_instance(self, context, instance):
cctxt = self.client.prepare(version='1.3')
cctxt.cast(context, 'unshelve_instance', instance=instance)
|
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
'''
Causes one or several :xfile:`help_texts.py` files to be generated
after each complete build of the doctree.
See :doc:`/dev/help_texts` for a topic overview.
Usage
=====
In your :xfile:`conf.py` file, add
:mod:`lino.sphinxcontrib.help_texts_extractor` to your ``extensions``
and define a ``help_texts_builder_targets`` setting::
extensions += ['lino.sphinxcontrib.help_texts_extractor']
help_texts_builder_targets = {
'lino_algus.': 'lino_algus.lib.algus'
}
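The generated :xfile:`help_texts.py` (written next to the target
package's :xfile:`__init__.py`) will look roughly like this; the key and
text below are illustrative::
# -*- coding: UTF-8 -*-
# generated by lino.sphinxcontrib.help_text_builder
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
help_texts = {
'lino_algus.lib.algus.Plan.journal' : _("""Some help text."""),
}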
Internals
=========
This builder traverses the doctree in order to find `object
descriptions
<http://www.sphinx-doc.org/en/stable/extdev/nodes.html>`_, i.e. text
nodes defined by Sphinx and inserted e.g. by the :rst:dir:`class` and
:rst:dir:`attribute` directives (which potentially have been inserted
by autodoc and autosummary).
Example of a class description::
<desc desctype="class" domain="py" noindex="False" objtype="class">
<desc_signature class="" first="False" fullname="Plan" ids="..." module="..." names="...">
<desc_annotation>class </desc_annotation>
<desc_addname>lino_xl.lib.invoicing.models.</desc_addname>
<desc_name>Plan</desc_name>
<desc_parameterlist>
<desc_parameter>*args</desc_parameter>
<desc_parameter>**kwargs</desc_parameter>
</desc_parameterlist>
</desc_signature>
<desc_content>
<paragraph>Bases: <reference internal="False" reftitle="(in Lino v1.7)" refuri="http://www.lino-framework.org/api/lino.modlib.users.mixins.html#lino.modlib.users.mixins.UserAuthored"><literal classes="xref py py-class">lino.modlib.users.mixins.UserAuthored</literal></reference>
</paragraph>
<paragraph>An <strong>invoicing plan</strong> is a rather temporary database object which represents the plan of a given user to have Lino generate a series of invoices.
</paragraph>
<index entries="..."/>
<desc desctype="attribute" objtype="attribute">
<desc_signature class="Plan" first="False" fullname="Plan.user" ids="..." module="..." names="...">
<desc_name>user</desc_name>
</desc_signature>
<desc_content/>
</desc>
<desc desctype="attribute" ... objtype="attribute">
<desc_signature class="Plan" first="False" fullname="Plan.journal" ids="..." module="..." names="...">
<desc_name>journal</desc_name>
</desc_signature>
<desc_content>
<paragraph>The journal where to create invoices. When this field is
empty, you can fill the plan with suggestions but cannot
execute the plan.</paragraph>
</desc_content>
</desc>
...
Example of a field description::
<desc desctype="attribute" domain="py" noindex="False" objtype="attribute">
<desc_signature class="Plan" first="False" fullname="Plan.journal"
ids="lino_xl.lib.invoicing.models.Plan.journal"
module="lino_xl.lib.invoicing.models"
names="lino_xl.lib.invoicing.models.Plan.journal">
<desc_name>journal</desc_name>
</desc_signature>
<desc_content>
<paragraph>
The journal where to create invoices. When this field is
empty, you can fill the plan with suggestions but cannot
execute the plan.
</paragraph>
</desc_content>
</desc>
'''
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import six
from docutils import nodes
from docutils import core
from sphinx import addnodes
from importlib import import_module
from unipath import Path
from lino.core.utils import simplify_name
useless_starts = set(['lino.core'])
useless_endings = set(['.My', '.ByUser'])
# useless_endings = set(['.VentilatingTable', '.My', '.ByUser',
# '.Table', '.AbstractTable', '.VirtualTable',
# '.Actor'])
HEADER = """# -*- coding: UTF-8 -*-
# generated by lino.sphinxcontrib.help_text_builder
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
"""
def node2html(node):
parts = core.publish_from_doctree(node, writer_name="html")
return parts['body']
class HelpTextExtractor(object):
def initialize(self, app):
self.name2dict = dict()
self.name2file = dict()
# we must write our files only when all documents have been
# processed (i.e. usually after a "clean")
self.docs_processed = 0
targets = app.env.config.help_texts_builder_targets
# print(20160725, targets)
for root, modname in targets.items():
mod = import_module(modname)
htf = Path(mod.__file__).parent.child('help_texts.py')
# if not htf.exists():
# raise Exception("No such file: {}".format(htf))
self.name2file[root] = htf
self.name2dict[root] = OrderedDict()
print("Collecting help texts for {}".format(
' '.join(self.name2file.values())))
def extract_help_texts(self, app, doctree):
# if docname != 'api/lino_xl.lib.invoicing.models':
# return
# print(doctree)
# return
# for node in doctree.traverse():
# self.node_classes.add(node.__class__)
for node in doctree.traverse(addnodes.desc):
if node['domain'] == 'py':
if node['objtype'] == 'class':
self.store_content(node)
elif node['objtype'] == 'attribute':
self.store_content(node)
# for node in doctree.traverse(nodes.field):
# self.fields.add(node.__class__)
self.docs_processed += 1
def write_help_texts_files(self, app, exception):
if exception:
return
if self.docs_processed < len(app.env.found_docs):
app.info(
"Don't write help_texts.py files because "
"only {0} of {1} docs have been processed".format(
self.docs_processed,
len(app.env.found_docs)))
return
for k, fn in self.name2file.items():
texts = self.name2dict.get(k, None)
if not texts:
app.info("No help texts for {}".format(k))
continue
# fn = os.path.join(self.outdir, 'help_texts.py')
print("Writing {} help texts for {} to {}".format(
len(texts), k, fn))
fd = open(fn, "w")
def writeln(s):
if six.PY2:
s = s.encode('utf-8')
fd.write(s)
fd.write("\n")
writeln(HEADER)
writeln("help_texts = {")
for k, v in texts.items():
writeln(''' '{}' : _("""{}"""),'''.format(k, v))
writeln("}")
fd.close()
def store_content(self, node):
sig = []
content = []
for c in node.children:
if isinstance(c, addnodes.desc_content):
for cc in c.children:
if isinstance(cc, nodes.paragraph):
p = cc.astext()
if not p.startswith("Bases:"):
if len(content) == 0:
content.append(p)
elif isinstance(c, addnodes.desc_signature):
sig.append(c)
# if len(sig) != 1:
# raise Exception("sig is {}!".format(sig))
sig = sig[0]
# sig = list(node.traverse(addnodes.desc_signature))[0]
# content = [
# p.astext() for p in node.traverse(addnodes.desc_content)]
# content = [p for p in content if not p.startswith("Bases:")]
if not content:
return
content = '\n'.join(content)
if '"""' in content:
msg = '{} : First paragraph of content may not contain \'"""\'. '
raise Exception(msg.format(sig['names'][0]))
if content.startswith('"'):
content = " " + content
if content.endswith('"'):
content += " "
# msg = '{} : First paragraph of content may not end with \'"\'.'
# self.warn(msg.format(sig['names'][0]))
for name in sig['names']:
self.sig2dict(name, content)
def sig2dict(self, name, value):
for e in useless_starts:
if name.startswith(e):
return
for e in useless_endings:
if name.endswith(e):
return
name = simplify_name(name)
for root, d in self.name2dict.items():
if name.startswith(root):
d[name] = value
def setup(app):
hte = HelpTextExtractor()
app.add_config_value('help_texts_builder_targets', {}, 'env')
app.connect('builder-inited', hte.initialize)
app.connect('doctree-read', hte.extract_help_texts)
app.connect('build-finished', hte.write_help_texts_files)
|
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from cloudify.decorators import workflow
from cloudify.workflows import api
from cloudify.exceptions import NonRecoverableError
@workflow
def execute_operation(ctx, operation, properties, node_id, **_):
node_instance = list(ctx.get_node(node_id).instances)[0]
node_instance.execute_operation(
operation=operation,
kwargs=properties).get()
@workflow
def sleep(ctx, **kwargs):
node_instance = get_instance(ctx)
node_instance.execute_operation(
'test_interface.operation',
kwargs={'key': 'before-sleep',
'value': None}).get()
node_instance.set_state('asleep').get()
time.sleep(10)
node_instance.execute_operation(
'test_interface.operation',
kwargs={'key': 'after-sleep',
'value': None}).get()
@workflow
def sleep_with_cancel_support(ctx, use_legacy_cancel, **kwargs):
node_instance = get_instance(ctx)
node_instance.execute_operation(
'test_interface.operation',
kwargs={'key': 'before-sleep',
'value': None}).get()
node_instance.set_state('asleep').get()
is_cancelled = False
for i in range(10):
if api.has_cancel_request():
is_cancelled = True
break
time.sleep(1)
if is_cancelled:
if use_legacy_cancel:
return api.EXECUTION_CANCELLED_RESULT
else:
raise api.ExecutionCancelled()
node_instance.execute_operation(
'test_interface.operation',
kwargs={'key': 'after-sleep',
'value': None}).get()
@workflow
def sleep_with_graph_usage(ctx, **kwargs):
graph = ctx.graph_mode()
sequence = graph.sequence()
node_instance = get_instance(ctx)
sequence.add(
node_instance.execute_operation(
'test_interface.operation',
kwargs={'key': 'before-sleep',
'value': None}),
node_instance.set_state('asleep'),
node_instance.execute_operation(
'test_interface.sleep_operation',
kwargs={'sleep': '10'}),
node_instance.execute_operation(
'test_interface.operation',
kwargs={'key': 'after-sleep',
'value': None}))
graph.execute()
@workflow
def test_simple(ctx, do_get, key, value, **_):
instance = get_instance(ctx)
set_state_result = instance.set_state(
'test_state')
if do_get:
set_state_result.get()
execute_operation_result = instance.execute_operation(
'test.op1',
kwargs={'key': key, 'value': value})
if do_get:
execute_operation_result.get()
@workflow
def test_cancel_on_wait_for_task_termination(ctx, do_get, **_):
instance = get_instance(ctx)
result = instance.execute_operation('test.sleep', kwargs={'sleep': 100000})
if do_get:
result.get()
@workflow
def test_cancel_on_task_retry_interval(ctx, do_get, **_):
instance = get_instance(ctx)
result = instance.execute_operation('test.fail')
if do_get:
result.get()
@workflow
def test_illegal_non_graph_to_graph_mode(ctx, **_):
ctx.send_event('sending event')
ctx.graph_mode()
@workflow
def test_fail_remote_task_eventual_success(ctx, do_get, **_):
result = get_instance(ctx).execute_operation('test.op2')
if do_get:
result.get()
@workflow
def test_fail_remote_task_eventual_failure(ctx, do_get, **_):
result = get_instance(ctx).execute_operation('test.op3')
if do_get:
result.get()
@workflow
def test_fail_local_task_eventual_success(ctx, do_get, **_):
test_fail_local_task(ctx, should_fail=False, do_get=do_get)
@workflow
def test_fail_local_task_eventual_failure(ctx, do_get, **_):
test_fail_local_task(ctx, should_fail=True, do_get=do_get)
def test_fail_local_task(ctx, should_fail, do_get):
state = []
# mock local task
def fail():
state.append(time.time())
# only fail twice, succeed on the third attempt
# (unless should_fail=True)
if len(state) == 3 and not should_fail:
return
raise RuntimeError('FAIL')
# execute the task (with retries)
try:
result = ctx.local_task(fail)
if do_get:
result.get()
except Exception:
if should_fail:
pass
else:
raise
else:
if do_get and should_fail:
raise RuntimeError('Task should have failed')
# make assertions
if do_get:
if len(state) != 3:
raise RuntimeError('Expected 3 invocations, got {}'
.format(len(state)))
for i in range(len(state) - 1):
if state[i+1] - state[i] < 1:
raise RuntimeError('Expected at least 1 second between each '
'invocation')
@workflow
def test_fail_local_task_on_nonrecoverable_error(ctx, do_get, **_):
state = []
# mock local task
def fail():
state.append(time.time())
raise NonRecoverableError('FAIL')
# execute the task (with retries)
try:
result = ctx.local_task(fail)
if do_get:
result.get()
except Exception:
pass
else:
raise RuntimeError('Task should have failed')
# make assertions
if do_get:
if len(state) != 1:
raise RuntimeError('Expected 1 invocation, got {}'
.format(len(state)))
@workflow
def test_policies_1(ctx, key, value,
custom_key=None,
custom_value=None,
**_):
instance = list(ctx.get_node('node').instances)[0]
instance.execute_operation('test.op1', {
'key': key,
'value': value,
})
instance.execute_operation('test.op1', {
'key': custom_key,
'value': custom_value
})
@workflow
def test_policies_2(ctx, key, value, **_):
instance = list(ctx.get_node('node').instances)[0]
instance.execute_operation('test.op1', kwargs={
'key': key,
'value': value
})
@workflow
def test_policies_3(ctx, key, value, **_):
instance = list(ctx.get_node('node').instances)[0]
instance.execute_operation('test.op1', kwargs={
'key': key,
'value': value
})
@workflow
def auto_heal_vm(ctx, node_id, diagnose_value=None, **_):
instance = ctx.get_node_instance(node_id)
instance.execute_operation('test.op1', kwargs={
'params': {
'failing_node': node_id,
'diagnose': diagnose_value
}
})
@workflow
def operation_mapping1(ctx, **_):
node1 = list(ctx.get_node('node1').instances)[0]
node2_rel = list(list(ctx.get_node('node2').instances)[0].relationships)[0]
node3_rel = list(list(ctx.get_node('node3').instances)[0].relationships)[0]
node1.execute_operation('test.operation')
node2_rel.execute_source_operation('test.operation')
node3_rel.execute_target_operation('test.operation')
@workflow
def operation_mapping2(ctx, value, **_):
node1 = list(ctx.get_node('node1').instances)[0]
node2_rel = list(list(ctx.get_node('node2').instances)[0].relationships)[0]
node3_rel = list(list(ctx.get_node('node3').instances)[0].relationships)[0]
node1.execute_operation('test.operation', kwargs={
'value': value
}, allow_kwargs_override=True)
node2_rel.execute_source_operation('test.operation', kwargs={
'value': value
}, allow_kwargs_override=True)
node3_rel.execute_target_operation('test.operation', kwargs={
'value': value
}, allow_kwargs_override=True)
@workflow
def operation_mapping3(ctx, value, **_):
def expect_error(func):
try:
func('test.operation', kwargs={
'value': value
}).get()
except RuntimeError as e:
assert 'Duplicate' in str(e)
node1 = list(ctx.get_node('node1').instances)[0]
node2_rel = list(list(ctx.get_node('node2').instances)[0].relationships)[0]
node3_rel = list(list(ctx.get_node('node3').instances)[0].relationships)[0]
expect_error(node1.execute_operation)
expect_error(node2_rel.execute_source_operation)
expect_error(node3_rel.execute_target_operation)
@workflow
def deployment_modification_finish(ctx, nodes, **_):
_deployment_modification_impl(ctx, nodes, 'finish')
@workflow
def deployment_modification_rollback(ctx, nodes, **_):
_deployment_modification_impl(ctx, nodes, 'rollback')
def _deployment_modification_impl(ctx, nodes, end_func_name):
modification = ctx.deployment.start_modification(nodes)
for node in modification.added.nodes:
for instance in node.instances:
instance.execute_operation('test.op', kwargs={
'modification': instance.modification,
'relationships': [(instance.id, rel.target_id)
for rel in instance.relationships]
}).get()
for node in modification.removed.nodes:
for instance in node.instances:
instance.execute_operation('test.op', kwargs={
'modification': instance.modification,
'relationships': [rel.target_id
for rel in instance.relationships]
}).get()
getattr(modification, end_func_name)()
@workflow
def deployment_modification_operations(ctx, **_):
modification = ctx.deployment.start_modification(
{'compute': {'instances': 2}})
for node in modification.added.nodes:
for instance in node.instances:
if instance.node_id == 'compute':
if (len(instance.contained_instances) != 1 or
instance.contained_instances[0].node_id != 'db'):
raise RuntimeError(
'Expected one db contained instance, got {0}'
.format(instance.contained_instances))
instance.execute_operation('test.op').get()
else:
instance.execute_operation('test.op').get()
for rel in instance.relationships:
rel.execute_source_operation('test.op').get()
rel.execute_target_operation('test.op').get()
modification.finish()
def get_instance(ctx):
return next(next(ctx.nodes).instances)
@workflow
def not_exist_operation_workflow(ctx, **kwargs):
node_instance = get_instance(ctx)
node_instance.execute_operation('test.operation')
@workflow
def not_exist_operation_graph_mode_workflow(ctx, **kwargs):
graph = ctx.graph_mode()
sequence = graph.sequence()
node_instance = get_instance(ctx)
sequence.add(node_instance.execute_operation('test.operation'))
return graph.execute()
@workflow
def ignore_handler_on_not_exist_operation_workflow(ctx, **kwargs):
graph = ctx.graph_mode()
sequence = graph.sequence()
node_instance = get_instance(ctx)
operation = node_instance.execute_operation('test.operation')
sequence.add(operation)
def _ignore_on_error_handler(tsk):
from cloudify.workflows import tasks as workflow_tasks
return workflow_tasks.HandlerResult.ignore()
operation.on_failure = _ignore_on_error_handler
return graph.execute()
@workflow
def retry_handler_on_not_exist_operation_workflow(ctx, **kwargs):
graph = ctx.graph_mode()
sequence = graph.sequence()
node_instance = get_instance(ctx)
operation = node_instance.execute_operation('test.operation')
sequence.add(operation)
def _ignore_on_error_handler(tsk):
from cloudify.workflows import tasks as workflow_tasks
return workflow_tasks.HandlerResult.retry()
operation.on_failure = _ignore_on_error_handler
return graph.execute()
@workflow
def continue_handler_on_not_exist_operation_workflow(ctx, **kwargs):
graph = ctx.graph_mode()
sequence = graph.sequence()
node_instance = get_instance(ctx)
operation = node_instance.execute_operation('test.operation')
sequence.add(operation)
def _ignore_on_error_handler(tsk):
from cloudify.workflows import tasks as workflow_tasks
return workflow_tasks.HandlerResult.cont()
operation.on_failure = _ignore_on_error_handler
return graph.execute()
@workflow
def fail_handler_on_not_exist_operation_workflow(ctx, **kwargs):
graph = ctx.graph_mode()
sequence = graph.sequence()
node_instance = get_instance(ctx)
operation = node_instance.execute_operation('test.operation')
sequence.add(operation)
def _ignore_on_error_handler(tsk):
from cloudify.workflows import tasks as workflow_tasks
return workflow_tasks.HandlerResult.fail()
operation.on_failure = _ignore_on_error_handler
return graph.execute()
@workflow
def read_scaling_groups(ctx, **kwargs):
node_instance = get_instance(ctx)
node_instance.execute_operation(
'test.operation',
kwargs={'scaling_groups': ctx.deployment.scaling_groups})
@workflow
def do_nothing(ctx, **kwargs):
pass
@workflow
def non_recoverable(ctx, **_):
raise NonRecoverableError('FAIL')
@workflow
def simple_sleep(ctx, **kwargs):
time.sleep(30)
|
|
# Copyright (c) 2017 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
from collections import defaultdict
import warnings
import logging
from .json import json
from ..common import six
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping
logger = logging.getLogger(__name__)
def _flatten(container):
for i in container:
if isinstance(i, (list, tuple)):
for j in _flatten(i):
yield j
else:
yield i
def _encode_tree(x):
if isinstance(x, list):
return json.dumps(x)
else:
return x
def _traverse_tree(t, include=None, encode=None):
if encode is not None:
t = encode(t)
if include is False:
return
if isinstance(t, list):
for i in t:
for b in _traverse_tree(i, include, encode):
yield b
elif isinstance(t, Mapping):
for k in t:
if include is None or include is True:
for i in _traverse_tree(t[k], encode=encode):
yield k, i
else:
if not include.get(k, False):
continue
for i in _traverse_tree(t[k], include.get(k), encode=encode):
yield k, i
else:
yield t
def _traverse_filter(t, include=None):
for b in _traverse_tree(t, include=include, encode=_encode_tree):
yield b
_traverse_docs = _traverse_filter
def _valid_filter(f, top=True):
if isinstance(f, Mapping):
return all(_valid_filter(v, top=False) for v in f.values())
elif isinstance(f, list):
return not top
else:
return True
class DocumentSearchEngine(object):
"""Search for documents as part of an index.
Use the DocumentSearchEngine to search for specific
key-value pairs within a list of documents.
Each document must have a unique identifier.
Use the include argument to control which keys
are indexed and which are not. This may increase
indexing speed and reduce memory usage. See
:meth:`~.check_filter` for more information.
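Example (illustrative):
.. code-block:: python
docs = [{'_id': '0', 'a': 1}, {'_id': '1', 'a': 2}]
engine = DocumentSearchEngine(docs)
sorted(engine.find({'a': 1}))   # -> ['0']
len(engine)                     # -> 2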
:param docs: A set of documents to index.
:type docs: list
:param include: A mapping of keys that shall be
included (True) or excluded (False).
:type include: Mapping
:param hash_: The hash function to use, defaults to :func:`hash`.
:type hash_: callable
"""
def __init__(self, docs=None, include=None, hash_=None):
warnings.warn(
"The {} class is deprecated. Please use the Collection class instead.".format(
type(self).__name__),
DeprecationWarning)
self._hash = hash if hash_ is None else hash_
logger.debug("Building index...")
self.ids, self.index, self.included = self._build_index(docs, include)
logger.debug("Built index with {} entries.".format(len(self.index)))
def _build_index(self, docs, include=None):
index = defaultdict(set)
ids = set()
if include is None:
included = None
else:
included = dict()
for branch in _traverse_docs(include):
f = tuple(_flatten(branch))
included[self._hash(f[:-1])] = f[-1]
if docs is not None:
for doc in docs:
ids.add(doc['_id'])
for branch in _traverse_docs(doc, include=include):
f = tuple(_flatten(branch))
index[self._hash(f)].add(doc['_id'])
return ids, index, included
def _filter_supported(self, filter):
if self.included is None:
return True
else:
for branch in _traverse_tree(filter):
f = tuple(_flatten(branch))
for i in range(len(f)):
h = self._hash(f[:-i])
if self.included.get(h, False):
break
else:
return False
else:
return True
def check_filter(self, filter):
"""Check whether the filter is valid and supported.
Not all filters are supported when the search engine
is built with specific keys to be included or excluded.
Example:
.. code-block:: python
incl = {'a': True, 'b': {'c': False, 'd': True}}
engine = DocumentSearchEngine(docs, incl)
# Examples for supported filters:
engine.find({'a': x})
engine.find({'a': x, 'b': y})
engine.find({'b': {'d': z}})
# Examples for filters that are not supported:
engine.find({'b': {'c': x}})
engine.find({'b': {'e': y}}) # *)
engine.find({'c': z}) # *)
*) Once one key within one hierarchy level is specified
to be either included or excluded, all other keys within
the same level are automatically excluded.
:param filter: The filter to be checked.
:type filter: Mapping
:raises ValueError: If the filter is invalid.
:raises RuntimeError: If the filter is not supported
by the index.
"""
if filter is None:
return True
if not _valid_filter(filter):
raise ValueError(filter)
elif not self._filter_supported(filter):
msg = "{} not indexed for filter: '{}'."
raise RuntimeError(msg.format(type(self).__name__, filter))
def find(self, filter=None):
"""Find all documents matching filter.
:param filter: A mapping of key-value pairs that
all indexed documents are compared against.
:type filter: Mapping
        :return: An iterable of the ids of all indexed documents
            matching the filter.
:raises ValueError: If the filter is invalid.
:raises RuntimeError: If the filter is not supported
by the index.
"""
self.check_filter(filter)
if filter is None or not len(filter):
return _DocumentSearchEngineResults(self.ids)
else:
result = None
for branch in _traverse_filter(filter):
h = self._hash(tuple(_flatten(branch)))
m = self.index.get(h, set())
if result is None:
result = m
continue
if m is None:
return
else:
result = result.intersection(m)
if result is None:
return
else:
return _DocumentSearchEngineResults(result)
def __len__(self):
"""Return the number of indexed documents."""
return len(self.ids)
class _DocumentSearchEngineResults(object):
def __init__(self, ids):
self._ids = ids
def __len__(self):
return len(self._ids)
def __iter__(self):
return iter(self._ids)
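# Illustrative usage sketch (not part of the original module). It mirrors the
# examples from the check_filter docstring above; the key names and ids are
# made up for demonstration.
if __name__ == '__main__':
    docs = [
        {'_id': 'doc1', 'a': 1, 'b': {'c': 2, 'd': 3}},
        {'_id': 'doc2', 'a': 2, 'b': {'c': 2, 'd': 4}},
    ]
    include = {'a': True, 'b': {'d': True}}
    engine = DocumentSearchEngine(docs, include=include)
    print(sorted(engine.find({'a': 1})))         # ids of documents with a == 1
    print(sorted(engine.find({'b': {'d': 4}})))  # nested keys work when included
    try:
        engine.find({'b': {'c': 2}})  # 'c' was excluded, so this is not indexed
    except RuntimeError as error:
        print(error)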
|
|
import urllib, ssl
from socket import timeout
####################################################################################################
PREFIX = "/video/letmestream"
NAME = "LetMeStream"
ICON = 'icon_default.png'
####################################################################################################
lmsToken = Prefs['lmstoken']
collTvShows = {}
collMovies = {}
sessionsCalled = {}
def Start():
ObjectContainer.title1 = NAME
@handler(PREFIX, NAME)
def MainMenu():
if not Prefs['lmstoken']:
return ObjectContainer(header=L('Not configured'), message=L('No LetMeStream token configured.'))
oc = ObjectContainer()
oc.add(DirectoryObject(key=Callback(TvShows), title=L("TvShows"), thumb=R('tvshows.png')))
oc.add(DirectoryObject(key=Callback(Movies), title=L("Movies"), thumb=R('Movies.png')))
return oc
def setItem(key, value):
return Data.SaveObject('lms' + str(key), value)
def getItem(key):
if Data.Exists('lms' + str(key)):
return Data.LoadObject('lms' + str(key))
return None
def ValidatePrefs():
return True
@route(PREFIX + '/TvShow', itemId = int)
def GetTvShow(itemId):
oc = ObjectContainer()
item = getItem(itemId)
if not item:
raise Ex.MediaNotAvailable
oc.title1 = item['title']
ObjectContainer.art = Callback(Thumb, url=item['backdrop'])
url = 'http://cdn.letmestream.com/api/plex/episodes/' + str(item['mediaParentId']) + '?token=' + lmsToken
content = HTTP.Request(url, cacheTime = CACHE_1DAY).content
seasons = JSON.ObjectFromString(content)['seasons']
item['seasons'] = seasons
collTvShows[str(item['id'])] = item
setItem(item['id'], item)
for season in seasons:
if season['season'] > 0:
oc.add(SeasonObject(show=item['title'], episode_count=len(season['episodes']), key=Callback(TvShowSeason, itemId = item['id'], seasonInt = season['season']), rating_key=str(item['id']) + '-' + str(season['season']), title="Season " + str(season['season']), thumb=Callback(Thumb, url=item['poster'] + '?' + str(season['season']))))
return oc
@route(PREFIX + '/TvShow/season', itemId = int, seasonInt = int)
def TvShowSeason(itemId, seasonInt):
oc = ObjectContainer()
item = getItem(itemId)
oc.title1 = item['title'] + ' - Season ' + str(seasonInt)
ObjectContainer.art = Callback(Thumb, url=item['backdrop'])
fullIndex = 0
for season in item['seasons']:
if season['season'] == seasonInt:
episodes = season['episodes']
for episode in episodes:
fullIndex = fullIndex + 1
episode = JSON.ObjectFromString(episode)
episode = episodes[str(episode)]
Log(episode)
episode['backdrop'] = item['backdrop']
episode['type'] = 'episode'
episode['poster'] = episode['thumb']
episode['title'] = episode['key'] + ' - ' + (episode['title'].replace(episode['key'] + ' - ', ''))
openSubtitlesHash = None
try:
if episode['subtitlesHash'] and len(episode['subtitlesHash']) > 0:
openSubtitlesHash = episode['subtitlesHash'][0]
except:
pass
setItem(episode['id'], episode)
epdObject = EpisodeObject(key=Callback(videoClipFromItem, itemId = episode['id'], include_container = True), season = episode['season'], absolute_index = fullIndex, rating_key=episode['title'], title=episode['title'], art=Callback(Thumb, url = item['backdrop']), thumb=Callback(Thumb, url = episode['poster'], failback = item['poster']), summary = episode['overview'])
epdObject.show = item['title']
epdObject.season = seasonInt
epdObject.absolute_index = fullIndex
epdObject.source_title = 'LetMeStream'
oc.add(epdObject)
break
else:
fullIndex = fullIndex + len(season['episodes'])
return oc
@route(PREFIX + '/TvShows')
def TvShows(oc = None):
oc = ObjectContainer()
oc.title1 = L('TvShows')
i = 0
items = []
collTvShows = {}
start = 0
end = 14
while i < 100:
items = getItems('genretvshowall', start, end)
for item in items:
try:
if not item['id'] or collTvShows[str(item['id'])] or not item['title']:
continue
except:
pass
item['type'] = 'show'
collTvShows[str(item['id'])] = item
setItem(item['id'], item)
try:
itemKey = item['title'] + '#' + str(item['id'])
oc.add(TVShowObject(key=Callback(GetTvShow, itemId = item['id']), rating_key=itemKey, title=item['title'], art=Callback(Thumb, url = item['backdrop']), thumb=Callback(Thumb, url = item['poster'])))
except:
pass
if len(items) < 1:
break
start += len(items)
i += 1
return oc
@route(PREFIX + '/lmsMovies')
def Movies(oc = None):
oc = ObjectContainer()
oc.title1 = L('Movies')
i = 0
items = []
start = 0
end = 14
collMovies = {}
while i < 100:
items = getItems('genreall', start, end)
for item in items:
try:
if not item['id'] or collMovies[str(item['id'])] or not item['title']:
continue
except:
pass
item['type'] = 'movie'
collMovies[str(item['id'])] = item
setItem(item['id'], item)
try:
oc.add(MovieObject(key=Callback(videoClipFromItem, itemId = item['id'], include_container = True), rating_key=item['title'], title=item['title'], art=Callback(Thumb, url = item['backdrop']), thumb=Callback(Thumb, url = item['poster'])))
except:
pass
if len(items) < 1:
break
start += len(items)
i += 1
return oc
def getUrl(item):
    # Build the lms:// stream URL; return None if the item has no playable location.
    try:
        itemUrl = 'lms://' + str(item['locationsInfos'][0]['id']) + ':' + Prefs['lmstoken'] + ':0:' + Prefs['subtitlesLanguage']
        return itemUrl
    except:
        return None
@route(PREFIX + '/thumb', url = str)
def Thumb(url, failback = None):
try:
url = url.replace('https://', 'http://')
data = HTTP.Request(url, cacheTime = CACHE_1MONTH).content
return DataObject(data, 'image/jpeg')
except:
if failback:
return Redirect(failback)
return Redirect(R(ICON))
@route(PREFIX + '/media/videoclip', itemId = int)
def videoClipFromItem(itemId, include_container = False, includeRelated = False, includeRelatedCount = False, includeExtras = False):
item = getItem(itemId)
return CreateVideoClipObject(
itemType = item['type'],
item = item,
url = getUrl(item),
title = item['title'],
summary = item['overview'],
thumb = item['poster'],
backdrop = item['backdrop'],
mediaLocationId = item['mediaLocationId'],
mediaItemId = item['mediaItemId'],
mediaFileId = item['mediaFileId'],
include_container = include_container,
includeRelated = includeRelated,
includeExtras = includeExtras
)
def CreateVideoClipObject(itemType, item, url, title, summary, thumb, backdrop, mediaLocationId, mediaItemId, mediaFileId, include_container, includeRelated, includeExtras):
videoContainer = 'mp4'
videoCodec = VideoCodec.H264
videoResolution = '544'
audioCodec = AudioCodec.AAC
optimizedForStreaming = 1
videoHeight = 1
videoWidth = 1
duration = 0
videoProtocol = 'HTTPMP4Video'
classmap = {
'generic': VideoClipObject,
'movie': MovieObject,
'episode': EpisodeObject,
'show': TVShowObject,
'season': SeasonObject
}
if itemType == 'episode' and not include_container:
videoclip_obj = VideoClipObject(
url = url,
rating_key = url,
title = title,
art = Callback(Thumb, url=backdrop),
thumb = Callback(Thumb, url=thumb)
)
else :
videoclip_obj = classmap[itemType](
url = url,
rating_key = url,
title = title,
art = Callback(Thumb, url=backdrop),
thumb = Callback(Thumb, url=thumb),
summary = summary
)
if include_container:
return ObjectContainer(objects=[videoclip_obj])
else:
return videoclip_obj
def getItems(itemsType, start=0, end=14):
try:
if not start:
start = str(0)
url = 'http://cdn.letmestream.com/api/plex/frontParse/' + itemsType + '/' + str(start) + ',' + str(int(end)) + '?token=' + lmsToken
items = JSON.ObjectFromString(HTTP.Request(url, cacheTime = CACHE_1DAY).content)['items']
return items
except:
return []
|
|
r"""Routines to create data from external modellers, for comparison purposes.
- DIPOLE1D: You must have Dipole1D installed and it must be in your system
path; https://software.seg.org/2012/0003.
- EMmod: You must have EMmod installed and it must be in your system
  path; https://software.seg.org/2015/0001.
- Green3D: You must have Green3D installed (for which you need to be a member
of the CEMI consortium). The following files must be in the folder
`empymod/tests/green3d`: `green3d.m`, `grint.mexa64`,
  `grint.mexw64`, `normal.mexa64`, and `normal.mexw64`. Furthermore, you need
Matlab.
Tested only on Linux (Ubuntu 16.04 LTS, x86_64).
Warning: These functions exist only to generate test data with the provided
scripts. They do not check the input and are therefore very fragile if you do
not provide the input exactly as expected.
"""
import os
import subprocess
import numpy as np
from scipy.constants import mu_0
from os.path import join, dirname
class ChDir(object):
r"""Step into a directory temporarily.
Taken from:
pythonadventures.wordpress.com/2013/12/15/
chdir-a-context-manager-for-switching-working-directories
"""
def __init__(self, path):
self.old_dir = os.getcwd()
self.new_dir = path
def __enter__(self):
os.chdir(self.new_dir)
def __exit__(self, *args):
os.chdir(self.old_dir)
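# Illustrative sketch (not part of the original module): ChDir is a context
# manager, so the working directory is restored when the with-block exits,
# even if the body raises.
def _chdir_example():
    cwd_before = os.getcwd()
    with ChDir(dirname(__file__) or '.'):
        pass  # e.g. subprocess.run('bash run.sh', shell=True)
    assert os.getcwd() == cwd_before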
def green3d(src, rec, depth, res, freq, aniso, par, strength=0):
r"""Run model with green3d (CEMI).
You must have Green3D installed (for which you need to be a member of the
CEMI consortium). The following files must be in the folder
`empymod/tests/green3d`:
- `green3d.m`
- `grint.mexa64`
- (`grint.mexw64`)
- (`normal.mexa64`)
- (`normal.mexw64`).
Furthermore, you need to have Matlab installed.
http://www.cemi.utah.edu
"""
# Execution directory
# (no need to create it, it HAS to exist with the necessary green3d-files).
rundir = join(dirname(__file__), 'tmp/green3d/')
# Source-input depending on par
if par in [9, 10]:
srcstr = str(src[0]) + ' ' + str(src[1]) + ' ' + str(src[2]) + ' '
srcstr += str(src[3]) + ' ' + str(src[4])
elif par in [2, 3]:
srcstr = str(strength) + ' ' + str(src[0]) + ' ' + str(src[2]) + ' '
srcstr += str(src[4]) + ' ' + str(src[1]) + ' ' + str(src[3]) + ' '
srcstr += str(src[5])
elif par in [6, 7, 8]:
srcstr = str(src[0]) + ' ' + str(src[1]) + ' ' + str(src[2])
# Write input file
with open(rundir + 'run.sh', 'wb') as runfile:
runfile.write(bytes(
'#!/bin/bash\n\nmatlab -nodesktop -nosplash -r "[e, h] = green3d('
'[' + ','.join(map(str, freq))+'], '
'[' + ','.join(map(str, depth[1:] - np.r_[0, depth[1:-1]])) + '], '
'[' + ','.join(map(str, 1/res[1:])) + '], '
'[' + ','.join(map(str, aniso[1:])) + '], '
'[' + ','.join(map(str, rec[0].ravel())) + '], '
'[' + ','.join(map(str, rec[1].ravel())) + '], '
'[' + ','.join(map(str, np.ones(np.size(rec[0])) * rec[2])) + '], '
'[' + str(par) + ' ' + srcstr + ']); exit"', 'UTF-8'))
# Run Green3D
with ChDir(rundir):
subprocess.run('bash run.sh', shell=True,
stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
# Read output-file
with open(rundir + 'out.txt', 'rb') as outfile:
temp = np.loadtxt(outfile)
Ex = temp[:, 0] + 1j*temp[:, 1]
Ey = temp[:, 2] + 1j*temp[:, 3]
Ez = temp[:, 4] + 1j*temp[:, 5]
Hx = temp[:, 6] + 1j*temp[:, 7]
Hy = temp[:, 8] + 1j*temp[:, 9]
Hz = temp[:, 10] + 1j*temp[:, 11]
if par in [6, 7, 8, 10]:
Ex /= 2j*freq*np.pi*mu_0
Ey /= 2j*freq*np.pi*mu_0
Ez /= 2j*freq*np.pi*mu_0
Hx /= 2j*freq*np.pi*mu_0
Hy /= 2j*freq*np.pi*mu_0
Hz /= 2j*freq*np.pi*mu_0
return Ex, Ey, Ez, Hx, Hy, Hz
def dipole1d(src, rec, depth, res, freq, srcpts=5):
r"""Run model with dipole1d (Scripps).
You must have Dipole1D installed and it must be in your system path.
https://software.seg.org/2012/0003
"""
# Create directory, overwrite existing
rundir = join(dirname(__file__), 'tmp/dipole1d/')
os.makedirs(rundir, exist_ok=True)
# Source: A bipole in dipole1d is defined as: center point, angles, length
if len(src) == 6:
dx = src[1] - src[0]
dy = src[3] - src[2]
dz = src[5] - src[4]
r = np.sqrt(dx**2 + dy**2 + dz**2)
theta = np.rad2deg(np.arctan2(dy, dx))
phi = np.rad2deg(np.pi/2-np.arccos(dz/r))
src = [src[0]+dx/2, src[2]+dy/2, src[4]+dz/2, theta, phi]
else:
r = 0 # 0 = dipole
# Angle: In empymod, x is Easting, and the angle is the deviation from x
# anticlockwise. In Dipole1D, x is Northing, and the angle is the
# deviation from x clockwise. Convert angle to within 0<=ang<360:
ang = (-src[3] % - 360 + 90) % 360
# Counts
nsrc = np.size(src[2])
nrec = np.size(rec[0])
nfreq = np.size(freq)
nlay = np.size(res)
# Write input file
with open(rundir + 'RUNFILE', 'wb') as runfile:
runfile.write(bytes(
'Version: DIPOLE1D_1.0\n'
'Output Filename: dipole1d.csem\n'
'CompDerivatives: no\n'
'HT Filters: kk_ht_401\n'
'UseSpline1D: no\n'
'Dipole Length: '+str(r)+'\n'
'# integ pts: '+str(srcpts)+'\n'
'# TRANSMITTERS: '+str(nsrc)+'\n'
' X Y Z ROTATION DIP\n',
'UTF-8'))
np.savetxt(runfile, np.atleast_2d(np.r_[src[1], src[0], src[2], ang,
src[4]]), fmt='%12.4f')
runfile.write(bytes('# FREQUENCIES: '+str(nfreq)+'\n',
'UTF-8'))
np.savetxt(runfile, freq, fmt='%10.3f')
runfile.write(bytes('# LAYERS: '+str(nlay)+'\n',
'UTF-8'))
np.savetxt(runfile, np.r_[[np.r_[-1000000, depth]], [res]].transpose(),
fmt='%12.5g')
runfile.write(bytes('# RECEIVERS: '+str(nrec)+'\n',
'UTF-8'))
rec = np.r_[[rec[1].ravel()], [rec[0].ravel()],
[np.ones(np.size(rec[0]))*rec[2]]]
np.savetxt(runfile, rec.transpose(), fmt='%12.4f')
# Run dipole1d
with ChDir(rundir):
subprocess.run('DIPOLE1D', shell=True,
stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
# Read output-file
skiprows = nlay + nsrc + nfreq + nrec + 6
with open(rundir + 'dipole1d.csem', 'rb') as outfile:
temp = np.loadtxt(outfile, skiprows=skiprows, unpack=True)
Ex = temp[0] - 1j*temp[1]
Ey = temp[2] - 1j*temp[3]
Ez = temp[4] - 1j*temp[5]
Hx = -temp[6]/mu_0 + 1j*temp[7]/mu_0
Hy = -temp[8]/mu_0 + 1j*temp[9]/mu_0
Hz = -temp[10]/mu_0 + 1j*temp[11]/mu_0
return Ey, Ex, Ez, Hy, Hx, Hz
def emmod(dx, nx, dy, ny, src, rec, depth, res, freq, aniso, epermV, epermH,
mpermV, mpermH, ab, nd=1000, startlogx=-6, deltalogx=0.5, nlogx=24,
kmax=10, c1=0, c2=0.001, maxpt=1000, dopchip=0, xdirect=0):
r"""Run model with emmod (Hunziker et al, 2015).
You must have EMmod installed and it must be in your system path.
https://software.seg.org/2015/0001
nd : number of integration domains
startlogx : first integration point in space
deltalogx : log sampling rate of integr. pts in space at first iteration
nlogx : amount of integration points in space at first iteration
kmax : largest wavenumber to be integrated
c1 : first precision parameter
c2 : second precision parameter
maxpt : maximum amount of integration points in space
dopchip : pchip interpolation (1) or linear interpolation (0)
xdirect : direct field in space domain (1) or in wavenumber domain (0)
"""
# Create directory, overwrite existing
rundir = join(dirname(__file__), 'tmp/emmod/')
os.makedirs(rundir, exist_ok=True)
# Write input-file
with open(rundir + 'emmod.scr', 'wb') as runfile:
runfile.write(bytes(
'#!/bin/bash\n\nemmod \\\n'
' freq='+str(freq)+' \\\n'
' file_out=emmod.out \\\n'
' writebin=0 \\\n'
' nx='+str(nx)+' \\\n'
' ny='+str(ny)+' \\\n'
' zsrc='+str(src[2])+' \\\n'
' zrcv='+str(rec[2])+' \\\n'
' dx='+str(dx)+' \\\n'
' dy='+str(dy)+' \\\n'
' z='+','.join(map(str, np.r_[-1, depth]))+' \\\n'
' econdV='+','.join(map(str, 1/(res*aniso**2)))+' \\\n'
' econdH='+','.join(map(str, 1/res))+' \\\n'
' epermV='+','.join(map(str, epermV))+' \\\n'
' epermH='+','.join(map(str, epermH))+' \\\n'
' mpermV='+','.join(map(str, mpermV))+' \\\n'
' mpermH='+','.join(map(str, mpermH))+' \\\n'
' verbose=0 \\\n'
' component='+str(ab)+' \\\n'
' nd='+str(nd)+' \\\n'
' startlogx='+str(startlogx)+' \\\n'
' deltalogx='+str(deltalogx)+' \\\n'
' nlogx='+str(nlogx)+' \\\n'
' kmax='+str(kmax)+' \\\n'
' c1='+str(c1)+' \\\n'
' c2='+str(c2)+' \\\n'
' maxpt='+str(maxpt)+' \\\n'
' dopchip='+str(dopchip)+' \\\n'
' xdirect='+str(xdirect)+' \n',
'UTF-8'))
# Run EMmod
with ChDir(rundir):
subprocess.run('bash emmod.scr', shell=True,
stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
# Read output-file
with open(rundir + 'emmod.out', 'rb') as outfile:
temp = np.loadtxt(outfile, skiprows=1, unpack=True)
# Get same x/y as requested (round to mm)
tct = np.round(temp[0], 4) + 1j*np.round(temp[1], 4)
tcr = np.round(rec[0], 4) + 1j*np.round(rec[1], 4)
result = np.zeros(rec[0].shape, dtype=complex)
for i in range(rec[0].size):
itr = np.where(tct == tcr[i])[0]
result[i] = (temp[3][itr] + 1j*temp[4][itr])[0]
return result
|
|
# Contributors:
# Christopher P. Barnes <senrabc@gmail.com>
# Andrei Sura: github.com/indera
# Mohan Das Katragadda <mohan.das142@gmail.com>
# Philip Chase <philipbchase@gmail.com>
# Ruchi Vivek Desai <ruchivdesai@gmail.com>
# Taeber Rapczak <taeber@ufl.edu>
# Nicholas Rejack <nrejack@ufl.edu>
# Josh Hanna <josh@hanna.io>
# Copyright (c) 2014-2015, University of Florida
# All rights reserved.
#
# Distributed under the BSD 3-Clause License
# For full text of the BSD 3-Clause License see http://opensource.org/licenses/BSD-3-Clause
"""
Functions related to uploading data to REDCap
"""
import ast
import collections
import datetime
import logging
import os
from lxml import etree
from redcap import RedcapError
from utils import throttle
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def create_import_data_json(import_data_dict, event_tree):
"""
Convert data from event_tree to json format.
@TODO: evaluate performance
@see the caller {@link #redi.upload.generate_output()}
    :param import_data_dict: dictionary to populate with the event's field data
    :param event_tree: lxml element holding the event tree data
    :rtype: dict
    :return: the json version of the xml data
"""
root = event_tree
event_name = root.find('name')
if event_name is None or not event_name.text:
raise Exception('Expected non-blank element event/name')
import_data_dict['redcap_event_name'] = event_name.text
event_field_value_list = root.xpath('//event/field/name')
for name in event_field_value_list:
if name.text is None:
raise Exception(
'Expected non-blank element event/field/name')
# Match all fields to build a row for each
event_field_list = root.xpath('field')
contains_data = False
for field in event_field_list:
val = field.findtext('value', '')
import_data_dict[field.findtext('name')] = val
if val and not contains_data:
contains_data = True
return {'json_data': import_data_dict, 'contains_data': contains_data}
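# Illustrative sketch (not part of the original module): a minimal <event>
# element and the dictionary produced from it by create_import_data_json().
# The element names mirror the ones queried above (name, field/name, field/value).
def _create_import_data_json_example():
    event = etree.fromstring(
        '<event>'
        '<name>1_arm_1</name>'
        '<field><name>wbc</name><value>5.4</value></field>'
        '<field><name>rbc</name><value></value></field>'
        '</event>')
    result = create_import_data_json({'record_id': '001'}, event)
    # Expected (roughly): result['json_data'] == {'record_id': '001',
    #     'redcap_event_name': '1_arm_1', 'wbc': '5.4', 'rbc': ''}
    # and result['contains_data'] is True because 'wbc' carries a value.
    return result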
def create_redcap_records(import_data):
"""
Creates REDCap records from RED-I's form data, AKA import data.
REDCap API only accepts records for importing. Records are differentiated by
their unique record ID, unless the REDCap Project is a Longitudinal study.
In that case, they are differentiated by a combination of record ID and an
event.
Since RED-I views the world in terms of forms, we have to project our
form-centric view into REDCap's record-centric world. This is done by
combining all form data with the same Subject ID and Event Name into the
same record.
:param import_data: iterable of 4-tuples: (study_id_key, form_name,
event_name, json_data_dict)
:return: iterable of REDCap records ready for upload
"""
records_by_subject_and_event = collections.defaultdict(dict)
for subject_id_key, _, event_name, record in import_data:
records_by_subject_and_event[subject_id_key, event_name].update(record)
return records_by_subject_and_event.itervalues()
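# Illustrative sketch (not part of the original module): two forms for the same
# subject and event are merged into one REDCap record, as described above.
def _create_redcap_records_example():
    import_data = [
        ('001', 'cbc', '1_arm_1', {'record_id': '001', 'wbc': '5.4'}),
        ('001', 'chemistry', '1_arm_1', {'record_id': '001', 'sodium': '140'}),
        ('002', 'cbc', '1_arm_1', {'record_id': '002', 'wbc': '6.1'}),
    ]
    records = list(create_redcap_records(import_data))
    # Two records are produced: one merging both '001' forms, one for '002'.
    return records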
def generate_output(person_tree, redcap_client, rate_limit, sent_events,
max_retry_count, skip_blanks=False, bulk_send_blanks=False):
"""
Note: This function communicates with the redcap application.
Steps:
- loop for each person/form/event element
    - generate a csv fragment using `create_eav_output`
- send csv fragment to REDCap using `send_eav_data_to_redcap`
@see the caller {@link #redi.redi._run()}
:rtype: dictionary
:return: the report_data which is passed to the report rendering function
"""
# the global dictionary to be returned
report_data = {
'errors': []
}
"""
For each person we keep a count for each form type:
subject_details = array(
'person_A' => array('form_1': 1, 'form_2': 10, ...
'person_B' => array('form_1': 1, 'form_2': 10, ...
...
)
"""
subject_details = {}
# For each form type we keep a global count
form_details = {}
# count how many `person` elements are parsed
person_count = 0
root = person_tree.getroot()
persons = root.xpath('//person')
upload_data = throttle.Throttle(redcap_client.send_data_to_redcap,
int(rate_limit))
blanks = []
# main loop for each person
for person in persons:
time_begin = datetime.datetime.now()
person_count += 1
study_id = (person.xpath('study_id') or [None])[0]
if study_id is None:
raise Exception('Expected a valid value for study_id')
# count how many csv fragments are created per person
event_count = 0
logger.info('Start sending data for study_id: %s' % study_id.text)
forms = person.xpath('./all_form_events/form')
# loop through the forms of one person
for form in forms:
form_name = form.xpath('name')[0].text
form_key = 'Total_' + form_name + '_Forms'
study_id_key = study_id.text
# init dictionary for a new person in (study_id)
if study_id_key not in subject_details:
subject_details[study_id_key] = {}
subject_details[study_id_key]['lab_id'] = person.get('lab_id')
if not form_key in subject_details[study_id_key]:
subject_details[study_id_key][form_key] = 0
if form_key not in form_details:
form_details[form_key] = 0
logger.debug(
'parsing study_id ' +
study_id.text +
' form: ' +
form_name)
# loop through the events of one form
for event in form.xpath('event'):
event_name = event.findtext('name', '')
assert event_name, "Missing name for form event"
try:
import_dict = {
redcap_client.project.def_field: study_id.text}
import_dict = create_import_data_json(
import_dict,
event)
json_data_dict = import_dict['json_data']
contains_data = import_dict['contains_data']
if sent_events.was_sent(study_id_key, form_name, event_name):
logger.debug("Skipping previously sent " + event_name)
if contains_data:
# if no error_strings encountered update event counters
subject_details[study_id_key][form_key] += 1
form_details[form_key] += 1
continue
is_blank = not contains_data
if is_blank:
if skip_blanks:
# assume subsequent events for this form and subject
# are blank and simply move on to the next form by
# breaking out of the events-loop
break
if bulk_send_blanks:
blanks.append((study_id_key, form_name, event_name,
json_data_dict))
continue
event_count += 1
if (0 == event_count % 50):
logger.info('Requests sent: %s' % (event_count))
# to speedup testing uncomment the following line
# if (0 == event_count % 2) : continue
try:
upload_data([json_data_dict], max_retry_count,
overwrite=True)
sent_events.mark_sent(study_id_key, form_name, event_name)
logger.debug("Sent " + event_name)
if contains_data:
# if no errors encountered update event counters
subject_details[study_id_key][form_key] += 1
form_details[form_key] += 1
except RedcapError as redcap_err:
handle_errors_in_redcap_xml_response(study_id, redcap_err, report_data)
except Exception as e:
logger.error(e.message)
raise
time_end = datetime.datetime.now()
logger.info("Total execution time for study_id %s was %s" % (study_id_key, (time_end - time_begin)))
logger.info("Total REDCap requests sent: %s \n" % (event_count))
if blanks:
logger.info("Sending blank forms in bulk...")
records = list(create_redcap_records(blanks))
try:
response = upload_data(records, overwrite=True)
for study_id_key, form_name, event_name, record in blanks:
sent_events.mark_sent(study_id_key, form_name, event_name)
logger.info("Sent {} blank form-events.".format(response['count']))
except RedcapError as redcap_err:
logger.error("Failed to send blank form-events.")
handle_errors_in_redcap_xml_response(study_id, redcap_err, report_data)
report_data.update({
'total_subjects': person_count,
'form_details': form_details,
'subject_details': subject_details,
'errors': report_data['errors']
})
logger.debug('report_data ' + repr(report_data))
return report_data
def handle_errors_in_redcap_xml_response(study_id, redcap_err, report_data):
"""
Checks for any errors in the redcap response and update
report data if there are any errors.
Parameters:
-----------
redcap_err: RedcapError object
report_data: dictionary to which we store error details
"""
# converting string to dictionary
response = ast.literal_eval(str(redcap_err))
logger.debug('handling response from the REDCap')
if 'error' not in response:
logger.warn("RedcapError does not contain the expected 'error' key: {}"
.format(response))
return
if 'records' in response:
records = response['records']
for record in records:
details = "(record: {}, field_name: {}, value: {}, message: {})" \
.format(
record['record'],
record['field_name'],
record['value'],
record['message'])
error_string = "{}: {}".format(response['error'], details)
report_data['errors'].append(error_string)
logger.error("{}".format(error_string))
elif 'fields' in response:
fields = response['fields']
details = "{}".format(fields)
error_string = "{}: {}".format(response['error'], details)
report_data['errors'].append(error_string)
logger.error(error_string)
else:
err = "A RedcapError ocured for study_id: {}. " \
"It contains an unexpected type of error: {}" \
.format(study_id, response)
logger.warn(err)
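# Illustrative sketch (not part of the original module): the response shape that
# handle_errors_in_redcap_xml_response() understands once str(redcap_err) is
# parsed with ast.literal_eval. A stand-in exception is used here; in production
# a real RedcapError raised by the REDCap client is passed in.
def _handle_errors_example():
    report_data = {'errors': []}
    fake_err = RuntimeError(str({
        'error': 'There were data validation errors',
        'records': [{'record': '001', 'field_name': 'wbc',
                     'value': 'abc', 'message': 'not a number'}],
    }))
    handle_errors_in_redcap_xml_response('001', fake_err, report_data)
    # report_data['errors'] now holds one formatted error string.
    return report_data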
|
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Test client vfs."""
import functools
import os
from grr.client import vfs
from grr.client.client_actions import searching
from grr.lib import flags
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
class MockVFSHandlerFind(vfs.VFSHandler):
"""A mock VFS handler for finding files.
  This is used to create the /mock2/ client vfs branch which is used in the
  tests below.
"""
supported_pathtype = rdf_paths.PathSpec.PathType.OS
filesystem = {"/": ["mock2"],
"/mock2": ["directory1", "directory3"],
"/mock2/directory1": ["file1.txt", "file2.txt", "directory2"],
"/mock2/directory1/file1.txt": "Secret 1",
"/mock2/directory1/file2.txt": "Another file",
"/mock2/directory1/directory2": ["file.jpg", "file.mp3"],
"/mock2/directory1/directory2/file.jpg": "JPEG",
"/mock2/directory1/directory2/file.mp3": "MP3 movie",
"/mock2/directory3": ["file1.txt", "long_file.text"],
"/mock2/directory3/file1.txt": "A text file",
"/mock2/directory3/long_file.text": ("space " * 100000 +
"A Secret")}
def __init__(self, base_fd, pathspec=None, progress_callback=None,
full_pathspec=None):
super(MockVFSHandlerFind, self).__init__(
base_fd, pathspec=pathspec, progress_callback=progress_callback,
full_pathspec=full_pathspec)
self.pathspec.Append(pathspec)
self.path = self.pathspec.CollapsePath()
try:
self.content = self.filesystem[self.path]
if isinstance(self.content, str):
self.size = len(self.content)
except KeyError:
raise IOError("not mocking %s" % self.path)
def Read(self, length):
# Reading the mocked directory raises.
if isinstance(self.content, list):
raise IOError()
result = self.content[self.offset:self.offset + length]
self.offset = min(self.size, self.offset + len(result))
return result
def ListNames(self):
return self.content
def DoStat(self, path):
result = rdf_client.StatEntry()
if path.startswith("/mock2/directory3"):
result.st_dev = 1
else:
result.st_dev = 2
f = self.filesystem[path]
if isinstance(f, str):
if path.startswith("/mock2/directory1/directory2"):
result.st_mode = 0o0100644 # u=rw,g=r,o=r on regular file
result.st_uid = 50
result.st_gid = 500
elif path.startswith("/mock2/directory3"):
result.st_mode = 0o0100643 # u=rw,g=r,o=wx on regular file
result.st_uid = 60
result.st_gid = 600
else:
result.st_mode = 0o0104666 # setuid, u=rw,g=rw,o=rw on regular file
result.st_uid = 90
result.st_gid = 900
else:
result.st_mode = 0o0040775 # u=rwx,g=rwx,o=rx on directory
result.st_uid = 0
result.st_gid = 4
result.st_size = len(f)
result.st_mtime = 1373185602
return result
def ListFiles(self):
"""Mock the filesystem."""
for child in self.content:
# We have a mock FS here that only uses "/".
path = "/".join([self.path, child])
result = self.DoStat(path)
ps = self.pathspec.Copy()
ps.Append(path=child, pathtype=self.supported_pathtype)
result.pathspec = ps
yield result
def IsDirectory(self):
return bool(self.content)
def Stat(self):
result = self.DoStat(self.path)
result.pathspec = self.pathspec
return result
def SearchParams(block_size, envelope_size):
def Decorator(func):
@functools.wraps(func)
def _SearchParams(*args, **kwargs):
"""Wrapper function that sets and restores search parameters."""
old_sizes = (searching.Grep.BUFF_SIZE, searching.Grep.ENVELOPE_SIZE)
searching.Grep.BUFF_SIZE = block_size
searching.Grep.ENVELOPE_SIZE = envelope_size
try:
return func(*args, **kwargs)
finally:
searching.Grep.BUFF_SIZE, searching.Grep.ENVELOPE_SIZE = old_sizes
return _SearchParams
return Decorator
class FindTest(test_lib.EmptyActionTest):
"""Test the find client Actions."""
def setUp(self):
super(FindTest, self).setUp()
# Install the mock
self.vfs_overrider = test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, MockVFSHandlerFind)
self.vfs_overrider.Start()
def tearDown(self):
super(FindTest, self).tearDown()
self.vfs_overrider.Stop()
def testFindAction(self):
"""Test the find action."""
# First get all the files at once
pathspec = rdf_paths.PathSpec(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".")
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
# Ask for the files one at the time
files = []
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".")
request.iterator.number = 1
while True:
result = self.RunAction("Find", request)
if request.iterator.state == rdf_client.Iterator.State.FINISHED:
break
self.assertEqual(len(result), 2)
self.assertTrue(isinstance(result[0], rdf_client.FindSpec))
self.assertTrue(isinstance(result[1], rdf_client.Iterator))
files.append(result[0].hit)
request.iterator = result[1].Copy()
for x, y in zip(all_files, files):
self.assertRDFValueEqual(x, y)
# Make sure the iterator is finished
self.assertEqual(request.iterator.state, rdf_client.Iterator.State.FINISHED)
# Ensure we remove old states from client_state
self.assertEqual(len(request.iterator.client_state.dat), 0)
def testFindAction2(self):
"""Test the find action path regex."""
pathspec = rdf_paths.PathSpec(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".*mp3")
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 1)
self.assertEqual(
all_files[0].pathspec.Basename(), "file.mp3")
def testFindAction3(self):
"""Test the find action data regex."""
# First get all the files at once
pathspec = rdf_paths.PathSpec(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
request = rdf_client.FindSpec(pathspec=pathspec, data_regex="Secret",
cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 2)
self.assertEqual(all_files[0].pathspec.Basename(),
"file1.txt")
self.assertEqual(all_files[1].pathspec.Basename(),
"long_file.text")
def testFindSizeLimits(self):
"""Test the find action size limits."""
# First get all the files at once
request = rdf_client.FindSpec(min_file_size=4, max_file_size=15,
cross_devs=True)
request.pathspec.Append(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
request.iterator.number = 200
results = self.RunAction("Find", request)
all_files = []
for result in results:
if isinstance(result, rdf_client.FindSpec):
all_files.append(result.hit.pathspec.Basename())
self.assertEqual(len(all_files), 5)
for filename in all_files:
# Our mock filesize is the length of the base filename, check all the
# files we got match the size criteria
self.assertTrue(4 <= len(filename) <= 15)
def testNoFilters(self):
"""Test the we get all files with no filters in place."""
# First get all the files at once
pathspec = rdf_paths.PathSpec(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
request = rdf_client.FindSpec(pathspec=pathspec, cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 9)
def testFindActionCrossDev(self):
"""Test that devices boundaries don't get crossed, also by default."""
pathspec = rdf_paths.PathSpec(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
request = rdf_client.FindSpec(pathspec=pathspec, cross_devs=True,
path_regex=".")
request.iterator.number = 200
results = self.RunAction("Find", request)
all_files = [x.hit for x in results if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 9)
request = rdf_client.FindSpec(pathspec=pathspec, cross_devs=False,
path_regex=".")
request.iterator.number = 200
results = self.RunAction("Find", request)
all_files = [x.hit for x in results if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 7)
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".")
request.iterator.number = 200
results = self.RunAction("Find", request)
all_files = [x.hit for x in results if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 7)
def testPermissionFilter(self):
"""Test filtering based on file/folder permission happens correctly."""
pathspec = rdf_paths.PathSpec(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
# Look for files that match exact permissions
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
perm_mode=0o644, cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 2)
self.assertEqual(all_files[0].pathspec.Dirname().Basename(),
"directory2")
self.assertEqual(all_files[0].pathspec.Basename(), "file.jpg")
self.assertEqual(all_files[1].pathspec.Dirname().Basename(),
"directory2")
self.assertEqual(all_files[1].pathspec.Basename(), "file.mp3")
# Look for files/folders where 'others' have 'write' permission. All other
    # attributes don't matter. The setuid bit must also be set, and the setgid
    # and sticky bits must not be set.
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
perm_mode=0o4002, perm_mask=0o7002,
cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 2)
self.assertEqual(all_files[0].pathspec.Dirname().Basename(),
"directory1")
self.assertEqual(all_files[0].pathspec.Basename(), "file1.txt")
self.assertEqual(all_files[1].pathspec.Dirname().Basename(),
"directory1")
self.assertEqual(all_files[1].pathspec.Basename(), "file2.txt")
# Look for files where 'others' have 'execute' permission. All other
# attributes don't matter. Only look for 'regular' files.
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
perm_mode=0o0100001, perm_mask=0o0100001,
cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 2)
self.assertEqual(all_files[0].pathspec.Dirname().Basename(),
"directory3")
self.assertEqual(all_files[0].pathspec.Basename(), "file1.txt")
self.assertEqual(all_files[1].pathspec.Dirname().Basename(),
"directory3")
self.assertEqual(all_files[1].pathspec.Basename(), "long_file.text")
# Look for folders where 'group' have 'execute' permission. All other
# attributes don't matter. Only look for folders.
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
perm_mode=0o0040010, perm_mask=0o0040010,
cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 3)
self.assertEqual(all_files[0].pathspec.Basename(), "directory2")
self.assertEqual(all_files[1].pathspec.Basename(), "directory1")
self.assertEqual(all_files[2].pathspec.Basename(), "directory3")
def testUIDFilter(self):
"""Test filtering based on uid happens correctly."""
pathspec = rdf_paths.PathSpec(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
# Look for files that have uid of 60
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
uid=60, cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 2)
self.assertEqual(all_files[0].pathspec.Dirname().Basename(),
"directory3")
self.assertEqual(all_files[0].pathspec.Basename(), "file1.txt")
self.assertEqual(all_files[1].pathspec.Dirname().Basename(),
"directory3")
self.assertEqual(all_files[1].pathspec.Basename(), "long_file.text")
# Look for files that have uid of 0
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
uid=0, cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 3)
self.assertEqual(all_files[0].pathspec.Basename(), "directory2")
self.assertEqual(all_files[1].pathspec.Basename(), "directory1")
self.assertEqual(all_files[2].pathspec.Basename(), "directory3")
def testGIDFilter(self):
"""Test filtering based on gid happens correctly."""
pathspec = rdf_paths.PathSpec(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
# Look for files that have gid of 500
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
gid=500, cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 2)
self.assertEqual(all_files[0].pathspec.Dirname().Basename(),
"directory2")
self.assertEqual(all_files[0].pathspec.Basename(), "file.jpg")
self.assertEqual(all_files[1].pathspec.Dirname().Basename(),
"directory2")
self.assertEqual(all_files[1].pathspec.Basename(), "file.mp3")
    # Look for files that have gid of 900
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
gid=900, cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 2)
self.assertEqual(all_files[0].pathspec.Dirname().Basename(),
"directory1")
self.assertEqual(all_files[0].pathspec.Basename(), "file1.txt")
self.assertEqual(all_files[1].pathspec.Dirname().Basename(),
"directory1")
self.assertEqual(all_files[1].pathspec.Basename(), "file2.txt")
def testUIDAndGIDFilter(self):
"""Test filtering based on combination of uid and gid happens correctly."""
pathspec = rdf_paths.PathSpec(path="/mock2/",
pathtype=rdf_paths.PathSpec.PathType.OS)
# Look for files that have uid of 90 and gid of 500
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
uid=90, gid=500, cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 0)
# Look for files that have uid of 50 and gid of 500
request = rdf_client.FindSpec(pathspec=pathspec, path_regex=".",
uid=50, gid=500, cross_devs=True)
request.iterator.number = 200
result = self.RunAction("Find", request)
all_files = [x.hit for x in result if isinstance(x, rdf_client.FindSpec)]
self.assertEqual(len(all_files), 2)
self.assertEqual(all_files[0].pathspec.Dirname().Basename(),
"directory2")
self.assertEqual(all_files[0].pathspec.Basename(), "file.jpg")
self.assertEqual(all_files[1].pathspec.Dirname().Basename(),
"directory2")
self.assertEqual(all_files[1].pathspec.Basename(), "file.mp3")
class GrepTest(test_lib.EmptyActionTest):
"""Test the find client Actions."""
XOR_IN_KEY = 0
XOR_OUT_KEY = 0
def setUp(self):
super(GrepTest, self).setUp()
# Install the mock
self.vfs_overrider = test_lib.VFSOverrider(
rdf_paths.PathSpec.PathType.OS, MockVFSHandlerFind)
self.vfs_overrider.Start()
self.filename = "/mock2/directory1/grepfile.txt"
def tearDown(self):
super(GrepTest, self).tearDown()
self.vfs_overrider.Stop()
def testGrep(self):
# Use the real file system.
vfs.VFSInit().Run()
request = rdf_client.GrepSpec(
literal=utils.Xor("10", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = os.path.join(self.base_path, "numbers.txt")
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 0
result = self.RunAction("Grep", request)
hits = [x.offset for x in result]
self.assertEqual(hits, [18, 288, 292, 296, 300, 304, 308, 312, 316,
320, 324, 329, 729, 1129, 1529, 1929, 2329,
2729, 3129, 3529, 3888])
for x in result:
self.assertTrue("10" in utils.Xor(x.data, self.XOR_OUT_KEY))
self.assertEqual(request.target.path, x.pathspec.path)
def testGrepRegex(self):
# Use the real file system.
vfs.VFSInit().Run()
request = rdf_client.GrepSpec(
regex="1[0]", xor_out_key=self.XOR_OUT_KEY, start_offset=0,
target=rdf_paths.PathSpec(
path=os.path.join(self.base_path, "numbers.txt"),
pathtype=rdf_paths.PathSpec.PathType.OS))
result = self.RunAction("Grep", request)
hits = [x.offset for x in result]
self.assertEqual(hits, [18, 288, 292, 296, 300, 304, 308, 312, 316,
320, 324, 329, 729, 1129, 1529, 1929, 2329,
2729, 3129, 3529, 3888])
for x in result:
self.assertTrue("10" in utils.Xor(x.data, self.XOR_OUT_KEY))
def testGrepLength(self):
data = "X" * 100 + "HIT"
MockVFSHandlerFind.filesystem[self.filename] = data
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 0
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].offset, 100)
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 0
request.length = 100
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 0)
def testGrepOffset(self):
data = "X" * 10 + "HIT" + "X" * 100
MockVFSHandlerFind.filesystem[self.filename] = data
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 0
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].offset, 10)
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 5
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 1)
# This should still report 10.
self.assertEqual(result[0].offset, 10)
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 11
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 0)
def testOffsetAndLength(self):
data = "X" * 10 + "HIT" + "X" * 100 + "HIT" + "X" * 10
MockVFSHandlerFind.filesystem[self.filename] = data
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 11
request.length = 100
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 0)
@SearchParams(1000, 100)
def testSecondBuffer(self):
data = "X" * 1500 + "HIT" + "X" * 100
MockVFSHandlerFind.filesystem[self.filename] = data
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 0
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].offset, 1500)
@SearchParams(1000, 100)
def testBufferBoundaries(self):
for offset in xrange(-20, 20):
data = "X" * (1000 + offset) + "HIT" + "X" * 100
MockVFSHandlerFind.filesystem[self.filename] = data
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 0
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].offset, 1000 + offset)
expected = "X" * 10 + "HIT" + "X" * 10
self.assertEqual(result[0].length, len(expected))
self.assertEqual(utils.Xor(result[0].data, self.XOR_OUT_KEY),
expected)
def testSnippetSize(self):
data = "X" * 100 + "HIT" + "X" * 100
MockVFSHandlerFind.filesystem[self.filename] = data
for before in [50, 10, 1, 0]:
for after in [50, 10, 1, 0]:
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 0
request.bytes_before = before
request.bytes_after = after
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].offset, 100)
expected = "X" * before + "HIT" + "X" * after
self.assertEqual(result[0].length, len(expected))
self.assertEqual(utils.Xor(result[0].data, self.XOR_OUT_KEY),
expected)
@SearchParams(100, 50)
def testGrepEverywhere(self):
for offset in xrange(500):
data = "X" * offset + "HIT" + "X" * (500 - offset)
MockVFSHandlerFind.filesystem[self.filename] = data
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 0
request.bytes_before = 10
request.bytes_after = 10
result = self.RunAction("Grep", request)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].offset, offset)
expected = data[max(0, offset - 10):offset + 3 + 10]
self.assertEqual(result[0].length, len(expected))
self.assertEqual(utils.Xor(result[0].data, self.XOR_OUT_KEY),
expected)
def testHitLimit(self):
limit = searching.Grep.HIT_LIMIT
hit = "x" * 10 + "HIT" + "x" * 10
data = hit * (limit + 100)
MockVFSHandlerFind.filesystem[self.filename] = data
request = rdf_client.GrepSpec(
literal=utils.Xor("HIT", self.XOR_IN_KEY),
xor_in_key=self.XOR_IN_KEY,
xor_out_key=self.XOR_OUT_KEY)
request.target.path = self.filename
request.target.pathtype = rdf_paths.PathSpec.PathType.OS
request.start_offset = 0
request.bytes_before = 10
request.bytes_after = 10
result = self.RunAction("Grep", request)
self.assertEqual(len(result), limit + 1)
error = "maximum number of hits"
self.assertTrue(error in utils.Xor(result[-1].data,
self.XOR_OUT_KEY))
class XoredSearchingTest(GrepTest):
"""Test the searching client Actions using XOR."""
XOR_IN_KEY = 37
XOR_OUT_KEY = 57
class FindBenchmarks(test_lib.AverageMicroBenchmarks,
test_lib.EmptyActionTest):
REPEATS = 100
units = "us"
def testFindAction(self):
# First get all the files at once
def RunFind():
pathspec = rdf_paths.PathSpec(path=self.base_path,
pathtype=rdf_paths.PathSpec.PathType.OS)
request = rdf_client.FindSpec(pathspec=pathspec)
request.iterator.number = 80
result = self.RunAction("Find", request)
# 80 results plus one iterator.
self.assertEqual(len(result), 81)
self.TimeIt(RunFind, "Find files with no filters.")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
from io import BytesIO
from azure.common import AzureHttpError
from azure.storage.blob import (
Blob,
BlockBlobService,
PageBlobService,
AppendBlobService,
CustomerProvidedEncryptionKey,
BlobBlock,
BlobPermissions,
ContentSettings)
from tests.testcase import (
StorageTestCase,
record,
)
from datetime import (
datetime,
timedelta,
)
from tests.testcase import TestMode
# ------------------------------------------------------------------------------
TEST_ENCRYPTION_KEY = CustomerProvidedEncryptionKey(key_value="MDEyMzQ1NjcwMTIzNDU2NzAxMjM0NTY3MDEyMzQ1Njc=",
key_hash="3QFFFpRA5+XANHqwwbT4yXDmrT/2JaLt/FKHjzhOdoE=")
# ------------------------------------------------------------------------------
class StorageCPKTest(StorageTestCase):
def setUp(self):
super(StorageCPKTest, self).setUp()
self.bbs = self._create_storage_service(BlockBlobService, self.settings)
self.pbs = self._create_storage_service(PageBlobService, self.settings)
self.abs = self._create_storage_service(AppendBlobService, self.settings)
self.container_name = self.get_resource_name('utcontainer')
# prep some test data so that they can be used in upload tests
self.byte_data = self.get_random_bytes(64 * 1024)
# create source blob to be copied from
self.source_blob_name = self.get_resource_name('srcblob')
if not self.is_playback():
self.bbs.create_container(self.container_name)
self.bbs.create_blob_from_bytes(self.container_name, self.source_blob_name, self.byte_data)
# generate a SAS so that it is accessible with a URL
sas_token = self.bbs.generate_blob_shared_access_signature(
self.container_name,
self.source_blob_name,
permission=BlobPermissions.READ,
expiry=datetime.utcnow() + timedelta(hours=1),
)
self.source_blob_url = self.bbs.make_blob_url(self.container_name, self.source_blob_name, sas_token=sas_token)
# configure the block blob service so that we can test create_blob_from* APIs with more than 1 chunk
self.bbs.MAX_BLOCK_SIZE = 1024
self.bbs.MAX_SINGLE_PUT_SIZE = 1024
self.bbs.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = 1024
self.abs.MAX_BLOCK_SIZE = 1024
self.pbs.MAX_PAGE_SIZE = 1024
def tearDown(self):
if not self.is_playback():
try:
self.bbs.delete_container(self.container_name)
except:
pass
return super(StorageCPKTest, self).tearDown()
# --Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name("cpk")
# -- Test cases for APIs supporting CPK ----------------------------------------------
@record
def test_put_block_and_put_block_list(self):
# Arrange
blob_name = self._get_blob_reference()
self.bbs.put_block(self.container_name, blob_name, b'AAA', '1', cpk=TEST_ENCRYPTION_KEY)
self.bbs.put_block(self.container_name, blob_name, b'BBB', '2', cpk=TEST_ENCRYPTION_KEY)
self.bbs.put_block(self.container_name, blob_name, b'CCC', '3', cpk=TEST_ENCRYPTION_KEY)
# Act
block_list = [BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='3')]
put_block_list_resp = self.bbs.put_block_list(self.container_name, blob_name, block_list,
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(put_block_list_resp.etag)
self.assertIsNotNone(put_block_list_resp.last_modified)
self.assertTrue(put_block_list_resp.request_server_encrypted)
self.assertEqual(put_block_list_resp.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act get the blob content without cpk should fail
with self.assertRaises(AzureHttpError):
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Act get the blob content
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, b'AAABBBCCC')
self.assertEqual(blob.properties.etag, put_block_list_resp.etag)
self.assertEqual(blob.properties.last_modified, put_block_list_resp.last_modified)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_create_block_blob_with_chunks(self):
# parallel operation
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
# Act
# create_blob_from_bytes forces the in-memory chunks to be used
put_block_list_resp = self.bbs.create_blob_from_bytes(self.container_name, blob_name, self.byte_data,
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(put_block_list_resp.etag)
self.assertIsNotNone(put_block_list_resp.last_modified)
self.assertTrue(put_block_list_resp.request_server_encrypted)
self.assertEqual(put_block_list_resp.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act get the blob content without cpk should fail
with self.assertRaises(AzureHttpError):
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Act get the blob content
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, self.byte_data)
self.assertEqual(blob.properties.etag, put_block_list_resp.etag)
self.assertEqual(blob.properties.last_modified, put_block_list_resp.last_modified)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_create_block_blob_with_sub_streams(self):
        # due to a problem with the recording framework, this test can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
# Act
stream = BytesIO(self.byte_data)
put_block_list_resp = self.bbs.create_blob_from_stream(self.container_name, blob_name, stream,
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(put_block_list_resp.etag)
self.assertIsNotNone(put_block_list_resp.last_modified)
self.assertTrue(put_block_list_resp.request_server_encrypted)
self.assertEqual(put_block_list_resp.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act get the blob content without cpk should fail
with self.assertRaises(AzureHttpError):
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Act get the blob content
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, self.byte_data)
self.assertEqual(blob.properties.etag, put_block_list_resp.etag)
self.assertEqual(blob.properties.last_modified, put_block_list_resp.last_modified)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_create_block_blob_with_single_chunk(self):
# Arrange
blob_name = self._get_blob_reference()
# Act
put_block_list_resp = self.bbs.create_blob_from_bytes(self.container_name, blob_name, b'AAABBBCCC',
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(put_block_list_resp.etag)
self.assertIsNotNone(put_block_list_resp.last_modified)
self.assertTrue(put_block_list_resp.request_server_encrypted)
self.assertEqual(put_block_list_resp.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act get the blob content without cpk should fail
with self.assertRaises(AzureHttpError):
self.bbs.get_blob_to_bytes(self.container_name, blob_name)
# Act get the blob content
blob = self.bbs.get_blob_to_bytes(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, b'AAABBBCCC')
self.assertEqual(blob.properties.etag, put_block_list_resp.etag)
self.assertEqual(blob.properties.last_modified, put_block_list_resp.last_modified)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_put_block_from_url_and_commit(self):
# Arrange
dest_blob_name = self._get_blob_reference()
# Act part 1: make put block from url calls
self.bbs.put_block_from_url(self.container_name, dest_blob_name, self.source_blob_url,
source_range_start=0, source_range_end=4 * 1024 - 1, block_id=1,
cpk=TEST_ENCRYPTION_KEY)
self.bbs.put_block_from_url(self.container_name, dest_blob_name, self.source_blob_url,
source_range_start=4 * 1024, source_range_end=8 * 1024, block_id=2,
cpk=TEST_ENCRYPTION_KEY)
# Assert blocks
block_list = self.bbs.get_block_list(self.container_name, dest_blob_name, None, 'all')
self.assertEqual(len(block_list.uncommitted_blocks), 2)
self.assertEqual(len(block_list.committed_blocks), 0)
# Committing the blocks without the cpk should fail
block_list = [BlobBlock(id='1'), BlobBlock(id='2')]
with self.assertRaises(AzureHttpError):
self.bbs.put_block_list(self.container_name, dest_blob_name, block_list)
# Act: commit the blocks with the cpk (should succeed)
put_block_list_resp = self.bbs.put_block_list(self.container_name, dest_blob_name, block_list,
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(put_block_list_resp.etag)
self.assertIsNotNone(put_block_list_resp.last_modified)
self.assertTrue(put_block_list_resp.request_server_encrypted)
self.assertEqual(put_block_list_resp.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Assert destination blob has right content
blob = self.bbs.get_blob_to_bytes(self.container_name, dest_blob_name, cpk=TEST_ENCRYPTION_KEY)
self.assertEqual(blob.content, self.byte_data[0: 8 * 1024 + 1])
self.assertEqual(blob.properties.etag, put_block_list_resp.etag)
self.assertEqual(blob.properties.last_modified, put_block_list_resp.last_modified)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_append_block(self):
# Arrange
blob_name = self._get_blob_reference()
self.abs.create_blob(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Act
for content in [b'AAA', b'BBB', b'CCC']:
append_blob_prop = self.abs.append_block(self.container_name, blob_name, content, cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(append_blob_prop.etag)
self.assertIsNotNone(append_blob_prop.last_modified)
self.assertTrue(append_blob_prop.request_server_encrypted)
self.assertEqual(append_blob_prop.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act: get the blob content without the cpk (should fail)
with self.assertRaises(AzureHttpError):
self.abs.get_blob_to_bytes(self.container_name, blob_name)
# Act get the blob content
blob = self.abs.get_blob_to_bytes(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, b'AAABBBCCC')
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_append_block_from_url(self):
# Arrange
dest_blob_name = self._get_blob_reference()
self.abs.create_blob(self.container_name, dest_blob_name, cpk=TEST_ENCRYPTION_KEY)
# Act
append_blob_prop = self.abs.append_block_from_url(self.container_name, dest_blob_name, self.source_blob_url,
source_range_start=0, source_range_end=4 * 1024 - 1,
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(append_blob_prop.etag)
self.assertIsNotNone(append_blob_prop.last_modified)
self.assertTrue(append_blob_prop.request_server_encrypted)
self.assertEqual(append_blob_prop.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act: get the blob content without the cpk (should fail)
with self.assertRaises(AzureHttpError):
self.abs.get_blob_to_bytes(self.container_name, dest_blob_name)
# Act get the blob content
blob = self.abs.get_blob_to_bytes(self.container_name, dest_blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, self.byte_data[0: 4 * 1024])
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_create_append_blob_with_chunks(self):
# Arrange
blob_name = self._get_blob_reference()
self.abs.create_blob(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Act
append_blob_prop = self.abs.append_blob_from_bytes(self.container_name, blob_name, self.byte_data,
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(append_blob_prop.etag)
self.assertIsNotNone(append_blob_prop.last_modified)
self.assertTrue(append_blob_prop.request_server_encrypted)
self.assertEqual(append_blob_prop.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act: get the blob content without the cpk (should fail)
with self.assertRaises(AzureHttpError):
self.abs.get_blob_to_bytes(self.container_name, blob_name)
# Act get the blob content
blob = self.abs.get_blob_to_bytes(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, self.byte_data)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_update_page(self):
# Arrange
blob_name = self._get_blob_reference()
self.pbs.create_blob(self.container_name, blob_name, content_length=1024 * 1024, cpk=TEST_ENCRYPTION_KEY)
# Act
page_blob_prop = self.pbs.update_page(self.container_name, blob_name, self.byte_data,
start_range=0,
end_range=len(self.byte_data) - 1,
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(page_blob_prop.etag)
self.assertIsNotNone(page_blob_prop.last_modified)
self.assertTrue(page_blob_prop.request_server_encrypted)
self.assertEqual(page_blob_prop.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act: get the blob content without the cpk (should fail)
with self.assertRaises(AzureHttpError):
self.pbs.get_blob_to_bytes(self.container_name, blob_name, start_range=0,
end_range=len(self.byte_data) - 1)
# Act get the blob content
blob = self.pbs.get_blob_to_bytes(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY, start_range=0,
end_range=len(self.byte_data) - 1)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, self.byte_data)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_update_page_from_url(self):
# Arrange
blob_name = self._get_blob_reference()
self.pbs.create_blob(self.container_name, blob_name, content_length=1024 * 1024, cpk=TEST_ENCRYPTION_KEY)
# Act
page_blob_prop = self.pbs.update_page_from_url(self.container_name, blob_name,
start_range=0,
end_range=len(self.byte_data) - 1,
copy_source_url=self.source_blob_url,
source_range_start=0,
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(page_blob_prop.etag)
self.assertIsNotNone(page_blob_prop.last_modified)
self.assertTrue(page_blob_prop.request_server_encrypted)
self.assertEqual(page_blob_prop.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act: get the blob content without the cpk (should fail)
with self.assertRaises(AzureHttpError):
self.pbs.get_blob_to_bytes(self.container_name, blob_name, start_range=0,
end_range=len(self.byte_data) - 1)
# Act get the blob content
blob = self.pbs.get_blob_to_bytes(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY, start_range=0,
end_range=len(self.byte_data) - 1)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, self.byte_data)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_create_page_blob_with_chunks(self):
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
# Act
page_blob_prop = self.pbs.create_blob_from_bytes(self.container_name, blob_name, self.byte_data,
cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(page_blob_prop.etag)
self.assertIsNotNone(page_blob_prop.last_modified)
self.assertTrue(page_blob_prop.request_server_encrypted)
self.assertEqual(page_blob_prop.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act: get the blob content without the cpk (should fail)
with self.assertRaises(AzureHttpError):
self.pbs.get_blob_to_bytes(self.container_name, blob_name)
# Act get the blob content
blob = self.pbs.get_blob_to_bytes(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert content was retrieved with the cpk
self.assertEqual(blob.content, self.byte_data)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
@record
def test_get_set_blob_properties(self):
# Arrange
blob_name = self._get_blob_reference()
self.bbs.create_blob_from_bytes(self.container_name, blob_name, b'AAABBBCCC', cpk=TEST_ENCRYPTION_KEY)
# Act without the encryption key should fail
with self.assertRaises(AzureHttpError):
self.bbs.get_blob_properties(self.container_name, blob_name)
# Act
blob = self.bbs.get_blob_properties(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertTrue(blob.properties.server_encrypted)
self.assertEqual(blob.properties.encryption_key_sha256, TEST_ENCRYPTION_KEY.key_hash)
# Act set blob properties
self.bbs.set_blob_properties(
self.container_name,
blob_name,
content_settings=ContentSettings(
content_language='spanish',
content_disposition='inline'),
cpk=TEST_ENCRYPTION_KEY,
)
# Assert
blob = self.bbs.get_blob_properties(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
self.assertEqual(blob.properties.content_settings.content_language, 'spanish')
self.assertEqual(blob.properties.content_settings.content_disposition, 'inline')
@record
def test_get_set_blob_metadata(self):
# Arrange
blob_name = self._get_blob_reference()
self.bbs.create_blob_from_bytes(self.container_name, blob_name, b'AAABBBCCC', cpk=TEST_ENCRYPTION_KEY)
metadata = {'hello': 'world', 'number': '42', 'UP': 'UPval'}
# Act without cpk should fail
with self.assertRaises(AzureHttpError):
self.bbs.set_blob_metadata(self.container_name, blob_name, metadata)
# Act with cpk should work
self.bbs.set_blob_metadata(self.container_name, blob_name, metadata, cpk=TEST_ENCRYPTION_KEY)
# Assert
md = self.bbs.get_blob_metadata(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
self.assertEqual(3, len(md))
self.assertEqual(md['hello'], 'world')
self.assertEqual(md['number'], '42')
self.assertEqual(md['UP'], 'UPval')
self.assertFalse('up' in md)
# Act: get the metadata without the cpk (should fail)
with self.assertRaises(AzureHttpError):
self.bbs.get_blob_metadata(self.container_name, blob_name)
@record
def test_snapshot_blob(self):
# Arrange
blob_name = self._get_blob_reference()
self.bbs.create_blob_from_bytes(self.container_name, blob_name, b'AAABBBCCC', cpk=TEST_ENCRYPTION_KEY)
# Act without cpk should not work
with self.assertRaises(AzureHttpError):
self.bbs.snapshot_blob(self.container_name, blob_name)
# Act with cpk should work
blob_snapshot = self.bbs.snapshot_blob(self.container_name, blob_name, cpk=TEST_ENCRYPTION_KEY)
# Assert
self.assertIsNotNone(blob_snapshot)
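# Illustrative sketch (an assumption, not part of the original tests): TEST_ENCRYPTION_KEY
# is treated here as an object exposing `key_value` (a base64-encoded 256-bit key) and
# `key_hash` (the base64-encoded SHA-256 of the raw key bytes), which the service echoes
# back as `encryption_key_sha256` in the assertions above. A minimal way such key material
# could be generated with only the standard library:
def _example_generate_cpk_material():
    import base64
    import hashlib
    import os
    raw_key = os.urandom(32)  # 256-bit customer-provided key
    key_value = base64.b64encode(raw_key).decode('utf-8')
    key_hash = base64.b64encode(hashlib.sha256(raw_key).digest()).decode('utf-8')
    return key_value, key_hash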
|
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division
import itertools
import os
from collections import OrderedDict, defaultdict, deque
from pex.common import pluralize
from pex.compatibility import urlparse
from pex.dist_metadata import DistMetadata
from pex.enum import Enum
from pex.fetcher import URLFetcher
from pex.pep_425 import TagRank
from pex.pep_503 import ProjectName
from pex.rank import Rank
from pex.resolve.resolved_requirement import Fingerprint, PartialArtifact, Pin, ResolvedRequirement
from pex.result import Error
from pex.sorted_tuple import SortedTuple
from pex.targets import LocalInterpreter, Target
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import (
IO,
Any,
Callable,
DefaultDict,
Deque,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
Union,
)
import attr # vendor:skip
from packaging import tags # vendor:skip
from packaging import version as packaging_version # vendor:skip
from packaging.specifiers import SpecifierSet # vendor:skip
from pkg_resources import Requirement # vendor:skip
else:
from pex.third_party import attr
from pex.third_party.packaging import tags
from pex.third_party.packaging import version as packaging_version
from pex.third_party.packaging.specifiers import SpecifierSet
from pex.third_party.pkg_resources import Requirement
class LockStyle(Enum["LockStyle.Value"]):
class Value(Enum.Value):
pass
STRICT = Value("strict")
SOURCES = Value("sources")
UNIVERSAL = Value("universal")
@attr.s(frozen=True)
class LockConfiguration(object):
style = attr.ib() # type: LockStyle.Value
requires_python = attr.ib(default=()) # type: Tuple[str, ...]
@requires_python.validator
def _validate_requires_python(
self,
_attribute, # type: Any
value, # type: Tuple[str, ...]
):
if len(value) > 0 and self.style != LockStyle.UNIVERSAL:
raise ValueError(
"The requires_python field should only be populated for {universal} style locks; "
"this lock is {style} style and given requires_python of {requires_python}".format(
universal=LockStyle.UNIVERSAL.value,
style=self.style.value,
requires_python=value,
)
)
@attr.s(frozen=True)
class LockRequest(object):
lock_configuration = attr.ib() # type: LockConfiguration
resolve_handler = attr.ib() # type: Callable[[Iterable[ResolvedRequirement]], None]
@attr.s(frozen=True)
class Artifact(object):
url = attr.ib() # type: str
fingerprint = attr.ib() # type: Fingerprint
filename = attr.ib(init=False) # type: str
def __attrs_post_init__(self):
# type: () -> None
url_info = urlparse.urlparse(self.url)
filename = os.path.basename(url_info.path)
object.__setattr__(self, "filename", filename)
@property
def is_source(self):
# type: () -> bool
return self.filename.endswith((".sdist", ".tar.gz", ".tgz", ".tar.bz2", ".tbz2", ".zip"))
def parse_tags(self):
# type: () -> Iterator[tags.Tag]
if self.filename.endswith(".whl"):
artifact_stem, _ = os.path.splitext(self.filename)
for tag in tags.parse_tag(artifact_stem.split("-", 2)[-1]):
yield tag
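# A hedged illustration (not from the original source): for an Artifact whose url ends in
# "PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl", the stem split above yields the tag
# string "cp39-cp39-manylinux1_x86_64", so parse_tags() yields
# tags.Tag("cp39", "cp39", "manylinux1_x86_64"); for an sdist filename nothing is yielded
# and is_source is True instead.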
@attr.s(frozen=True)
class RankedArtifact(object):
artifact = attr.ib() # type: Artifact
rank = attr.ib() # type: TagRank
def select_higher_ranked(self, other):
# type: (RankedArtifact) -> RankedArtifact
return Rank.select_highest_rank(
self, other, extract_rank=lambda ranked_artifact: ranked_artifact.rank
)
@attr.s(frozen=True)
class LockedRequirement(object):
@classmethod
def create(
cls,
pin, # type: Pin
artifact, # type: Artifact
requires_dists=(), # type: Iterable[Requirement]
requires_python=None, # type: Optional[SpecifierSet]
additional_artifacts=(), # type: Iterable[Artifact]
):
# type: (...) -> LockedRequirement
return cls(
pin=pin,
artifact=artifact,
requires_dists=SortedTuple(requires_dists, key=str),
requires_python=requires_python,
additional_artifacts=SortedTuple(additional_artifacts),
)
pin = attr.ib() # type: Pin
artifact = attr.ib() # type: Artifact
requires_dists = attr.ib(default=SortedTuple()) # type: SortedTuple[Requirement]
requires_python = attr.ib(default=None) # type: Optional[SpecifierSet]
additional_artifacts = attr.ib(default=SortedTuple()) # type: SortedTuple[Artifact]
def iter_artifacts(self):
# type: () -> Iterator[Artifact]
yield self.artifact
for artifact in self.additional_artifacts:
yield artifact
def select_artifact(
self,
target, # type: Target
build=True, # type: bool
use_wheel=True, # type: bool
):
# type: (...) -> Optional[RankedArtifact]
"""Select the highest ranking (most platform specific) artifact satisfying supported tags.
Artifacts are ranked as follows:
+ If the artifact is a wheel, rank it based on its best matching tag.
+ If the artifact is an sdist, rank it as usable, but a worse match than any wheel.
+ Otherwise treat the artifact as unusable.
:param target: The target looking to pick a resolve to use.
:param build: Whether sdists are allowed.
:param use_wheel: Whether wheels are allowed.
:return: The highest ranked artifact if the requirement is compatible with the target else
`None`.
"""
highest_rank_artifact = None # type: Optional[RankedArtifact]
for artifact in self.iter_artifacts():
if build and artifact.is_source:
# N.B.: Ensure sdists are picked last amongst a set of artifacts. We do this since
# a wheel is known to work with a target by the platform tags on the tin, whereas an
# sdist may not successfully build for a given target at all. This is an affordance
# for the LockStyle.SOURCES and LockStyle.UNIVERSAL lock styles.
sdist_rank = target.supported_tags.lowest_rank.lower()
ranked_artifact = RankedArtifact(artifact=artifact, rank=sdist_rank)
if (
highest_rank_artifact is None
or ranked_artifact
is highest_rank_artifact.select_higher_ranked(ranked_artifact)
):
highest_rank_artifact = ranked_artifact
elif use_wheel:
for tag in artifact.parse_tags():
wheel_rank = target.supported_tags.rank(tag)
if wheel_rank is None:
continue
ranked_artifact = RankedArtifact(artifact=artifact, rank=wheel_rank)
if (
highest_rank_artifact is None
or ranked_artifact
is highest_rank_artifact.select_higher_ranked(ranked_artifact)
):
highest_rank_artifact = ranked_artifact
return highest_rank_artifact
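# A hedged illustration of the ranking above (example filenames only): given a locked
# requirement carrying a manylinux cp39 wheel, a macosx cp39 wheel and an sdist, resolving
# for a CPython 3.9 Linux target ranks the manylinux wheel by its best matching supported
# tag, skips the macosx wheel (no supported tag), and ranks the sdist just below the
# lowest supported tag via supported_tags.lowest_rank.lower(), so the compatible wheel
# wins unless use_wheel=False.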
@attr.s(frozen=True)
class _ResolveRequest(object):
@classmethod
def root(cls, requirement):
# type: (Requirement) -> _ResolveRequest
return cls(required_by=(requirement,), requirement=requirement)
required_by = attr.ib() # type: Tuple[Requirement, ...]
requirement = attr.ib() # type: Requirement
extras = attr.ib(default=None) # type: Optional[Tuple[str, ...]]
@property
def project_name(self):
# type: () -> ProjectName
return ProjectName(self.requirement.project_name)
def request_dependencies(self, locked_requirement):
# type: (LockedRequirement) -> Iterator[_ResolveRequest]
for requires_dist in locked_requirement.requires_dists:
yield _ResolveRequest(
required_by=self.required_by + (requires_dist,),
requirement=requires_dist,
extras=self.requirement.extras,
)
def render_via(self):
# type: () -> str
return "via: {via}".format(via=" -> ".join(map(str, self.required_by)))
@attr.s(frozen=True)
class _ResolvedArtifact(object):
ranked_artifact = attr.ib() # type: RankedArtifact
locked_requirement = attr.ib() # type: LockedRequirement
@property
def artifact(self):
# type: () -> Artifact
return self.ranked_artifact.artifact
@property
def version(self):
# type: () -> Union[packaging_version.LegacyVersion, packaging_version.Version]
return self.locked_requirement.pin.version.parsed_version
def select_higher_rank(
self,
other, # type: _ResolvedArtifact
prefer_older_binary=False, # type: bool
):
# type: (...) -> _ResolvedArtifact
if prefer_older_binary and self.artifact.is_source ^ other.artifact.is_source:
return Rank.select_highest_rank(self, other, lambda ra: ra.ranked_artifact.rank)
if self.version == other.version:
return Rank.select_highest_rank(self, other, lambda ra: ra.ranked_artifact.rank)
return self if self.version > other.version else other
@attr.s(frozen=True)
class DownloadableArtifact(object):
@classmethod
def create(
cls,
pin, # type: Pin
artifact, # type: Artifact
satisfied_direct_requirements=(), # type: Iterable[Requirement]
):
# type: (...) -> DownloadableArtifact
return cls(
pin=pin,
artifact=artifact,
satisfied_direct_requirements=SortedTuple(satisfied_direct_requirements, key=str),
)
pin = attr.ib() # type: Pin
artifact = attr.ib() # type: Artifact
satisfied_direct_requirements = attr.ib(default=SortedTuple()) # type: SortedTuple[Requirement]
@attr.s(frozen=True)
class Resolved(object):
@classmethod
def create(
cls,
target, # type: Target
direct_requirements, # type: Iterable[Requirement]
downloadable_requirements, # type: Iterable[_ResolvedArtifact]
):
# type: (...) -> Resolved
direct_requirements_by_project_name = defaultdict(
list
) # type: DefaultDict[ProjectName, List[Requirement]]
for requirement in direct_requirements:
direct_requirements_by_project_name[ProjectName(requirement.project_name)].append(
requirement
)
# N.B.: Lowest rank means highest rank value. I.E.: The 1st tag is the most specific and
# the 765th tag is the least specific.
largest_rank_value = target.supported_tags.lowest_rank.value
smallest_rank_value = TagRank.highest_natural().value
rank_span = largest_rank_value - smallest_rank_value
downloadable_artifacts = []
target_specificities = []
for downloadable_requirement in downloadable_requirements:
pin = downloadable_requirement.locked_requirement.pin
downloadable_artifacts.append(
DownloadableArtifact.create(
pin=pin,
artifact=downloadable_requirement.artifact,
satisfied_direct_requirements=direct_requirements_by_project_name[
pin.project_name
],
)
)
target_specificities.append(
(
rank_span
- (downloadable_requirement.ranked_artifact.rank.value - smallest_rank_value)
)
/ rank_span
)
return cls(
target_specificity=sum(target_specificities) / len(target_specificities),
downloadable_artifacts=tuple(downloadable_artifacts),
)
target_specificity = attr.ib() # type: float
downloadable_artifacts = attr.ib() # type: Tuple[DownloadableArtifact, ...]
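# A hedged worked example of the specificity computation above, reusing the 765-tag figure
# from the earlier comment: with smallest_rank_value = 1 and largest_rank_value = 765,
# rank_span = 764; an artifact matched at the most specific tag (rank value 1) contributes
# (764 - 0) / 764 = 1.0, one matched at the least specific tag (rank value 765) contributes
# (764 - 764) / 764 = 0.0, and target_specificity is the mean of these per-artifact scores.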
@attr.s(frozen=True)
class LockedResolve(object):
@classmethod
def create(
cls,
platform_tag, # type: tags.Tag
resolved_requirements, # type: Iterable[ResolvedRequirement]
dist_metadatas, # type: Iterable[DistMetadata]
url_fetcher, # type: URLFetcher
):
# type: (...) -> LockedResolve
# TODO(John Sirois): Introduce a thread pool and pump these fetches to workers via a Queue.
def fingerprint_url(url):
# type: (str) -> Fingerprint
with url_fetcher.get_body_stream(url) as body_stream:
return Fingerprint.from_stream(body_stream)
fingerprint_by_url = {
url: fingerprint_url(url)
for url in set(
itertools.chain.from_iterable(
resolved_requirement._iter_urls_to_fingerprint()
for resolved_requirement in resolved_requirements
)
)
}
def resolve_fingerprint(partial_artifact):
# type: (PartialArtifact) -> Artifact
return Artifact(
url=partial_artifact.url,
fingerprint=partial_artifact.fingerprint
or fingerprint_by_url[partial_artifact.url],
)
dist_metadata_by_pin = {
Pin(dist_info.project_name, dist_info.version): dist_info
for dist_info in dist_metadatas
}
locked_requirements = []
for resolved_requirement in resolved_requirements:
distribution_metadata = dist_metadata_by_pin.get(resolved_requirement.pin)
if distribution_metadata is None:
raise ValueError(
"No distribution metadata found for {project}.\n"
"Given distribution metadata for:\n"
"{projects}".format(
project=resolved_requirement.pin.as_requirement(),
projects="\n".join(
sorted(str(pin.as_requirement()) for pin in dist_metadata_by_pin)
),
)
)
locked_requirements.append(
LockedRequirement.create(
pin=resolved_requirement.pin,
artifact=resolve_fingerprint(resolved_requirement.artifact),
requires_dists=distribution_metadata.requires_dists,
requires_python=distribution_metadata.requires_python,
additional_artifacts=(
resolve_fingerprint(artifact)
for artifact in resolved_requirement.additional_artifacts
),
)
)
return cls(platform_tag=platform_tag, locked_requirements=SortedTuple(locked_requirements))
platform_tag = attr.ib(order=str) # type: tags.Tag
locked_requirements = attr.ib() # type: SortedTuple[LockedRequirement]
def emit_requirements(self, stream):
# type: (IO[str]) -> None
def emit_artifact(
artifact, # type: Artifact
line_continuation, # type: bool
):
# type: (...) -> None
stream.write(
" --hash={algorithm}:{hash} {line_continuation}\n".format(
algorithm=artifact.fingerprint.algorithm,
hash=artifact.fingerprint.hash,
line_continuation=" \\" if line_continuation else "",
)
)
for locked_requirement in self.locked_requirements:
stream.write(
"{project_name}=={version} \\\n".format(
project_name=locked_requirement.pin.project_name,
version=locked_requirement.pin.version,
)
)
emit_artifact(
locked_requirement.artifact,
line_continuation=bool(locked_requirement.additional_artifacts),
)
for index, additional_artifact in enumerate(
locked_requirement.additional_artifacts, start=1
):
emit_artifact(
additional_artifact,
line_continuation=index != len(locked_requirement.additional_artifacts),
)
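# A hedged illustration of the output of emit_requirements above (example values only): a
# requirement pinned to example-pkg 1.0 with a primary artifact and one additional
# artifact is written as pip-compatible lines of the form:
#
#   example-pkg==1.0 \
#       --hash=sha256:<primary artifact hash> \
#       --hash=sha256:<additional artifact hash>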
def resolve(
self,
target, # type: Target
requirements, # type: Iterable[Requirement]
constraints=(), # type: Iterable[Requirement]
source=None, # type: Optional[str]
transitive=True, # type: bool
build=True, # type: bool
use_wheel=True, # type: bool
prefer_older_binary=False, # type: bool
):
# type: (...) -> Union[Resolved, Error]
is_local_interpreter = isinstance(target, LocalInterpreter)
if not use_wheel:
if not build:
return Error(
"Cannot both ignore wheels (use_wheel=False) and refrain from building "
"distributions (build=False)."
)
elif not is_local_interpreter:
return Error(
"Cannot ignore wheels (use_wheel=False) when resolving for a platform: given "
"{platform_description}".format(
platform_description=target.render_description()
)
)
if not is_local_interpreter:
build = False
repository = defaultdict(list) # type: DefaultDict[ProjectName, List[LockedRequirement]]
for locked_requirement in self.locked_requirements:
repository[locked_requirement.pin.project_name].append(locked_requirement)
# 1. Gather all required projects and their requirers.
required = OrderedDict() # type: OrderedDict[ProjectName, List[_ResolveRequest]]
to_be_resolved = deque() # type: Deque[_ResolveRequest]
def request_resolve(requests):
# type: (Iterable[_ResolveRequest]) -> None
to_be_resolved.extend(
request
for request in requests
if target.requirement_applies(request.requirement, extras=request.extras)
)
visited = set() # type: Set[ProjectName]
request_resolve(_ResolveRequest.root(requirement) for requirement in requirements)
while to_be_resolved:
resolve_request = to_be_resolved.popleft()
project_name = resolve_request.project_name
required.setdefault(project_name, []).append(resolve_request)
if not transitive or project_name in visited:
continue
visited.add(project_name)
for locked_requirement in repository[project_name]:
request_resolve(resolve_request.request_dependencies(locked_requirement))
# 2. Select either the best fit artifact for each requirement or collect an error.
constraints_by_project_name = {
ProjectName(constraint.project_name): constraint for constraint in constraints
}
resolved_artifacts = []
errors = []
for project_name, resolve_requests in required.items():
reasons = [] # type: List[str]
best_match = None # type: Optional[_ResolvedArtifact]
for locked_requirement in repository[project_name]:
def attributed_reason(reason):
# type: (str) -> str
if len(resolve_requests) == 1:
return "{pin} ({via}) {reason}".format(
pin=locked_requirement.pin,
via=resolve_requests[0].render_via(),
reason=reason,
)
return (
"{pin} {reason}\n"
" requirers:\n"
" {vias}".format(
pin=locked_requirement.pin,
reason=reason,
vias="\n ".join(rr.render_via() for rr in resolve_requests),
)
)
if locked_requirement.requires_python and not target.requires_python_applies(
locked_requirement.requires_python,
source=locked_requirement.pin.as_requirement(),
):
reasons.append(
attributed_reason(
"requires Python {specifier}".format(
specifier=locked_requirement.requires_python,
)
)
)
continue
version_mismatches = []
for resolve_request in resolve_requests:
if (
str(locked_requirement.pin.version)
not in resolve_request.requirement.specifier
):
version_mismatches.append(
"{specifier} ({via})".format(
specifier=resolve_request.requirement.specifier,
via=resolve_request.render_via(),
)
)
constraint = constraints_by_project_name.get(locked_requirement.pin.project_name)
if (
constraint is not None
and str(locked_requirement.pin.version) not in constraint.specifier
):
version_mismatches.append(
"{specifier} (via: constraint)".format(specifier=constraint.specifier)
)
if version_mismatches:
reasons.append(
"{pin} does not satisfy the following requirements:\n{mismatches}".format(
pin=locked_requirement.pin,
mismatches="\n".join(
" {version_mismatch}".format(version_mismatch=version_mismatch)
for version_mismatch in version_mismatches
),
)
)
continue
ranked_artifact = locked_requirement.select_artifact(
target,
build=build,
use_wheel=use_wheel,
)
if not ranked_artifact:
reasons.append(
attributed_reason(
"does not have any compatible artifacts:\n{artifacts}".format(
artifacts="\n".join(
" {url}".format(url=artifact.url)
for artifact in locked_requirement.iter_artifacts()
)
)
)
)
continue
resolved_artifact = _ResolvedArtifact(ranked_artifact, locked_requirement)
if best_match is None or resolved_artifact is best_match.select_higher_rank(
resolved_artifact, prefer_older_binary=prefer_older_binary
):
best_match = resolved_artifact
if not best_match:
if reasons:
errors.append(
"Dependency on {project_name} not satisfied, {count} incompatible "
"{candidates} found:\n{reasons}".format(
project_name=project_name,
count=len(reasons),
candidates=pluralize(reasons, "candidate"),
reasons="\n".join(
"{index}.) {reason}".format(index=index, reason=reason)
for index, reason in enumerate(reasons, start=1)
),
)
)
elif len(resolve_requests) == 1:
errors.append(
"Dependency on {project_name} ({via}) not satisfied, no candidates "
"found.".format(
project_name=project_name, via=resolve_requests[0].render_via()
)
)
else:
errors.append(
"Dependency on {project_name} not satisfied, no candidates found:\n"
" requirers:\n"
" {vias}".format(
project_name=project_name,
vias="\n ".join(rr.render_via() for rr in resolve_requests),
)
)
continue
resolved_artifacts.append(best_match)
if errors:
from_source = " from {source}".format(source=source) if source else ""
return Error(
"Failed to resolve all requirements for {target}{from_source}:\n"
"\n"
"Configured with:\n"
" build: {build}\n"
" use_wheel: {use_wheel}\n"
"\n"
"{errors}".format(
target=target.render_description(),
from_source=from_source,
build=build,
use_wheel=use_wheel,
errors="\n\n".join("{error}".format(error=error) for error in errors),
)
)
return Resolved.create(
target=target,
direct_requirements=requirements,
downloadable_requirements=resolved_artifacts,
)
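# A hedged usage sketch (an assumption, not from the original source): given a
# LockedResolve previously built via LockedResolve.create(...), resolving it for the
# local interpreter might look like:
#
#     result = locked_resolve.resolve(
#         target=LocalInterpreter.create(),
#         requirements=[Requirement.parse("example-pkg==1.0")],
#     )
#     if isinstance(result, Error):
#         ...  # surface the per-candidate reasons collected above
#     else:
#         ...  # download result.downloadable_artifacts and verify their fingerprints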
|
|
"""Module for testing string variables."""
class TestStringVar(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.rawData = []
self.dataByKey = {}
for i in range(1, 11):
stringCol = "String %d" % i
fixedCharCol = ("Fixed Char %d" % i).ljust(40)
rawCol = "Raw %d" % i
if i % 2:
nullableCol = "Nullable %d" % i
else:
nullableCol = None
dataTuple = (i, stringCol, rawCol, fixedCharCol, nullableCol)
self.rawData.append(dataTuple)
self.dataByKey[i] = dataTuple
def testBindString(self):
"test binding in a string"
self.cursor.execute("""
select * from TestStrings
where StringCol = :p_Value""",
p_Value = "String 5")
self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[5]])
def testBindDifferentVar(self):
"test binding a different variable on second execution"
retval_1 = self.cursor.var(cx_Oracle.STRING, 30)
retval_2 = self.cursor.var(cx_Oracle.STRING, 30)
self.cursor.execute("begin :retval := 'Called'; end;",
retval = retval_1)
self.failUnlessEqual(retval_1.getvalue(), "Called")
self.cursor.execute("begin :retval := 'Called'; end;",
retval = retval_2)
self.failUnlessEqual(retval_2.getvalue(), "Called")
def testBindStringAfterNumber(self):
"test binding in a string after setting input sizes to a number"
self.cursor.setinputsizes(p_Value = cx_Oracle.NUMBER)
self.cursor.execute("""
select * from TestStrings
where StringCol = :p_Value""",
p_Value = "String 6")
self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[6]])
def testBindStringArrayDirect(self):
"test binding in a string array"
returnValue = self.cursor.var(cx_Oracle.NUMBER)
array = [r[1] for r in self.rawData]
statement = """
begin
:p_ReturnValue := pkg_TestStringArrays.TestInArrays(
:p_IntegerValue, :p_Array);
end;"""
self.cursor.execute(statement,
p_ReturnValue = returnValue,
p_IntegerValue = 5,
p_Array = array)
self.failUnlessEqual(returnValue.getvalue(), 86)
array = [ "String - %d" % i for i in range(15) ]
self.cursor.execute(statement,
p_IntegerValue = 8,
p_Array = array)
self.failUnlessEqual(returnValue.getvalue(), 163)
def testBindStringArrayBySizes(self):
"test binding in a string array (with setinputsizes)"
returnValue = self.cursor.var(cx_Oracle.NUMBER)
self.cursor.setinputsizes(p_Array = [cx_Oracle.STRING, 10])
array = [r[1] for r in self.rawData]
self.cursor.execute("""
begin
:p_ReturnValue := pkg_TestStringArrays.TestInArrays(
:p_IntegerValue, :p_Array);
end;""",
p_ReturnValue = returnValue,
p_IntegerValue = 6,
p_Array = array)
self.failUnlessEqual(returnValue.getvalue(), 87)
def testBindStringArrayByVar(self):
"test binding in a string array (with arrayvar)"
returnValue = self.cursor.var(cx_Oracle.NUMBER)
array = self.cursor.arrayvar(cx_Oracle.STRING, 10, 20)
array.setvalue(0, [r[1] for r in self.rawData])
self.cursor.execute("""
begin
:p_ReturnValue := pkg_TestStringArrays.TestInArrays(
:p_IntegerValue, :p_Array);
end;""",
p_ReturnValue = returnValue,
p_IntegerValue = 7,
p_Array = array)
self.failUnlessEqual(returnValue.getvalue(), 88)
def testBindInOutStringArrayByVar(self):
"test binding in/out a string array (with arrayvar)"
array = self.cursor.arrayvar(cx_Oracle.STRING, 10, 100)
originalData = [r[1] for r in self.rawData]
expectedData = ["Converted element # %d originally had length %d" % \
(i, len(originalData[i - 1])) for i in range(1, 6)] + \
originalData[5:]
array.setvalue(0, originalData)
self.cursor.execute("""
begin
pkg_TestStringArrays.TestInOutArrays(:p_NumElems, :p_Array);
end;""",
p_NumElems = 5,
p_Array = array)
self.failUnlessEqual(array.getvalue(), expectedData)
def testBindOutStringArrayByVar(self):
"test binding out a string array (with arrayvar)"
array = self.cursor.arrayvar(cx_Oracle.STRING, 6, 100)
expectedData = ["Test out element # %d" % i for i in range(1, 7)]
self.cursor.execute("""
begin
pkg_TestStringArrays.TestOutArrays(:p_NumElems, :p_Array);
end;""",
p_NumElems = 6,
p_Array = array)
self.failUnlessEqual(array.getvalue(), expectedData)
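# Illustrative note (an assumption about the cx_Oracle API, not part of the original
# tests): in the arrayvar() calls above the second argument is the number of array
# elements and the third the maximum length of each string element, so
# cursor.arrayvar(cx_Oracle.STRING, 6, 100) allocates a 6-element array of strings of up
# to 100 characters for the PL/SQL out parameter to fill.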
def testBindRaw(self):
"test binding in a raw"
self.cursor.setinputsizes(p_Value = cx_Oracle.BINARY)
self.cursor.execute("""
select * from TestStrings
where RawCol = :p_Value""",
p_Value = "Raw 4")
self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[4]])
def testBindAndFetchRowid(self):
"test binding (and fetching) a rowid"
self.cursor.execute("""
select rowid
from TestStrings
where IntCol = 3""")
rowid, = self.cursor.fetchone()
self.cursor.execute("""
select *
from TestStrings
where rowid = :p_Value""",
p_Value = rowid)
self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[3]])
def testBindNull(self):
"test binding in a null"
self.cursor.execute("""
select * from TestStrings
where StringCol = :p_Value""",
p_Value = None)
self.failUnlessEqual(self.cursor.fetchall(), [])
def testBindOutSetInputSizesByType(self):
"test binding out with set input sizes defined (by type)"
vars = self.cursor.setinputsizes(p_Value = cx_Oracle.STRING)
self.cursor.execute("""
begin
:p_Value := 'TSI';
end;""")
self.failUnlessEqual(vars["p_Value"].getvalue(), "TSI")
def testBindOutSetInputSizesByInteger(self):
"test binding out with set input sizes defined (by integer)"
vars = self.cursor.setinputsizes(p_Value = 30)
self.cursor.execute("""
begin
:p_Value := 'TSI (I)';
end;""")
self.failUnlessEqual(vars["p_Value"].getvalue(), "TSI (I)")
def testBindInOutSetInputSizesByType(self):
"test binding in/out with set input sizes defined (by type)"
vars = self.cursor.setinputsizes(p_Value = cx_Oracle.STRING)
self.cursor.execute("""
begin
:p_Value := :p_Value || ' TSI';
end;""",
p_Value = "InVal")
self.failUnlessEqual(vars["p_Value"].getvalue(), "InVal TSI")
def testBindInOutSetInputSizesByInteger(self):
"test binding in/out with set input sizes defined (by integer)"
vars = self.cursor.setinputsizes(p_Value = 30)
self.cursor.execute("""
begin
:p_Value := :p_Value || ' TSI (I)';
end;""",
p_Value = "InVal")
self.failUnlessEqual(vars["p_Value"].getvalue(), "InVal TSI (I)")
def testBindOutVar(self):
"test binding out with cursor.var() method"
var = self.cursor.var(cx_Oracle.STRING)
self.cursor.execute("""
begin
:p_Value := 'TSI (VAR)';
end;""",
p_Value = var)
self.failUnlessEqual(var.getvalue(), "TSI (VAR)")
def testBindInOutVarDirectSet(self):
"test binding in/out with cursor.var() method"
var = self.cursor.var(cx_Oracle.STRING)
var.setvalue(0, "InVal")
self.cursor.execute("""
begin
:p_Value := :p_Value || ' TSI (VAR)';
end;""",
p_Value = var)
self.failUnlessEqual(var.getvalue(), "InVal TSI (VAR)")
def testBindLongString(self):
"test that binding a long string succeeds"
self.cursor.execute("""
declare
t_Temp varchar2(10000);
begin
t_Temp := :bigString;
end;""",
bigString = "X" * 10000)
def testBindLongStringAfterSettingSize(self):
"test that setinputsizes() returns a long variable"
var = self.cursor.setinputsizes(test = 90000)["test"]
self.failUnlessEqual(type(var), cx_Oracle.LONG_STRING)
inString = "1234567890" * 9000
var.setvalue(0, inString)
outString = var.getvalue()
self.failUnlessEqual(inString, outString,
"output does not match: in was %d, out was %d" % \
(len(inString), len(outString)))
def testStringMaximumReached(self):
"test that an error is raised when maximum string length exceeded"
var = self.cursor.setinputsizes(test = 100)["test"]
inString = "1234567890" * 400
var.setvalue(0, inString)
outString = var.getvalue()
self.failUnlessEqual(inString, outString,
"output does not match: in was %d, out was %d" % \
(len(inString), len(outString)))
badStringSize = 4000 * self.connection.maxBytesPerCharacter + 1
inString = "X" * badStringSize
self.failUnlessRaises(ValueError, var.setvalue, 0, inString)
def testCursorDescription(self):
"test cursor description is accurate"
self.cursor.execute("select * from TestStrings")
self.failUnlessEqual(self.cursor.description,
[ ('INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
('STRINGCOL', cx_Oracle.STRING, 20, 20, 0, 0, 0),
('RAWCOL', cx_Oracle.BINARY, 30, 30, 0, 0, 0),
('FIXEDCHARCOL', cx_Oracle.FIXED_CHAR, 40, 40, 0, 0, 0),
('NULLABLECOL', cx_Oracle.STRING, 50, 50, 0, 0, 1) ])
def testFetchAll(self):
"test that fetching all of the data returns the correct results"
self.cursor.execute("select * From TestStrings order by IntCol")
self.failUnlessEqual(self.cursor.fetchall(), self.rawData)
self.failUnlessEqual(self.cursor.fetchall(), [])
def testFetchMany(self):
"test that fetching data in chunks returns the correct results"
self.cursor.execute("select * From TestStrings order by IntCol")
self.failUnlessEqual(self.cursor.fetchmany(3), self.rawData[0:3])
self.failUnlessEqual(self.cursor.fetchmany(2), self.rawData[3:5])
self.failUnlessEqual(self.cursor.fetchmany(4), self.rawData[5:9])
self.failUnlessEqual(self.cursor.fetchmany(3), self.rawData[9:])
self.failUnlessEqual(self.cursor.fetchmany(3), [])
def testFetchOne(self):
"test that fetching a single row returns the correct results"
self.cursor.execute("""
select *
from TestStrings
where IntCol in (3, 4)
order by IntCol""")
self.failUnlessEqual(self.cursor.fetchone(), self.dataByKey[3])
self.failUnlessEqual(self.cursor.fetchone(), self.dataByKey[4])
self.failUnlessEqual(self.cursor.fetchone(), None)
|
|
"""The following methods may be used to calculate the crosscorrelation and
autocorrelation for a time series.
These methods are 'special' in the sense that they are able to deal with
irregular time steps often observed in hydrological time series.
"""
from numpy import (append, arange, array, average, corrcoef, diff, empty_like,
exp, inf, nan, ones, pi, sqrt)
from pandas import DataFrame, Timedelta, TimedeltaIndex
from scipy.stats import norm
from ..decorators import njit
from ..utils import check_numba
def acf(x, lags=365, bin_method='rectangle', bin_width=0.5, max_gap=inf,
min_obs=20, full_output=False, alpha=0.05):
"""Calculate the autocorrelation function for irregular time steps.
Parameters
----------
x: pandas.Series
Pandas Series containing the values to calculate the
cross-correlation for. The index has to be a Pandas.DatetimeIndex
lags: array_like, optional
numpy array containing the lags in days for which the
cross-correlation is calculated. [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12,
13, 14, 30, 61, 90, 120, 150, 180, 210, 240, 270, 300, 330, 365]
bin_method: str, optional
method to determine the type of bin. Options are "rectangle" (default),
and "gaussian".
bin_width: float, optional
number of days used as the width for the bin to calculate the
correlation. By default these values are chosen based on the
bin_method and the average time step (dt_mu): 0.5 * dt_mu when
bin_method="rectangle" and 0.25 * dt_mu when bin_method="gaussian".
max_gap: float, optional
Maximum time step gap in the data. All time steps above this gap value
are not used for calculating the average time step. This can be
helpful when there is a large gap in the data that influences the
average time step.
min_obs: int, optional
Minimum number of observations in a bin to determine the correlation.
full_output: bool, optional
If True, also estimated uncertainties are returned. Default is False.
alpha: float
alpha level to compute the confidence interval (e.g., 1-alpha).
Returns
-------
c: pandas.Series or pandas.DataFrame
The autocorrelation function for the provided lags.
Notes
-----
Calculate the autocorrelation function for irregular timesteps based on
the slotting technique. Different methods (kernels) to bin the data are
available.
References
----------
Rehfeld, K., Marwan, N., Heitzig, J., Kurths, J. (2011). Comparison
of correlation analysis techniques for irregularly sampled time series.
Nonlinear Processes in Geophysics. 18. 389-404. doi:10.5194/npg-18-389-2011.
Tip
---
If the time series has regular time steps, we recommend using the acf
method from the statsmodels package.
Examples
--------
For example, to estimate the autocorrelation for every second lag up to
lags of one year:
>>> acf = ps.stats.acf(x, lags=np.arange(1.0, 366.0, 2.0))
See Also
--------
pastas.stats.ccf
statsmodels.api.tsa.acf
"""
c = ccf(x=x, y=x, lags=lags, bin_method=bin_method, bin_width=bin_width,
max_gap=max_gap, min_obs=min_obs, full_output=full_output,
alpha=alpha)
c.name = "ACF"
if full_output:
return c.rename(columns={"ccf": "acf"})
else:
return c
def ccf(x, y, lags=365, bin_method='rectangle', bin_width=0.5,
max_gap=inf, min_obs=20, full_output=False, alpha=0.05):
"""Method to compute the cross-correlation for irregular time series.
Parameters
----------
x,y: pandas.Series
Pandas Series containing the values to calculate the
cross-correlation for. The index has to be a Pandas.DatetimeIndex
lags: array_like, optional
numpy array containing the lags in days for which the
cross-correlation is calculated. Default [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
12, 13, 14, 30, 61, 90, 120, 150, 180, 210, 240, 270, 300, 330, 365]
bin_method: str, optional
method to determine the type of bin. Options are "rectangle" (default),
"gaussian" and "regular" (for regular timesteps).
bin_width: float, optional
number of days used as the width for the bin to calculate the
correlation. By default these values are chosen based on the
bin_method and the average time step (dt_mu): 0.5 * dt_mu when
bin_method="rectangle" and 0.25 * dt_mu when bin_method="gaussian".
max_gap: float, optional
Maximum timestep gap in the data. All timesteps above this gap value
are not used for calculating the average timestep. This can be
helpful when there is a large gap in the data that influences the
average timestep.
min_obs: int, optional
Minimum number of observations in a bin to determine the correlation.
full_output: bool, optional
If True, also estimated uncertainties are returned. Default is False.
alpha: float
alpha level to compute the confidence interval (e.g., 1-alpha).
Returns
-------
c: pandas.Series or pandas.DataFrame
The Cross-correlation function.
References
----------
Rehfeld, K., Marwan, N., Heitzig, J., Kurths, J. (2011). Comparison
of correlation analysis techniques for irregularly sampled time series.
Nonlinear Processes in Geophysics. 18. 389-404. doi:10.5194/npg-18-389-2011.
Tip
---
This method will be significantly faster when Numba is installed. Check
out the Numba project at https://numba.pydata.org.
Examples
--------
>>> ccf = ps.stats.ccf(x, y, bin_method="gaussian")
"""
# prepare the time indices for x and y
if x.index.inferred_freq and y.index.inferred_freq:
bin_method = "regular"
elif bin_method == "regular":
raise Warning("time series does not have regular time steps, "
"choose different bin_method")
x, t_x, dt_x_mu = _preprocess(x, max_gap=max_gap)
y, t_y, dt_y_mu = _preprocess(y, max_gap=max_gap)
dt_mu = max(dt_x_mu, dt_y_mu) # Mean time step from both series
if isinstance(lags, int) and bin_method == "regular":
lags = arange(int(dt_mu), lags + 1, int(dt_mu), dtype=float)
elif isinstance(lags, int):
lags = arange(1.0, lags + 1, dtype=float)
elif isinstance(lags, list):
lags = array(lags, dtype=float)
if bin_method == "rectangle":
if bin_width is None:
bin_width = 0.5 * dt_mu
check_numba()
c, b = _compute_ccf_rectangle(lags, t_x, x, t_y, y, bin_width)
elif bin_method == "gaussian":
if bin_width is None:
bin_width = 0.25 * dt_mu
check_numba()
c, b = _compute_ccf_gaussian(lags, t_x, x, t_y, y, bin_width)
elif bin_method == "regular":
c, b = _compute_ccf_regular(arange(1.0, len(lags) + 1), x, y)
else:
raise NotImplementedError
std = norm.ppf(1 - alpha / 2.) / sqrt(b)
result = DataFrame(data={"ccf": c, "stderr": std, "n": b},
index=TimedeltaIndex(lags, unit="D", name="Lags"))
result = result.where(result.n > min_obs).dropna()
if full_output:
return result
else:
return result.ccf
def _preprocess(x, max_gap):
"""Internal method to preprocess the time series."""
dt = x.index.to_series().diff().dropna().values / Timedelta(1, "D")
dt_mu = dt[dt < max_gap].mean() # Deal with big gaps if present
t = dt.cumsum()
# Normalize the values and create numpy arrays
x = (x.values - x.values.mean()) / x.values.std()
return x, t, dt_mu
@njit
def _compute_ccf_rectangle(lags, t_x, x, t_y, y, bin_width=0.5):
"""Internal numba-optimized method to compute the ccf."""
c = empty_like(lags)
b = empty_like(lags)
l = len(lags)
n = len(t_x)
for k in range(l):
cl = 0.
b_sum = 0.
for i in range(n):
for j in range(n):
d = abs(t_x[i] - t_y[j]) - lags[k]
if abs(d) <= bin_width:
cl += x[i] * y[j]
b_sum += 1
if b_sum == 0.:
c[k] = nan
b[k] = 0.01 # Prevent division by zero error
else:
c[k] = cl / b_sum
b[k] = b_sum / 2 # divide by 2 because we over-count in the nested for-loops
return c, b
@njit
def _compute_ccf_gaussian(lags, t_x, x, t_y, y, bin_width=0.5):
"""Internal numba-optimized method to compute the ccf."""
c = empty_like(lags)
b = empty_like(lags)
l = len(lags)
n = len(t_x)
den1 = -2 * bin_width ** 2 # denominator 1
den2 = sqrt(2 * pi * bin_width) # denominator 2
for k in range(l):
cl = 0.
b_sum = 0.
for i in range(n):
for j in range(n):
d = t_x[i] - t_y[j] - lags[k]
d = exp(d ** 2 / den1) / den2
cl += x[i] * y[j] * d
b_sum += d
if b_sum == 0.:
c[k] = nan
b[k] = 0.01 # Prevent division by zero error
else:
c[k] = cl / b_sum
b[k] = b_sum / 2 # divide by 2 because we over-count in the nested for-loops
return c, b
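# As implemented above, each pair of observations is weighted with the kernel
# exp(-((t_x[i] - t_y[j]) - lag)**2 / (2 * bin_width**2)) / sqrt(2 * pi * bin_width),
# so pairs whose time difference is close to the requested lag contribute most to the
# slotted cross-correlation estimate.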
def _compute_ccf_regular(lags, x, y):
c = empty_like(lags)
for i, lag in enumerate(lags):
c[i] = corrcoef(x[:-int(lag)], y[int(lag):])[0, 1]
b = len(x) - lags
return c, b
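# For regular time steps no slotting is needed: c[i] is simply the Pearson correlation
# between x and a copy of y shifted by `lag` samples, and b holds the number of
# overlapping observations available at each lag.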
def mean(x, weighted=True, max_gap=30):
"""Method to compute the (weighted) mean of a time series.
Parameters
----------
x: pandas.Series
Series with the values and a DatetimeIndex as an index.
weighted: bool, optional
Weight the values by the normalized time step to account for
irregular time series. Default is True.
max_gap: int, optional
maximum allowed gap period in days to use for the computation of the
weights. All time steps larger than max_gap are replaced with the
mean weight. Default value is 30 days.
Notes
-----
The (weighted) mean for a time series x is computed as:
.. math:: \\bar{x} = \\sum_{i=1}^{N} w_i x_i
where :math:`w_i` are the weights, taken as the time step between
observations, normalized by the sum of all time steps.
"""
w = _get_weights(x, weighted=weighted, max_gap=max_gap)
return average(x.to_numpy(), weights=w)
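# A hedged, illustrative example (not part of the original module): for an irregular
# series observed on days 0, 1 and 3 with values 3.0, 6.0 and 9.0, the weights computed
# by _get_weights are proportional to the preceding time steps [0, 1, 2] days, so the
# weighted mean is (0 * 3.0 + 1 * 6.0 + 2 * 9.0) / 3 = 8.0 rather than the plain mean
# of 6.0.
def _example_weighted_mean():
    from pandas import Series, to_datetime
    x = Series([3.0, 6.0, 9.0],
               index=to_datetime(["2000-01-01", "2000-01-02", "2000-01-04"]))
    return mean(x, weighted=True)  # ~8.0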
def var(x, weighted=True, max_gap=30):
"""Method to compute the (weighted) variance of a time series.
Parameters
----------
x: pandas.Series
Series with the values and a DatetimeIndex as an index.
weighted: bool, optional
Weight the values by the normalized time step to account for
irregular time series. Default is True.
max_gap: int, optional
maximum allowed gap period in days to use for the computation of the
weights. All time steps larger than max_gap are replaced with the
mean weight. Default value is 30 days.
Notes
-----
The (weighted) variance for a time series x is computed as:
.. math:: \\sigma_x^2 = \\sum_{i=1}^{N} w_i (x_i - \\bar{x})^2
where :math:`w_i` are the weights, taken as the time step between
observations, normalized by the sum of all time steps. Note how the
weighted mean (:math:`\\bar{x}`) is used in this formula.
"""
w = _get_weights(x, weighted=weighted, max_gap=max_gap)
mu = average(x.to_numpy(), weights=w)
sigma = (x.size / (x.size - 1) * w * (x.to_numpy() - mu) ** 2).sum()
return sigma
def std(x, weighted=True, max_gap=30):
"""Method to compute the (weighted) variance of a time series.
Parameters
----------
x: pandas.Series
Series with the values and a DatetimeIndex as an index.
weighted: bool, optional
Weight the values by the normalized time step to account for
irregular time series. Default is True.
max_gap: int, optional
maximum allowed gap period in days to use for the computation of the
weights. All time steps larger than max_gap are replaced with the
mean weight. Default value is 30 days.
See Also
--------
ps.stats.mean, ps.stats.var
"""
return sqrt(var(x, weighted=weighted, max_gap=max_gap))
# Helper functions
def _get_weights(x, weighted, max_gap=30):
"""Helper method to compute the weights as the time step between obs.
Parameters
----------
x: pandas.Series
Series with the values and a DatetimeIndex as an index.
weighted: bool, optional
Weight the values by the normalized time step to account for
irregular time series.
max_gap: int, optional
maximum allowed gap period in days to use for the computation of the
weights. All time steps larger than max_gap are replaced with the
mean weight. Default value is 30 days.
"""
if weighted:
w = append(0.0, diff(x.index.to_numpy()) / Timedelta("1D"))
w[w > max_gap] = max_gap
else:
w = ones(x.index.size)
w /= w.sum()
return w
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import json
import os
import threading
import time
from girder import config
from girder.models.file import File
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.user import User
from tests import base
# Boilerplate to start and stop the server
os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_TEST_PORT', '20200')
config.loadConfig() # Must reload config to pickup correct port
def setUpModule():
base.enabledPlugins.append('database_assetstore')
base.startServer(False)
def tearDownModule():
base.stopServer()
def mergeDicts(*args):
"""
Merge dictionaries.
:param *args: any number of dictionaries.
:returns: a merged dictionary
"""
result = {}
for item in args:
result.update(item)
return result
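# For example, mergeDicts({'a': 1}, {'a': 2, 'b': 3}) returns {'a': 2, 'b': 3}; later
# arguments take precedence over earlier ones.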
class FileTest(base.TestCase):
dbParams = {
'name': 'Assetstore 1',
'type': 'database', # AssetstoreType.DATABASE
'dbtype': 'sqlalchemy_postgres',
'dburi': os.environ.get('GIRDER_DATABASE_ASSETSTORE_POSTGRES_DB',
'postgresql://postgres@127.0.0.1/sampledb'),
}
def _setupDbFiles(self, args={}):
"""
Set up db files, one using sqlalchemy_postgres, one sqlalchemy, and
one not fully specified. This creates a database assetstore for each
of the first two.
:param args: additional arguments to set on database connections.
:returns: the three file ids.
"""
dbParams = self.dbParams.copy()
dbParams.update(args)
resp = self.request(method='POST', path='/assetstore', user=self.admin,
params=dbParams)
self.assertStatusOk(resp)
self.assetstore1 = resp.json
dbParams2 = dbParams.copy()
dbParams2['name'] = 'Assetstore 2'
dbParams2['dbtype'] = 'sqlalchemy'
# Use the generic sql class, rather than the dialect-specific class.
dbParams2['dburi'] = 'sqlalchemy:' + dbParams2['dburi']
dbParams2.update(args)
resp = self.request(method='POST', path='/assetstore', user=self.admin,
params=dbParams2)
self.assertStatusOk(resp)
self.assetstore2 = resp.json
from girder.plugins.database_assetstore.assetstore import DB_INFO_KEY
self.file1 = File().createFile(
name='file1', creator=self.admin, item=self.item1, size=0,
assetstore=self.assetstore1, saveFile=False)
self.file1[DB_INFO_KEY] = {'table': 'towns'}
File().save(self.file1)
self.file2 = File().createFile(
name='file2', creator=self.admin, item=self.item2, size=0,
assetstore=self.assetstore2, saveFile=False)
self.file2[DB_INFO_KEY] = {'table': 'towns'}
File().save(self.file2)
self.file3 = File().createFile(
name='file3', creator=self.admin, item=self.item1, size=0,
assetstore=self.assetstore1, saveFile=True)
fileId = str(self.file1['_id'])
fileId2 = str(self.file2['_id'])
fileId3 = str(self.file3['_id'])
return fileId, fileId2, fileId3
def setUp(self):
base.TestCase.setUp(self)
users = ({
'email': 'good@email.com',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword'
}, {
'email': 'regularuser@email.com',
'login': 'regularuser',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword'
})
self.admin, self.user = [
User().createUser(**user) for user in users]
folders = Folder().childFolders(
self.admin, 'user', user=self.admin)
for folder in folders:
if folder['name'] == 'Public':
self.publicFolder = folder
self.item1 = Item().createItem(
'item1', creator=self.admin, folder=self.publicFolder)
self.item2 = Item().createItem(
'item2', creator=self.admin, folder=self.publicFolder)
def testFileDatabaseEndpoints(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
resp = self.request(path='/file/notafile/database', user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid ObjectId', resp.json['message'])
resp = self.request(path='/file/%s/database' % ('f' * len(fileId)),
user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid file', resp.json['message'])
resp = self.request(path='/file/%s/database' % fileId3, user=self.admin)
self.assertStatusOk(resp)
self.assertIsNone(resp.json)
resp = self.request(path='/file/%s/database' % fileId3, user=self.user)
self.assertStatusOk(resp)
self.assertIs(resp.json, False)
# Test the POST endpoint
resp = self.request(method='POST', path='/file/notafile/database',
user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid ObjectId', resp.json['message'])
resp = self.request(method='POST', path='/file/%s/database' % (
'f' * len(fileId)), user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid file', resp.json['message'])
resp = self.request(method='POST', path='/file/%s/database' % (
fileId3, ), user=self.admin, type='application/json',
body=json.dumps({}))
self.assertStatus(resp, 400)
self.assertIn('must have a non-blank table value',
resp.json['message'])
params = {'table': 'towns', 'limit': 40}
resp = self.request(method='POST', path='/file/%s/database' % (
fileId3, ), user=self.admin, type='application/json',
body=json.dumps(params))
self.assertStatusOk(resp)
resp = self.request(path='/file/%s/database' % fileId3, user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, params)
resp = self.request(path='/file/%s/database' % fileId3, user=self.user)
self.assertStatusOk(resp)
self.assertIs(resp.json, True)
params['table'] = None
resp = self.request(method='POST', path='/file/%s/database' % (
fileId3, ), user=self.admin, type='application/json',
body=json.dumps(params))
self.assertStatus(resp, 400)
self.assertIn('must have a non-blank table value',
resp.json['message'])
resp = self.request(method='POST', path='/file/%s/database' % (
fileId3, ), user=self.admin, type='application/json',
body=json.dumps({'other': 'value'}))
params['table'] = 'towns'
params['other'] = 'value'
resp = self.request(path='/file/%s/database' % fileId3, user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, params)
def testFileDatabaseBadConnectors(self):
from girder.plugins.database_assetstore import dbs
self.assertIsNone(dbs.getDBConnector('test1', {'uri': 'base'}))
dbs.base.registerConnectorClass('base', dbs.base.DatabaseConnector, {})
with self.assertRaises(dbs.DatabaseConnectorException):
dbs.getDBConnector('test1', {'uri': 'base'})
del dbs.base._connectorClasses['base']
class ValidatingConnector(dbs.base.DatabaseConnector):
def validate(self, *args, **kwargs):
return True
self.assertIsNone(dbs.getDBConnector('test1', {'type': 'validating'}))
dbs.base.registerConnectorClass('validating', ValidatingConnector, {})
self.assertIsNone(dbs.getDBConnector('test1', {'type': 'validating'}))
del dbs.base._connectorClasses['validating']
def testFileDatabaseBaseConnectorClass(self):
from girder.plugins.database_assetstore import dbs
# We have to subclass the base class and allow it to validate, or we
# can't create an instance of the class.
class ValidatingConnector(dbs.base.DatabaseConnector):
def validate(self, *args, **kwargs):
return True
conn = ValidatingConnector()
res = conn.performSelect()
self.assertEqual(res['data'], [])
self.assertEqual(res['fields'], [])
self.assertFalse(super(ValidatingConnector, conn).validate())
self.assertTrue(conn.checkOperatorDatatype('unknown', 'unknown'))
self.assertEqual(conn.getTableList('ignore'), [])
def testFileDatabaseFields(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
resp = self.request(path='/file/%s/database/fields' % (
fileId3, ), user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('not a database link', resp.json['message'])
resp = self.request(path='/file/notafile/database/fields',
user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid ObjectId', resp.json['message'])
resp = self.request(path='/file/%s/database/fields' % (
'f' * len(fileId)), user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid file', resp.json['message'])
resp = self.request(path='/file/%s/database/fields' % (
fileId, ), user=self.admin)
self.assertStatusOk(resp)
self.assertTrue(any([
col for col in resp.json if col['name'] == 'town']))
self.assertTrue(any([
col for col in resp.json if col['name'] == 'town' and col['datatype'] == 'string']))
self.assertTrue(any([
col for col in resp.json if col['name'] == 'geom' and col['datatype'] == 'geometry']))
resp = self.request(path='/file/%s/database/fields' % (
fileId, ), user=self.user)
self.assertStatusOk(resp)
self.assertTrue(len([
col for col in resp.json if col['name'] == 'town']) > 0)
# break the database link
resp = self.request(method='POST', path='/file/%s/database' % (
fileId, ), user=self.admin, type='application/json',
body=json.dumps({'table': '_notpresent'}))
self.assertStatusOk(resp)
with self.assertRaises(Exception):
resp = self.request(path='/file/%s/database/fields' % (
fileId, ), user=self.admin)
def testFileDatabaseRefresh(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
resp = self.request(method='PUT', path='/file/%s/database/refresh' % (
fileId3, ), user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('not a database link', resp.json['message'])
resp = self.request(
method='PUT', path='/file/notafile/database/refresh',
user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid ObjectId', resp.json['message'])
resp = self.request(method='PUT', path='/file/%s/database/refresh' % (
'f' * len(fileId)), user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid file', resp.json['message'])
resp = self.request(method='PUT', path='/file/%s/database/refresh' % (
fileId, ), user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['refreshed'], False)
# Get fields so we will have something to refresh
resp = self.request(path='/file/%s/database/fields' % (
fileId, ), user=self.user)
self.assertStatusOk(resp)
resp = self.request(method='PUT', path='/file/%s/database/refresh' % (
fileId, ), user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['refreshed'], True)
resp = self.request(method='PUT', path='/file/%s/database/refresh' % (
fileId, ), user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['refreshed'], False)
def testFileDatabaseView(self):
# Test that we can get data from a view (this is the same as accessing
# a table without a primary key)
fileId, fileId2, fileId3 = self._setupDbFiles()
params = {'table': 'geometry_columns'}
resp = self.request(method='POST', path='/file/%s/database' % (
fileId, ), user=self.admin, type='application/json',
body=json.dumps(params))
self.assertStatusOk(resp)
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user)
self.assertStatusOk(resp)
self.assertGreater(len(resp.json['data']), 10)
self.assertGreater(resp.json['datacount'], 10)
self.assertEqual(len(resp.json['columns']), len(resp.json['fields']))
def testFileDatabaseSelectBasic(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
resp = self.request(path='/file/%s/database/select' % (
fileId3, ), user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('not a database link', resp.json['message'])
resp = self.request(path='/file/notafile/database/select',
user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid ObjectId', resp.json['message'])
resp = self.request(path='/file/%s/database/select' % (
'f' * len(fileId)), user=self.admin)
self.assertStatus(resp, 400)
self.assertIn('Invalid file', resp.json['message'])
# Test the default query
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 50)
self.assertEqual(resp.json['datacount'], 50)
self.assertEqual(len(resp.json['columns']), len(resp.json['fields']))
# Test limit and offset using a basic sort
params = {'sort': 'town', 'limit': 5}
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['datacount'], 5)
self.assertEqual(len(resp.json['columns']), len(resp.json['fields']))
lastData = resp.json
params['offset'] = 2
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][:-2], lastData['data'][2:])
# break the database link
resp = self.request(method='POST', path='/file/%s/database' % (
fileId, ), user=self.admin, type='application/json',
body=json.dumps({'table': '_notpresent'}))
self.assertStatusOk(resp)
with self.assertRaises(Exception):
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.admin, params=params)
def testFileDatabaseSelectSort(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
# Test a variety of sorts
params = {'sort': 'town', 'limit': 5}
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
lastData = resp.json
params = {'sort': 'town', 'sortdir': -1, 'limit': 5}
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertNotEqual(resp.json['data'][:1], lastData['data'][:1])
self.assertGreater(resp.json['data'][0][resp.json['columns']['town']],
lastData['data'][0][lastData['columns']['town']])
# Use a json sort specification
params = {'sort': json.dumps(['town']), 'limit': 5}
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'], lastData['data'])
# This should work fine on file2
resp = self.request(path='/file/%s/database/select' % (
fileId2, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'], lastData['data'])
# Use a function
params['sort'] = json.dumps([{
'func': 'mod', 'param': [{'field': 'pop2010'}, 10]},
['town', -1]
])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(
int(resp.json['data'][0][resp.json['columns']['pop2010']]) % 10, 0)
self.assertGreater(resp.json['data'][0][resp.json['columns']['town']],
resp.json['data'][1][resp.json['columns']['town']])
# This must not work on file2
with self.assertRaises(Exception):
resp = self.request(path='/file/%s/database/select' % (
fileId2, ), user=self.user, params=params)
# Test with bad parameters
params['sort'] = '["not valid json'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must be a JSON list', resp.json['message'])
params['sort'] = json.dumps({'not': ['a', 'list']})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must be a JSON list', resp.json['message'])
params['sort'] = 'unknownfield'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must use known fields', resp.json['message'])
params['sort'] = json.dumps([['town'], ['unknownfield', -1]])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must use known fields', resp.json['message'])
def testFileDatabaseSelectFields(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
# Unknown fields aren't allowed
params = {'fields': 'unknown,town', 'limit': 5}
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must use known fields', resp.json['message'])
# a comma separated list works
params['fields'] = 'town,pop2010'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], ['town', 'pop2010'])
self.assertEqual(resp.json['columns'], {'town': 0, 'pop2010': 1})
# extra commas and white space at the ends of field names are allowed
params['fields'] = 'town ,, pop2010 ,,'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], ['town', 'pop2010'])
self.assertEqual(resp.json['columns'], {'town': 0, 'pop2010': 1})
# You can use json instead
params['fields'] = json.dumps(['town', 'pop2010'])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], ['town', 'pop2010'])
self.assertEqual(resp.json['columns'], {'town': 0, 'pop2010': 1})
# Invalid json fails
params['fields'] = '["not valid json",town'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must be a JSON list', resp.json['message'])
# A zero-length list is all of the fields
params['fields'] = json.dumps([])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertGreater(len(resp.json['fields']), 2)
# instead of a field name, you can use a function
params['fields'] = json.dumps([
'town',
{'func': 'mod', 'param': [{'field': 'pop2010'}, 10]},
])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], json.loads(params['fields']))
self.assertEqual(resp.json['columns'], {'town': 0, 'column_1': 1})
# This must not work on file2
with self.assertRaises(Exception):
resp = self.request(path='/file/%s/database/select' % (
fileId2, ), user=self.user, params=params)
# We can use a reference to better find our column
params['fields'] = json.dumps([
'town',
{
'func': 'mod',
'param': [{'field': 'pop2010'}, 10],
'reference': 'popmod'
},
])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], json.loads(params['fields']))
self.assertEqual(resp.json['columns'], {'town': 0, 'popmod': 1})
# reference with csv
params['format'] = 'csv'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params, isJson=False)
self.assertStatusOk(resp)
data = self.getBody(resp)
self.assertEqual(data.split('\r\n', 1)[0].split(','), ['town', 'popmod'])
del params['format']
# Distinct and count can always be used as functions
# Distinct must be the first field
params['fields'] = json.dumps([
{'func': 'distinct', 'param': [{'field': 'pop2010'}]},
'town',
])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], json.loads(params['fields']))
self.assertEqual(resp.json['columns'], {'town': 1, 'column_0': 0})
# Count will return the tally of the distinct values
params['fields'] = json.dumps([
{'func': 'count', 'param': [{'func': 'distinct', 'param': [{'field': 'pop2010'}]}]},
])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], json.loads(params['fields']))
self.assertEqual(resp.json['columns'], {'column_0': 0})
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 348)
# Cast can be used as function
params['format'] = 'GeoJSON'
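        # The single field below is a nested function expression that builds a
        # GeoJSON Feature per row: the geometry comes from
        # st_asgeojson(st_transform(geom, 4326)) cast to JSON, and the
        # properties carry the town name and 2010 population.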
params['fields'] = json.dumps([{
'func': 'json_build_object', 'param': [
'type', 'Feature', 'geometry', {
'func': 'cast', 'param': [{
'func': 'st_asgeojson', 'param': [{
'func': 'st_transform', 'param': [{'field': 'geom'}, 4326]
}]}, 'JSON']
},
'properties', {
'func': 'json_build_object', 'param': [
'town', {'field': 'town'},
'pop2010', {'field': 'pop2010'}]
}
]
}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
geojson = resp.json
self.assertTrue(isinstance(geojson, dict))
self.assertEqual(geojson['type'], 'FeatureCollection')
self.assertEqual(len(geojson['features']), 5)
self.assertEqual(geojson['features'][0]['type'], 'Feature')
self.assertIn('town', geojson['features'][0]['properties'])
# Test some function handling
del params['format']
params['sort'] = 'town'
params['fields'] = json.dumps([
{'func': 'lower', 'param': {'field': 'town'}}
])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'abington')
        # This uses 'town' as a literal value in the first field, not a field reference
params['fields'] = json.dumps([
{'func': 'lower', 'param': 'town'},
'town'
])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'town')
# Function parameters must be fields, values, or other functions
params['fields'] = json.dumps([
{'func': 'lower', 'param': {'unknown': 'town'}}
])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must use known fields', resp.json['message'])
# Fields in functions must exist
params['fields'] = json.dumps([
{'func': 'lower', 'param': {'field': 'unknown'}}
])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must use known fields', resp.json['message'])
# We don't have to use a function
params['fields'] = json.dumps([{'field': 'town'}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'ABINGTON')
# But it needs to be a field or a function
params['fields'] = json.dumps([{'unknown': 'town'}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must use known fields', resp.json['message'])
def testFileDatabaseSelectFilterViaParams(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
        # We can access filters either via the 'filters' parameter or via the
        # name of each field, optionally suffixed with an operator.
baseParams = {'limit': 5, 'sort': 'town', 'fields': 'town'}
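        # Field-name filters support operator suffixes: e.g. 'town_min' (>=),
        # 'town_search' (regex match), and negated forms such as
        # 'town_notsearch' and 'town_notin' / 'town_not_in', as exercised below.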
# Exact match
params = mergeDicts(baseParams, {'town': 'BOSTON'})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
params = mergeDicts(baseParams, {'town': 'boston'})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 0)
# minimum
params = mergeDicts(baseParams, {'town_min': 'BOS'})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
# search
params = mergeDicts(baseParams, {'town_search': '^bo.*n$'})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 3)
self.assertEqual(resp.json['data'][1][0], 'BOSTON')
# compound
params = mergeDicts(baseParams, {
'town_min': 'BOS',
'town_notsearch': '^bo.*n$'
})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertNotEqual(resp.json['data'][0][0], 'BOSTON')
# numeric comparisons are sent as text
params = mergeDicts(baseParams, {'pop2010_min': '150000'})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 3)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
# you can't use regex or search on numeric types
params = mergeDicts(baseParams, {'pop2010_search': '150000'})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('Cannot use search operator on field',
resp.json['message'])
        # We should get the same results whether the negation is spelled
        # 'notin' or 'not_in'
params = mergeDicts(baseParams, {
'town_min': 'BOS',
'town_notin': 'BOSTON'
})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'BOURNE')
params = mergeDicts(baseParams, {
'town_min': 'BOS',
'town_not_in': 'BOSTON'
})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'BOURNE')
def testFileDatabaseSelectFilters(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
params = {'limit': 5, 'sort': 'town', 'fields': 'town'}
params['filters'] = '[not json'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must be a JSON list', resp.json['message'])
params['filters'] = json.dumps({'town': 'BOSTON'})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must specify a field or func', resp.json['message'])
params['filters'] = json.dumps([{'town': 'BOSTON'}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must specify a field or func', resp.json['message'])
params['filters'] = json.dumps([{'field': 'town', 'value': 'BOSTON'}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
# Test a single filter
params['filters'] = json.dumps({'field': 'town', 'value': 'BOSTON'})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
# Test have the value first
params['filters'] = json.dumps([{
'lvalue': 'BOSTON', 'value': {'field': 'town'}}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
# test operators
params['filters'] = json.dumps([{
'field': 'town', 'operator': '>=', 'value': 'BOS'}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
params['filters'] = json.dumps([{
'field': 'town', 'operator': 'gt', 'value': 'BOS'}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
params['filters'] = json.dumps([{
'field': 'town', 'operator': 'noop', 'value': 'BOS'}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('Unknown filter operator', resp.json['message'])
# Functions must be known
params['filters'] = json.dumps([{
'field': 'town', 'value': {'func': 'unknown', 'params': []}
}])
with self.assertRaises(Exception):
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
# We throw a different error when params is an empty dict
params['filters'] = json.dumps([{
'field': 'town', 'value': {'func': 'unknown', 'param': {}}}])
with self.assertRaises(Exception):
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
# Test a filter composed of a list
params['filters'] = json.dumps([['town', 'gt', 'BOS']])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
params['filters'] = json.dumps([['town', 'BOSTON']])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
params['filters'] = json.dumps([['town', 'gt', 'BOSTON', 'extra']])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must have two or three components',
resp.json['message'])
# Test a single filter as a list
params['filters'] = json.dumps(['town', 'gt', 'BOS'])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
# Fail on an unknown field
params['filters'] = json.dumps([['unknown', 'BOSTON']])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('Filters must be on known fields', resp.json['message'])
# Fail without a value
params['filters'] = json.dumps([{
'field': 'town'}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must have a value or rfunc', resp.json['message'])
# Test a right function
params['filters'] = json.dumps([{
'field': 'town', 'rfunc': 'upper', 'rparam': 'boston'}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
# This must not work on file2
with self.assertRaises(Exception):
resp = self.request(path='/file/%s/database/select' % (
fileId2, ), user=self.user, params=params)
# Test a set of nested functions
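        # The filter below asks whether the point (-72, 42.3601) in EPSG:4326
        # intersects each town's geometry (reprojected to 4326); only RUTLAND
        # should match.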
filters = [{
'func': 'st_intersects',
'param': [{
'func': 'st_setsrid',
'param': [{
'func': 'st_makepoint',
'param': [-72, 42.3601]
}, 4326]
}, {
'func': 'st_transform',
'param': [{
'field': 'geom'
}, 4326]
}],
'operator': 'is',
'value': True
}]
params['filters'] = json.dumps(filters)
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 'RUTLAND')
# Test nested filters
params['filters'] = json.dumps([{
'group': 'not',
'value': [['pop2010', '<', '100'], ['pop2010', '>', '400000']]}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('Filter group badly formed', resp.json['message'])
params['filters'] = json.dumps([{'or': [
['pop2010', '<', '100'], ['pop2010', '>', '400000']]}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 2)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
self.assertEqual(resp.json['data'][1][0], 'GOSNOLD')
params['filters'] = json.dumps([{
'group': 'or',
'value': [['pop2010', '<', '100'], ['pop2010', '>', '400000']]}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 2)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
self.assertEqual(resp.json['data'][1][0], 'GOSNOLD')
params['filters'] = json.dumps(
[['pop2010', '<', '4000'], ['pop2010', '>', '3700']])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 2)
self.assertEqual(resp.json['data'][0][0], 'TISBURY')
self.assertEqual(resp.json['data'][1][0], 'WEST BROOKFIELD')
params['filters'] = json.dumps({
'group': 'and',
'value': [['pop2010', '<', '4000'], ['pop2010', '>', '3700']]})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 2)
self.assertEqual(resp.json['data'][0][0], 'TISBURY')
self.assertEqual(resp.json['data'][1][0], 'WEST BROOKFIELD')
params['filters'] = json.dumps({
'and': [['pop2010', '<', '4000'], ['pop2010', '>', '3700']]})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 2)
self.assertEqual(resp.json['data'][0][0], 'TISBURY')
self.assertEqual(resp.json['data'][1][0], 'WEST BROOKFIELD')
params['filters'] = json.dumps({'or': ['town', 'BOSTON']})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
params['filters'] = json.dumps({'or': []})
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
# test double nesting
params['filters'] = json.dumps([{
'group': 'or',
'value': [
['pop2010', '<', '100'],
['pop2010', '>', '400000'],
{'and': [['pop2010', '<', '4000'], ['pop2010', '>', '3700']]},
]}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 4)
self.assertEqual(resp.json['data'][0][0], 'BOSTON')
self.assertEqual(resp.json['data'][1][0], 'GOSNOLD')
self.assertEqual(resp.json['data'][2][0], 'TISBURY')
self.assertEqual(resp.json['data'][3][0], 'WEST BROOKFIELD')
params['filters'] = json.dumps([{
'group': 'or',
'value': [
['pop2010', '<', '100'],
['pop2010', '>', '400000'],
{'or': ['town', 'ABINGTON']},
]}])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 3)
self.assertEqual(resp.json['data'][0][0], 'ABINGTON')
self.assertEqual(resp.json['data'][1][0], 'BOSTON')
self.assertEqual(resp.json['data'][2][0], 'GOSNOLD')
# is None
params['filters'] = json.dumps(['fourcolor', 'is', None])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 1)
self.assertEqual(resp.json['data'][0][0], 'ABINGTON')
        # isnot None (i.e. not null)
params['filters'] = json.dumps(['fourcolor', 'isnot', None])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['data']), 5)
self.assertEqual(resp.json['data'][0][0], 'ACTON')
def testFileDatabaseSelectGroup(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
params = {
'sort': json.dumps([
[{'func': 'count', 'param': {'field': 'town'}}, -1],
[{'func': 'max', 'param': {'field': 'town'}}, 1]]),
'fields': json.dumps([
{'func': 'max', 'param': {'field': 'town'}},
'pop2010',
{'func': 'count', 'param': {'field': 'town'}}]),
'limit': 5
}
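        # Grouping by pop2010 collapses rows that share a population value;
        # count(town) reports how many towns share it and max(town) picks a
        # representative name for display.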
# Unknown fields aren't allowed
params['group'] = 'unknown,town'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must use known fields', resp.json['message'])
# Invalid json fails
params['group'] = '["not valid json",town'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('must be a JSON list', resp.json['message'])
# A valid field works
params['group'] = 'pop2010'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['datacount'], 5)
self.assertEqual(resp.json['data'][0][0], 'DEDHAM')
self.assertEqual(resp.json['data'][0][2], 2)
self.assertEqual(resp.json['data'][4][0], 'ACTON')
self.assertEqual(resp.json['data'][4][2], 1)
# Multi-grouping works, too. Using json
params['group'] = json.dumps(['pop2010', 'popch80_90'])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['datacount'], 5)
self.assertEqual(resp.json['data'][0][0], 'ABINGTON')
# The list can be plain text
params['group'] = 'pop2010,popch80_90'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'][0][0], 'ABINGTON')
# extra commas and white space at the ends of field names are allowed
params['group'] = 'pop2010 ,, popch80_90 ,, '
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'][0][0], 'ABINGTON')
def testFileDatabaseSelectFormats(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
params = {
'sort': 'town',
'limit': 5,
'fields': 'town,pop2010,shape_len,type'
}
params['fields'] = 'town,pop2010,shape_len,type'
# Unknown format
params['format'] = 'unknownFormat'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatus(resp, 400)
self.assertIn('Unknown output format', resp.json['message'])
# List format
params['format'] = 'list'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], [
'town', 'pop2010', 'shape_len', 'type'])
self.assertEqual(resp.json['columns'], {
'town': 0, 'pop2010': 1, 'shape_len': 2, 'type': 3})
self.assertTrue(isinstance(resp.json['data'][0], list))
self.assertEqual(resp.json['data'][0][0], 'ABINGTON')
# Dict format
params['format'] = 'dict'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], [
'town', 'pop2010', 'shape_len', 'type'])
self.assertEqual(resp.json['columns'], {
'town': 0, 'pop2010': 1, 'shape_len': 2, 'type': 3})
self.assertTrue(isinstance(resp.json['data'][0], dict))
self.assertEqual(set(resp.json['data'][0].keys()),
set(['town', 'pop2010', 'shape_len', 'type']))
self.assertEqual(resp.json['data'][1]['town'], 'ACTON')
# Capitalization doesn't matter
params['format'] = 'DICT'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['fields'], [
'town', 'pop2010', 'shape_len', 'type'])
self.assertEqual(resp.json['columns'], {
'town': 0, 'pop2010': 1, 'shape_len': 2, 'type': 3})
self.assertTrue(isinstance(resp.json['data'][0], dict))
self.assertEqual(set(resp.json['data'][0].keys()),
set(['town', 'pop2010', 'shape_len', 'type']))
self.assertEqual(resp.json['data'][2]['town'], 'ACUSHNET')
# csv format
params['format'] = 'csv'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params, isJson=False)
self.assertStatusOk(resp)
data = self.getBody(resp)
self.assertEqual(len(data.split('\r\n')), 7)
self.assertEqual(data.split('\r\n')[0], params['fields'])
self.assertEqual(data.split('\r\n')[4].split(',')[0], 'ADAMS')
# JSON simple format
params['format'] = 'json'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[4]['town'], 'AGAWAM')
# JSON Lines format
params['format'] = 'JSON_Lines'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params, isJson=False)
self.assertStatusOk(resp)
data = self.getBody(resp)
self.assertEqual(len(data.split('\n')), 6)
self.assertEqual(set(json.loads(data.split('\n')[0]).keys()),
set(['town', 'pop2010', 'shape_len', 'type']))
self.assertEqual(json.loads(data.split('\n')[0])['town'], 'ABINGTON')
# GeoJSON format
params['format'] = 'GeoJSON'
geojsonfield = {'func': 'ST_AsGeoJSON', 'param': [{
'func': 'st_transform', 'param': [{'field': 'geom'}, 4326]
}]}
params['fields'] = json.dumps([geojsonfield])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
geojson = resp.json
self.assertTrue(isinstance(geojson, dict))
self.assertEqual(geojson['type'], 'GeometryCollection')
self.assertEqual(len(geojson['geometries']), 5)
self.assertEqual(geojson['geometries'][0]['type'], 'MultiPolygon')
self.assertIn('coordinates', geojson['geometries'][0])
# We should discard the non-geojson fields
params['fields'] = json.dumps(['town', geojsonfield, 'pop2010'])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json, geojson)
params['fields'] = 'town,pop2010,shape_len,type'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['geometries']), 0)
def testFileDatabaseSelectClient(self):
fileId, fileId2, fileId3 = self._setupDbFiles()
params = {'sort': 'town', 'limit': 1, 'clientid': 'test'}
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
from girder.plugins.database_assetstore import dbs
sessions = dbs.base._connectorCache[fileId].sessions
        # We should be tracking a session for 'test'
self.assertIn('test', sessions)
self.assertFalse(sessions['test']['used'])
last = sessions['test'].copy()
# A new request should update the last used time
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertGreater(sessions['test']['last'], last['last'])
self.assertEqual(sessions['test']['session'], last['session'])
        # Artificially age the last session and test that we get a new session
last = sessions['test'].copy()
sessions['test']['last'] -= 305 # 300 is the default expiry age
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertNotEqual(sessions['test']['session'], last['session'])
# Send a slow query in a thread. Use pg_sleep, as it produces more
# consistent tests. Before, we were using
# {'func': 'st_hausdorffdistance', 'param': [
# {'func': 'st_minimumboundingcircle', 'param': {
# 'field': 'geom'}},
# {'field': 'geom'},
# 0.03 + 0.01 * random.random()]},
        # which used a random number as part of the query to prevent
# caching of the results. This would occasionally fully process
# instead of getting canceled.
# Whitelist pg_sleep for this test
from girder.plugins.database_assetstore import dbs, assetstore
connector = dbs.getDBConnector(fileId, assetstore.getDbInfoForFile(
self.file1))
connector._allowedFunctions['pg_sleep'] = True
slowParams = params.copy()
slowParams['fields'] = json.dumps([
'town',
{'func': 'pg_sleep', 'param': [40]},
])
slowParams['limit'] = 500
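        # With pg_sleep(40) evaluated per returned row, this query cannot
        # finish within the test window; it should only end by being cancelled.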
slowResults = {}
def slowQuery(params):
try:
self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=slowParams)
except Exception as exc:
slowResults['exc'] = repr(exc)
        slow = threading.Thread(target=slowQuery, kwargs={
            'params': slowParams
        })
slow.start()
# Wait for the query to start
while not sessions['test']['used'] and slow.is_alive():
time.sleep(0.05)
# Sending a normal request should cancel the slow one and respond
# promptly
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
# The slow request should be cancelled
slow.join()
self.assertTrue(
'canceling statement due to user' in slowResults['exc'] or
'Internal server error' in slowResults['exc'] or
'InterruptedException' in slowResults['exc'])
def testFileDatabaseSelectPolling(self):
# Create a test database connector so we can check polling
from girder.plugins.database_assetstore import dbs
dbInfo = {
'queries': 0,
'data': [[1]],
'format': 'list'
}
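        # Minimal stub connector; dbInfo above acts as shared mutable state so
        # the test can swap the canned result set and count how many times
        # performSelect() gets polled.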
class TestConnector(dbs.base.DatabaseConnector):
name = 'test'
def __init__(self, *args, **kwargs):
super(TestConnector, self).__init__(*args, **kwargs)
self.initialized = True
def getFieldInfo(self):
return [{'name': 'test', 'type': 'number'}]
def performSelect(self, *args, **kwargs):
dbInfo['queries'] += 1
if dbInfo['data'] is None:
return
results = super(TestConnector, self).performSelect(
*args, **kwargs)
results['data'] = dbInfo['data']
results['format'] = dbInfo['format']
return results
@staticmethod
def validate(*args, **kwargs):
return True
dbs.base.registerConnectorClass(TestConnector.name, TestConnector, {})
fileId, fileId2, fileId3 = self._setupDbFiles({
'dbtype': 'test', 'dburi': 'test://nowhere/nowhere'})
params = {}
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'], [[1]])
params = {'wait': 1}
# Waiting shouldn't affect the results since there is data available
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'], [[1]])
# If no data is available for the wait duration, we can get a null
# response
dbInfo['data'].pop()
lastCount = dbInfo['queries']
params = {'wait': 0.01, 'poll': 0.01}
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json.get('data', []), [])
self.assertEqual(dbInfo['queries'], lastCount + 2)
# We should be able to wait for results
def addData(delay, value):
time.sleep(delay)
dbInfo['data'].append([value])
add = threading.Thread(target=addData, args=(1, 2))
add.start()
lastCount = dbInfo['queries']
params = {'initwait': 0.3, 'poll': 0.1, 'wait': 10}
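        # 'initwait' presumably delays the first poll, 'poll' sets the retry
        # interval, and 'wait' caps the total time spent waiting for data.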
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
self.assertStatusOk(resp)
# Don't depend on exact counts, as the test could be slow
self.assertEqual(resp.json['data'], [[2]])
self.assertGreater(dbInfo['queries'], lastCount + 3)
self.assertLess(dbInfo['queries'], lastCount + 9)
add.join()
# Test if we have bad data we get an exception
dbInfo['data'] = None
with self.assertRaises(Exception):
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params=params)
# Test that we can handle different data formats
dbInfo['data'] = [{'test': 1}]
dbInfo['format'] = 'dict'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'], [[1]])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params={'format': 'dict'})
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'], [{'test': 1}])
dbInfo['data'] = [[1]]
dbInfo['format'] = 'list'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'], [[1]])
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user, params={'format': 'dict'})
self.assertStatusOk(resp)
self.assertEqual(resp.json['data'], [{'test': 1}])
dbInfo['data'] = [(1, )]
dbInfo['format'] = 'unknown'
resp = self.request(path='/file/%s/database/select' % (
fileId, ), user=self.user)
self.assertStatus(resp, 400)
self.assertIn('Unknown internal format', resp.json['message'])
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from urlparse import urlparse
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
from webdriver_pages import settings
class PasswordTest(pyauto.PyUITest):
"""Tests that passwords work correctly."""
INFOBAR_TYPE = 'password_infobar'
URL = 'https://accounts.google.com/ServiceLogin'
URL_HTTPS = 'https://accounts.google.com/Login'
URL_LOGOUT = 'https://accounts.google.com/Logout'
HOSTNAME = 'https://' + urlparse(URL).netloc + '/'
USERNAME_ELEM = 'Email'
PASSWORD_ELEM = 'Passwd'
USERNAME = 'test@google.com'
PASSWORD = 'test.password'
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
while True:
raw_input('Interact with the browser and hit <enter> to dump passwords. ')
print '*' * 20
self.pprint(self.GetSavedPasswords())
def setUp(self):
pyauto.PyUITest.setUp(self)
self.assertFalse(self.GetSavedPasswords())
def _AssertWithinOneSecond(self, time1, time2):
self.assertTrue(abs(time1 - time2) < 1.0,
'Times not within an acceptable range. '
'First was %lf, second was %lf' % (time1, time2))
def _ConstructPasswordDictionary(self, username_value, password_value,
signon_realm, origin_url, username_element,
password_element, action_target,
time=1279650942.0, submit_element='submit',
blacklist=False):
"""Construct a password dictionary with all the required fields."""
return {'username_value': username_value,
'password_value': password_value,
'signon_realm': signon_realm,
'time': time,
'origin_url': origin_url,
'username_element': username_element,
'password_element': password_element,
'submit_element': submit_element,
'action_target': action_target,
'blacklist': blacklist}
def _ClickOnLoginPage(self, window_index, tab_index):
# In some cases (such as on Windows) the current page displays an account
# name and e-mail, rather than an e-mail and password. Clicking on a
# particular DOM element causes the e-mail and password to be displayed.
click_js = """
var elements = document.getElementsByClassName("accounts");
if (elements && elements.length > 0) {
elements = elements[0].getElementsByTagName("p");
if (elements && elements.length > 0)
elements[0].onclick();
}
window.domAutomationController.send("done");
"""
self.ExecuteJavascript(click_js, tab_index, window_index)
# Wait until username/password is filled by the Password manager on the
# login page.
js_template = """
var value = "";
var element = document.getElementById("%s");
if (element)
value = element.value;
window.domAutomationController.send(value);
"""
self.assertTrue(self.WaitUntil(
lambda: self.ExecuteJavascript(js_template % self.USERNAME_ELEM,
tab_index, window_index) != '' and
self.ExecuteJavascript(js_template % self.PASSWORD_ELEM,
tab_index, window_index) != ''))
def testSavePassword(self):
"""Test saving a password and getting saved passwords."""
password1 = self._ConstructPasswordDictionary(
'user@example.com', 'test.password',
'https://www.example.com/', 'https://www.example.com/login',
'username', 'password', 'https://www.example.com/login/')
self.assertTrue(self.AddSavedPassword(password1))
self.assertEqual(self.GetSavedPasswords(), [password1])
def testRemovePasswords(self):
"""Verify that saved passwords can be removed."""
password1 = self._ConstructPasswordDictionary(
'user1@example.com', 'test1.password',
'https://www.example.com/', 'https://www.example.com/login',
'username1', 'password', 'https://www.example.com/login/')
password2 = self._ConstructPasswordDictionary(
'user2@example.com', 'test2.password',
'https://www.example.com/', 'https://www.example.com/login',
'username2', 'password2', 'https://www.example.com/login/')
self.AddSavedPassword(password1)
self.AddSavedPassword(password2)
self.assertEquals(2, len(self.GetSavedPasswords()))
self.assertEquals([password1, password2], self.GetSavedPasswords())
self.RemoveSavedPassword(password1)
self.assertEquals(1, len(self.GetSavedPasswords()))
self.assertEquals([password2], self.GetSavedPasswords())
self.RemoveSavedPassword(password2)
# TODO: GetSavedPasswords() doesn't return anything when empty.
# http://crbug.com/64603
# self.assertFalse(self.GetSavedPasswords())
def testDisplayAndSavePasswordInfobar(self):
"""Verify password infobar displays and able to save password."""
test_utils.ClearPasswords(self)
creds = self.GetPrivateInfo()['test_google_account']
username = creds['username']
password = creds['password']
# Disable one-click login infobar for sync.
self.SetPrefs(pyauto.kReverseAutologinEnabled, False)
test_utils.GoogleAccountsLogin(self, username, password)
# Wait until page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self.NavigateToURL(self.URL_LOGOUT)
self.NavigateToURL(self.URL_HTTPS)
self._ClickOnLoginPage(0, 0)
test_utils.VerifyGoogleAccountCredsFilled(self, username, password,
tab_index=0, windex=0)
test_utils.ClearPasswords(self)
def testNeverSavePasswords(self):
"""Verify passwords not saved/deleted when 'never for this site' chosen."""
creds1 = self.GetPrivateInfo()['test_google_account']
# Disable one-click login infobar for sync.
self.SetPrefs(pyauto.kReverseAutologinEnabled, False)
test_utils.GoogleAccountsLogin(
self, creds1['username'], creds1['password'])
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self.assertEquals(1, len(self.GetSavedPasswords()))
self.AppendTab(pyauto.GURL(creds1['logout_url']))
creds2 = self.GetPrivateInfo()['test_google_account_2']
test_utils.GoogleAccountsLogin(
self, creds2['username'], creds2['password'], tab_index=1)
# Selecting 'Never for this site' option on password infobar.
self.PerformActionOnInfobar(
'cancel', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE, tab_index=1), tab_index=1)
# TODO: GetSavedPasswords() doesn't return anything when empty.
# http://crbug.com/64603
# self.assertFalse(self.GetSavedPasswords())
# TODO: Check the exceptions list
def testSavedPasswordInTabsAndWindows(self):
"""Verify saved username/password shows in window and tab."""
creds = self.GetPrivateInfo()['test_google_account']
username = creds['username']
password = creds['password']
# Disable one-click login infobar for sync.
self.SetPrefs(pyauto.kReverseAutologinEnabled, False)
# Login to Google a/c
test_utils.GoogleAccountsLogin(self, username, password)
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self.NavigateToURL(self.URL_LOGOUT)
self.NavigateToURL(self.URL)
self._ClickOnLoginPage(0, 0)
test_utils.VerifyGoogleAccountCredsFilled(self, username, password,
tab_index=0, windex=0)
self.AppendTab(pyauto.GURL(self.URL))
self._ClickOnLoginPage(0, 1)
test_utils.VerifyGoogleAccountCredsFilled(self, username, password,
tab_index=1, windex=0)
test_utils.ClearPasswords(self)
def testLoginCredsNotShownInIncognito(self):
"""Verify login creds are not shown in Incognito mode."""
creds = self.GetPrivateInfo()['test_google_account']
username = creds['username']
password = creds['password']
# Disable one-click login infobar for sync.
self.SetPrefs(pyauto.kReverseAutologinEnabled, False)
# Login to Google account.
test_utils.GoogleAccountsLogin(self, username, password)
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self.NavigateToURL(self.URL_LOGOUT)
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(self.URL, 1, 0)
email_value = self.GetDOMValue('document.getElementById("Email").value',
tab_index=0, windex=1)
passwd_value = self.GetDOMValue('document.getElementById("Passwd").value',
tab_index=0, windex=1)
self.assertEqual(email_value, '',
msg='Email creds displayed %s.' % email_value)
self.assertEqual(passwd_value, '', msg='Password creds displayed.')
def testPasswordAutofilledInIncognito(self):
"""Verify saved password is autofilled in Incognito mode.
Saved passwords should be autofilled once the username is entered in
incognito mode.
"""
action_target = self.HOSTNAME
driver = self.NewWebDriver()
password_dict = self._ConstructPasswordDictionary(
self.USERNAME, self.PASSWORD, self.HOSTNAME, self.URL,
self.USERNAME_ELEM, self.PASSWORD_ELEM, action_target)
self.AddSavedPassword(password_dict)
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(self.URL, 1, 0)
# Switch to window 1.
driver.switch_to_window(driver.window_handles[1])
driver.find_element_by_id(
self.USERNAME_ELEM).send_keys(self.USERNAME + '\t')
incognito_passwd = self.GetDOMValue(
'document.getElementById("Passwd").value', tab_index=0, windex=1)
self.assertEqual(incognito_passwd, self.PASSWORD,
msg='Password creds did not autofill in incognito mode.')
def testInfoBarDisappearByNavigatingPage(self):
"""Test password infobar is dismissed when navigating to different page."""
creds = self.GetPrivateInfo()['test_google_account']
# Disable one-click login infobar for sync.
self.SetPrefs(pyauto.kReverseAutologinEnabled, False)
# Login to Google account.
test_utils.GoogleAccountsLogin(self, creds['username'], creds['password'])
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self.NavigateToURL('chrome://version')
self.assertTrue(self.WaitForInfobarCount(0))
    # Make sure the user has navigated to the Version page.
self.assertTrue(self.WaitUntil(self.GetActiveTabTitle,
expect_retval='About Version'))
test_utils.AssertInfobarTypeDoesNotAppear(self, self.INFOBAR_TYPE)
def testInfoBarDisappearByReload(self):
"""Test that Password infobar disappears by the page reload."""
creds = self.GetPrivateInfo()['test_google_account']
# Disable one-click login infobar for sync.
self.SetPrefs(pyauto.kReverseAutologinEnabled, False)
# Login to Google a/c
test_utils.GoogleAccountsLogin(self, creds['username'], creds['password'])
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self.GetBrowserWindow(0).GetTab(0).Reload()
test_utils.AssertInfobarTypeDoesNotAppear(self, self.INFOBAR_TYPE)
def testPasswdInfoNotStoredWhenAutocompleteOff(self):
"""Verify that password infobar does not appear when autocomplete is off.
If the password field has autocomplete turned off, then the password infobar
should not offer to save the password info.
"""
password_info = {'Email': self.USERNAME,
'Passwd': self.PASSWORD}
# Disable one-click login infobar for sync.
self.SetPrefs(pyauto.kReverseAutologinEnabled, False)
url = self.GetHttpURLForDataPath(
os.path.join('password', 'password_autocomplete_off_test.html'))
self.NavigateToURL(url)
for key, value in password_info.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
self.assertTrue(self.SubmitForm('loginform'))
test_utils.AssertInfobarTypeDoesNotAppear(self, self.INFOBAR_TYPE)
def _SendCharToPopulateField(self, char, tab_index=0, windex=0):
"""Simulate a char being typed into a field.
Args:
char: the char value to be typed into the field.
tab_index: tab index to work on. Defaults to 0 (first tab).
windex: window index to work on. Defaults to 0 (first window).
"""
CHAR_KEYPRESS = ord((char).upper()) # ASCII char key press.
KEY_DOWN_TYPE = 0 # kRawKeyDownType
KEY_UP_TYPE = 3 # kKeyUpType
self.SendWebkitKeyEvent(KEY_DOWN_TYPE, CHAR_KEYPRESS, tab_index, windex)
self.SendWebkitCharEvent(char, tab_index, windex)
self.SendWebkitKeyEvent(KEY_UP_TYPE, CHAR_KEYPRESS, tab_index, windex)
def testClearFetchedCredForNewUserName(self):
"""Verify that the fetched credentials are cleared for a new username.
This test requires sending key events rather than pasting a new username
into the Email field.
"""
creds = self.GetPrivateInfo()['test_google_account']
username = creds['username']
password = creds['password']
# Disable one-click login infobar for sync.
self.SetPrefs(pyauto.kReverseAutologinEnabled, False)
# Login to Google a/c
test_utils.GoogleAccountsLogin(self, username, password)
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self.NavigateToURL(self.URL_LOGOUT)
self.NavigateToURL(self.URL)
self._ClickOnLoginPage(0, 0)
test_utils.VerifyGoogleAccountCredsFilled(self, username, password,
tab_index=0, windex=0)
clear_username_field = (
'document.getElementById("Email").value = ""; '
'window.domAutomationController.send("done");')
set_focus = (
'document.getElementById("Email").focus(); '
'window.domAutomationController.send("done");')
self.ExecuteJavascript(clear_username_field, 0, 0)
self.ExecuteJavascript(set_focus, 0, 0)
self._SendCharToPopulateField('t', tab_index=0, windex=0)
passwd_value = self.GetDOMValue('document.getElementById("Passwd").value')
self.assertFalse(passwd_value,
msg='Password field not empty for new username.')
test_utils.ClearPasswords(self)
def testPasswordInfobarShowsForBlockedDomain(self):
"""Verify that password infobar shows when cookies are blocked.
Password infobar should be shown if cookies are blocked for Google
accounts domain.
"""
creds = self.GetPrivateInfo()['test_google_account']
username = creds['username']
password = creds['password']
# Block cookies for Google accounts domain.
self.SetPrefs(pyauto.kContentSettingsPatternPairs,
{'https://accounts.google.com/': {'cookies': 2}})
test_utils.GoogleAccountsLogin(self, username, password)
test_utils.WaitForInfobarTypeAndGetIndex(self, self.INFOBAR_TYPE)
if __name__ == '__main__':
pyauto_functional.Main()
import json
import urllib2
from directories import userCachePath, getDataDir
import os
import time
import base64 # @UnusedImport
from pymclevel.mclevelbase import PlayerNotFound
import urllib
from PIL import Image
from urllib2 import HTTPError
import atexit
import threading
import traceback
import logging
logger = logging.getLogger()
def deprecated(func):
def new_func(*args, **kwargs):
#logger.warn("Function \""+str(func.__name__)+"\" is deprecated and should not be used")
return func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
#def getPlayerSkinURL(uuid):
# try:
# playerJSONResponse = urllib2.urlopen('https://sessionserver.mojang.com/session/minecraft/profile/{}'.format(uuid))
# print playerJSONResponse
# texturesJSON = json.loads(playerJSONResponse)['properties']
# for prop in properties:
# if prop['name'] == 'textures':
# b64 = base64.b64decode(prop['value']);
# print b64
# return json.loads(b64)['textures']['SKIN']['url']
# except:
# raise
#print getPlayerSkinURL('4566e69fc90748ee8d71d7ba5aa00d20')
class __PlayerCache:
SUCCESS = 0
FAILED = 1
def __convert(self):
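        # Convert the legacy usercache.json layout (a dict keyed by UUID) into
        # the list-of-dicts format used by this cache. The misspelled
        # "Timstamp" key below mirrors the legacy data and is corrected later
        # by fixAllOfPodshotsBugs().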
jsonFile = None
fp = open(userCachePath)
try:
jsonFile = json.load(fp)
fp.close()
except ValueError:
fp.close()
            # Assume the JSON file is corrupted; delete it and recreate it empty
os.remove(userCachePath)
with open(userCachePath, 'w') as json_out:
json.dump([], json_out)
if jsonFile is not None:
for old_player in jsonFile.keys():
player = jsonFile[old_player]
new_player = {"Playername": player["username"], "UUID (No Separator)": old_player.replace("-", ""),
"UUID (Separator)": old_player, "WasSuccessful": True, "Timstamp": player["timestamp"]}
self._playerCacheList.append(new_player)
self._save()
print "Convert usercache.json"
def fixAllOfPodshotsBugs(self):
for player in self._playerCacheList:
if "Timstamp" in player:
player["Timestamp"] = player["Timstamp"]
del player["Timstamp"]
self._save()
def __init__(self):
self._playerCacheList = []
if not os.path.exists(userCachePath):
out = open(userCachePath, 'w')
json.dump(self._playerCacheList, out)
out.close()
        f = open(userCachePath, 'r')
        line = f.readline()
        f.close()
        if line.startswith("{"):
            self.__convert()
        json_in = None
        try:
            json_in = open(userCachePath)
            self._playerCacheList = json.load(json_in)
        except:
            logger.warning("Usercache.json may be corrupted")
            self._playerCacheList = []
        finally:
            if json_in is not None:
                json_in.close()
self.fixAllOfPodshotsBugs()
self.refresh_lock = threading.Lock()
self.player_refreshing = threading.Thread(target=self._refreshAll)
self.player_refreshing.daemon = True
self.player_refreshing.start()
#self._refreshAll()
def _save(self):
out = open(userCachePath, "w")
json.dump(self._playerCacheList, out, indent=4, separators=(',', ':'))
out.close()
def _removePlayerWithName(self, name):
toRemove = None
for p in self._playerCacheList:
if p["Playername"] == name:
toRemove = p
if toRemove is not None:
self._playerCacheList.remove(toRemove)
self._save()
def _removePlayerWithUUID(self, uuid, seperator=True):
toRemove = None
for p in self._playerCacheList:
if seperator:
if p["UUID (Separator)"] == uuid:
toRemove = p
else:
if p["UUID (No Separator)"] == uuid:
toRemove = p
if toRemove is not None:
self._playerCacheList.remove(toRemove)
self._save()
def nameInCache(self, name):
isInCache = False
for p in self._playerCacheList:
if p["Playername"] == name:
isInCache = True
return isInCache
def uuidInCache(self, uuid, seperator=True):
isInCache = False
for p in self._playerCacheList:
if seperator:
if p["UUID (Separator)"] == uuid:
isInCache = True
else:
if p["UUID (No Separator)"] == uuid:
isInCache = True
return isInCache
def _refreshAll(self):
with self.refresh_lock:
playersNeededToBeRefreshed = []
try:
t = time.time()
except:
t = 0
for player in self._playerCacheList:
if player["Timestamp"] != "<Invalid>":
if t - player["Timestamp"] > 21600:
playersNeededToBeRefreshed.append(player)
for player in playersNeededToBeRefreshed:
self.getPlayerFromUUID(player["UUID (Separator)"], forceNetwork=True, dontSave=True)
self._save()
def force_refresh(self):
players = self._playerCacheList
for player in players:
self.getPlayerFromUUID(player["UUID (Separator)"], forceNetwork=True)
@deprecated
def getPlayerFromUUID(self, uuid, forceNetwork=False, dontSave=False):
player = {}
response = None
if forceNetwork:
if self.uuidInCache(uuid):
self._removePlayerWithUUID(uuid)
try:
response = urllib2.urlopen("https://sessionserver.mojang.com/session/minecraft/profile/{}".format(uuid.replace("-",""))).read()
except urllib2.URLError:
return uuid
if response is not None and response != "":
playerJSON = json.loads(response)
player["Playername"] = playerJSON["name"]
player["UUID (No Separator)"] = playerJSON["id"]
player["UUID (Separator)"] = uuid
player["WasSuccessful"] = True
player["Timestamp"] = time.time()
self._playerCacheList.append(player)
if not dontSave:
self._save()
return playerJSON["name"]
else:
return uuid
else:
            couldNotFind = True  # treat an empty or non-matching cache as a miss
for p in self._playerCacheList:
if p["UUID (Separator)"] == uuid and p["WasSuccessful"]:
couldNotFind = False
return p["Playername"]
else:
couldNotFind = True
if couldNotFind:
result = self.getPlayerFromUUID(uuid, forceNetwork=True)
if result == uuid:
player = {"Playername":"<Unknown>","UUID (Separator)":uuid,"UUID (No Separator)":uuid.replace("-",""),"Timestamp":"<Invalid>","WasSuccessful":False}
self._playerCacheList.append(player)
return uuid
@deprecated
def getPlayerFromPlayername(self, playername, forceNetwork=False, separator=True):
response = None
if forceNetwork:
if self.nameInCache(playername):
self._removePlayerWithName(playername)
try:
response = urllib2.urlopen("https://api.mojang.com/users/profiles/minecraft/{}".format(playername)).read()
except urllib2.URLError:
return playername
if response is not None and response != "":
playerJSON = json.loads(response)
player = {"Playername": playername, "UUID (No Separator)": playerJSON["id"]}
uuid = playerJSON["id"][:8]+"-"+playerJSON["id"][8:12]+"-"+playerJSON["id"][12:16]+"-"+playerJSON["id"][16:20]+"-"+playerJSON["id"][20:]
player["UUID (Separator)"] = uuid
player["WasSuccessful"] = True
player["Timestamp"] = time.time()
self._playerCacheList.append(player)
self._save()
if separator:
return uuid
else:
return playerJSON["id"]
else:
return playername
else:
            couldNotFind = True  # treat an empty or non-matching cache as a miss
for p in self._playerCacheList:
if p["Playername"] == playername and p["WasSuccessful"]:
couldNotFind = False
return p["UUID (Separator)"]
else:
couldNotFind = True
if couldNotFind:
result = self.getPlayerFromPlayername(playername, forceNetwork=True)
if result == playername:
player = {"Playername":playername,"UUID (Separator)":"<Unknown>","UUID (No Separator)":"<Unknown>","Timestamp":"<Invalid>","WasSuccessful":False}
self._playerCacheList.append(player)
return playername
    # 0 is for a list of the player names, 1 is for a dictionary of all player data
def getAllPlayersKnown(self, returnType=0, include_failed_lookups=False):
toReturn = None
if returnType == 0:
toReturn = []
for p in self._playerCacheList:
if p["WasSuccessful"]:
toReturn.append(p["Playername"])
elif include_failed_lookups:
toReturn.append(p["Playername"])
elif returnType == 1:
toReturn = {}
for p in self._playerCacheList:
if p["WasSuccessful"]:
toReturn[p["Playername"]] = p
elif include_failed_lookups:
toReturn[p["Playername"]] = p
return toReturn
def getFromCacheUUID(self, uuid, seperator=True):
for player in self._playerCacheList:
if seperator and player["UUID (Separator)"] == uuid:
return player["UUID (Separator)"], player["Playername"], player["UUID (No Separator)"]
elif player["UUID (No Separator)"] == uuid:
return player["UUID (Separator)"], player["Playername"], player["UUID (No Separator)"]
def getFromCacheName(self, name):
for player in self._playerCacheList:
if name == player["Playername"]:
return player["UUID (Separator)"], player["Playername"], player["UUID (No Separator)"]
def getPlayerInfo(self, arg, force=False):
if arg.count('-') == 4:
if self.uuidInCache(arg) and not force:
return self.getFromCacheUUID(arg)
else:
return self._getPlayerInfoUUID(arg)
else:
if self.nameInCache(arg) and not force:
return self.getFromCacheName(arg)
else:
return self._getPlayerInfoName(arg)
def _getPlayerInfoUUID(self, uuid):
response_name = None
response_uuid = None
player = {}
if self.uuidInCache(uuid):
self._removePlayerWithUUID(uuid)
try:
response_uuid = json.loads(urllib2.urlopen("https://sessionserver.mojang.com/session/minecraft/profile/{}".format(uuid.replace("-", ""))).read())
response_name = json.loads(urllib2.urlopen("https://api.mojang.com/users/profiles/minecraft/{}".format(response_uuid["name"])).read())
except urllib2.URLError:
return uuid
except ValueError:
print "Caught value error while getting player info for "+uuid
return uuid
if response_name is not None and response_name != "" and response_uuid is not None and response_uuid != "":
player["Playername"] = response_name["name"]
player["UUID (Separator)"] = response_name["id"][:8]+"-"+response_name["id"][8:12]+"-"+response_name["id"][12:16]+"-"+response_name["id"][16:20]+"-"+response_name["id"][20:]
player["UUID (No Separator)"] = response_name["id"]
player["WasSuccessful"] = True
player["Timestamp"] = time.time()
self._playerCacheList.append(player)
self._save()
return player["UUID (Separator)"], player["Playername"], player["UUID (No Separator)"]
else:
return uuid
#raise Exception("Couldn't find player")
def _getPlayerInfoName(self, playername):
response_name = None
response_uuid = None
player = {}
if self.nameInCache(playername):
self._removePlayerWithName(playername)
try:
response_name = json.loads(urllib2.urlopen("https://api.mojang.com/users/profiles/minecraft/{}".format(playername)).read())
response_uuid = json.loads(urllib2.urlopen("https://sessionserver.mojang.com/session/minecraft/profile/{}".format(response_name["id"])).read())
except urllib2.URLError:
return playername
except ValueError:
print "Caught value error while getting player info for "+playername
return playername
if response_name is not None and response_name != "" and response_uuid is not None and response_uuid != "":
player["Playername"] = response_name["name"]
player["UUID (Separator)"] = response_name["id"][:8]+"-"+response_name["id"][8:12]+"-"+response_name["id"][12:16]+"-"+response_name["id"][16:20]+"-"+response_name["id"][20:]
player["UUID (No Separator)"] = response_name["id"]
player["WasSuccessful"] = True
player["Timestamp"] = time.time()
self._playerCacheList.append(player)
self._save()
return player["UUID (Separator)"], player["Playername"], player["UUID (No Separator)"]
else:
return playername
#raise Exception("Couldn't find player")
@staticmethod
def __formats():
player = {
"Playername":"<Username>",
"UUID":"<uuid>",
"Timestamp":"<timestamp>",
# WasSuccessful will be true if the UUID/Player name was retrieved successfully
"WasSuccessful":True
}
pass
def cleanup(self):
remove = []
for player in self._playerCacheList:
if not player["WasSuccessful"]:
remove.append(player)
for toRemove in remove:
self._playerCacheList.remove(toRemove)
self._save()
playercache = __PlayerCache()
def getUUIDFromPlayerName(player, seperator=True, forceNetwork=False):
return playercache.getPlayerFromPlayername(player, forceNetwork, seperator)
'''
if forceNetwork:
try:
playerJSONResponse = urllib2.urlopen("https://api.mojang.com/users/profiles/minecraft/{}".format(player)).read()
playerJSON = json.loads(playerJSONResponse)
if seperator:
return "-".join((playerJSON["id"][:8], playerJSON["id"][8:12], playerJSON["id"][12:16], playerJSON["id"][16:20], playerJSON["id"][20:]))
else:
return playerJSON["id"]
except:
raise PlayerNotFound(player)
else:
try:
t = time.time()
except:
t = 0
try:
if not os.path.exists(userCachePath):
usercache = {}
print "{} doesn't exist, will not cache".format(userCachePath)
else:
try:
f = open(userCachePath,"r+")
usercache = json.loads(f.read())
except:
print "Error loading {} from disk".format(userCachePath)
os.remove(userCachePath)
f = open(userCachePath, 'ar+')
usercache = {}
try:
uuid = [x for x in usercache if usercache[x]["username"].lower() == player.lower()][0]
if os.path.exists(userCachePath) and uuid in usercache and "timestamp" in usercache[uuid] and t-usercache[uuid]["timestamp"] < 21600:
refreshUUID = False
else:
refreshUUID = True
except:
refreshUUID = True
if refreshUUID:
uuid = getUUIDFromPlayerName(player, seperator, True)
try:
usercache[uuid] = {"username":getPlayerNameFromUUID(uuid, True),"timestamp":t}
except:
print "Error updating {} from network. Using last known".format(uuid)
return uuid
try:
if os.path.exists(userCachePath):
f.seek(0)
f.write(json.dumps(usercache))
f.close()
except:
print "Error writing {} to disk".format(userCachePath)
return uuid
except:
print "Error getting the uuid for {}".format(player)
raise PlayerNotFound(player)
'''
def getPlayerNameFromUUID(uuid,forceNetwork=False):
'''
Gets the Username from a UUID
:param uuid: The Player's UUID
    :param forceNetwork: Forces use of Mojang's API instead of first looking in usercache.json
'''
return playercache.getPlayerFromUUID(uuid, forceNetwork)
'''
if forceNetwork:
try:
nuuid = uuid.replace("-", "")
playerJSONResponse = urllib2.urlopen("https://api.mojang.com/user/profiles/{}/names".format(nuuid)).read()
playerJSON = json.loads(playerJSONResponse)
return playerJSON[0]
except:
raise PlayerNotFound(uuid)
else:
try:
t = time.time()
except:
t = 0
try:
if not os.path.exists(userCachePath):
usercache = {}
print "{} doesn't exist, will not cache".format(userCachePath)
else:
try:
f = open(userCachePath,"r+")
usercache = json.loads(f.read())
except:
print "Error loading {} from disk".format(userCachePath)
os.remove(userCachePath)
f = open(userCachePath, 'ar+')
usercache = {}
try:
if os.path.exists(userCachePath) and uuid in usercache and "timestamp" in usercache[uuid] and t-usercache[uuid]["timestamp"] < 21600:
refreshUUID = False
else:
refreshUUID = True
except:
refreshUUID = True
if refreshUUID:
try:
usercache[uuid] = {"username":getPlayerNameFromUUID(uuid,True),"timestamp":t}
except:
print "Error loading {} from network".format(uuid)
return uuid
try:
if os.path.exists(userCachePath):
f.seek(0)
f.write(json.dumps(usercache))
f.close()
except:
print "Error writing {} to disk".format(userCachePath)
try:
return usercache[uuid]["username"]
except:
print "Error returning uuid"
return uuid
except:
print "Error getting the username for {}".format(uuid)
return uuid
'''
def getPlayerSkin(uuid, force=False, trying_again=False, instance=None):
SKIN_URL = "http://skins.minecraft.net/MinecraftSkins/{}.png"
toReturn = 'char.png'
try:
os.mkdir("player-skins")
except OSError:
pass
if force or not os.path.exists(os.path.join("player-skins", uuid.replace("-", "_")+".png")):
try:
# Checks to see if the skin even exists
urllib2.urlopen(SKIN_URL.format(playercache.getPlayerFromUUID(uuid, forceNetwork=False)))
except urllib2.URLError as e:
if "Not Found" in e.msg:
return toReturn
try:
if os.path.exists(os.path.join("player-skins", uuid.replace("-", "_")+".png")) and not force:
player_skin = Image.open(os.path.join("player-skins", uuid.replace("-","_")+".png"))
if player_skin.size == (64,64):
player_skin = player_skin.crop((0,0,64,32))
player_skin.save(os.path.join("player-skins", uuid.replace("-","_")+".png"))
toReturn = os.path.join("player-skins", uuid.replace("-","_")+".png")
else:
playername = playercache.getPlayerFromUUID(uuid,forceNetwork=False)
urllib.urlretrieve(SKIN_URL.format(playername), os.path.join("player-skins", uuid.replace("-","_")+".png"))
toReturn = os.path.join("player-skins", uuid.replace("-","_")+".png")
player_skin = Image.open(toReturn)
if player_skin.size == (64,64):
player_skin = player_skin.crop((0,0,64,32))
player_skin.save(os.path.join("player-skins", uuid.replace("-","_")+".png"))
except IOError:
print "Couldn't find Image file ("+str(uuid.replace("-","_")+".png")+") or the file may be corrupted"
print "Trying to re-download skin...."
if not trying_again and instance is not None:
instance.delete_skin(uuid)
os.remove(os.path.join("player-skins", uuid.replace("-","_")+".png"))
toReturn = getPlayerSkin(uuid, force=True, trying_again=True)
pass
except HTTPError:
print "Couldn't connect to a network"
raise Exception("Could not connect to the skins server, please check your Internet connection and try again.")
pass
except Exception:
print "Unknown error occurred while reading/downloading skin for "+str(uuid.replace("-","_")+".png")
pass
return toReturn
def _cleanup():
if os.path.exists("player-skins"):
for image_file in os.listdir("player-skins"):
fp = None
try:
fp = open(os.path.join("player-skins", image_file), 'rb')
Image.open(fp)
            except IOError:
                if fp is not None:
                    fp.close()
                os.remove(os.path.join("player-skins", image_file))
playercache.cleanup()
atexit.register(_cleanup)
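# Illustrative usage sketch (not part of the original module; 'Notch' and the
# returned paths are placeholders). The module-level helpers defined above wrap
# the playercache singleton and fall back to the Mojang web API on a cache miss:
#
#   uuid = getUUIDFromPlayerName('Notch', seperator=True)   # name -> dashed UUID
#   name = getPlayerNameFromUUID(uuid)                       # UUID -> current name
#   skin_path = getPlayerSkin(uuid)                          # local path to the cached skin .png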
|
|
import pytest
import os
from flex.exceptions import ValidationError
from flex.loading.schema.paths.path_item.operation.parameters import (
parameters_validator,
)
from flex.validation.parameter import (
validate_parameters,
)
from flex.constants import (
PATH,
STRING,
NUMBER,
BOOLEAN,
FLEX_DISABLE_X_NULLABLE
)
from flex.error_messages import MESSAGES
from tests.utils import assert_message_in_errors
#
# enum validation tests
#
@pytest.mark.parametrize(
'enum,value',
(
([True, False], 0),
([True, False], 1),
([True, False], ''),
([True, False], None),
([0, 1, 2, 3], True),
([0, 1, 2, 3], False),
([0, 1, 2, 3], '1'),
([0, 1, 2, 3], 4),
([0, 1, 2, 3], 1.0),
([0, 1, 2, 3], None),
(['1', '2', 'a', 'b'], 'A'),
(['1', '2', 'a', 'b'], 1),
(['1', '2', 'a', 'b'], 2),
(['1', '2', 'a', 'b'], None),
),
)
def test_enum_validation_with_invalid_values(enum, value):
parameters = parameters_validator([
{
'name': 'id',
'in': PATH,
'description': 'id',
'type': [STRING, NUMBER, BOOLEAN],
'required': True,
'enum': enum,
},
])
parameter_values = {
'id': value,
}
with pytest.raises(ValidationError) as err:
validate_parameters(parameter_values, parameters, {})
assert_message_in_errors(
MESSAGES['enum']['invalid'],
err.value.detail,
'id.enum',
)
@pytest.mark.parametrize(
'enum,value',
(
([True, False], True),
([True, False], False),
([0, 1, 2, 3], 3),
([0, 1, 2, 3], 1),
(['1', '2', 'a', 'b'], 'a'),
(['1', '2', 'a', 'b'], '1'),
),
)
def test_enum_validation_with_allowed_values(enum, value):
parameters = parameters_validator([
{
'name': 'id',
'in': PATH,
'description': 'id',
'type': [STRING, NUMBER, BOOLEAN],
'required': True,
'enum': enum,
},
])
parameter_values = {
'id': value,
}
validate_parameters(parameter_values, parameters, {})
@pytest.mark.parametrize(
'enum,value',
(
([True, False], True),
([True, False], None),
([0, 1, 2, 3], 1),
([0, 1, 2, 3], None),
(['1', '2', 'a', 'b'], 'a'),
(['1', '2', 'a', 'b'], None),
),
)
def test_nullable_enum_validation_with_allowed_values(enum, value):
parameters = parameters_validator([
{
'name': 'id',
'in': PATH,
'description': 'id',
'type': [STRING, NUMBER, BOOLEAN],
'required': True,
'enum': enum,
'x-nullable': True
},
])
parameter_values = {
'id': value,
}
validate_parameters(parameter_values, parameters, {})
@pytest.mark.parametrize(
'enum,value',
(
([True, False], None),
([0, 1, 2, 3], None),
(['1', '2', 'a', 'b'], None),
),
)
def test_nullable_enum_with_null_values_strict(enum, value, monkeypatch):
parameters = parameters_validator([
{
'name': 'id',
'in': PATH,
'description': 'id',
'type': [STRING, NUMBER, BOOLEAN],
'required': True,
'enum': enum,
'x-nullable': True
},
])
parameter_values = {
'id': value,
}
monkeypatch.setattr(os, 'environ', {FLEX_DISABLE_X_NULLABLE: '1'})
with pytest.raises(ValidationError) as err:
validate_parameters(parameter_values, parameters, {})
assert_message_in_errors(
MESSAGES['enum']['invalid'],
err.value.detail,
'id.enum',
)
@pytest.mark.parametrize(
'enum,value',
(
([True, False], 0),
([True, False], 1),
([True, False], ''),
([0, 1, 2, 3], True),
([0, 1, 2, 3], False),
([0, 1, 2, 3], '1'),
([0, 1, 2, 3], 4),
([0, 1, 2, 3], 1.0),
(['1', '2', 'a', 'b'], 'A'),
(['1', '2', 'a', 'b'], 1),
(['1', '2', 'a', 'b'], 2),
),
)
def test_nullable_enum_with_invalid_values(enum, value):
parameters = parameters_validator([
{
'name': 'id',
'in': PATH,
'description': 'id',
'type': [STRING, NUMBER, BOOLEAN],
'required': True,
'enum': enum,
'x-nullable': True
},
])
parameter_values = {
'id': value,
}
with pytest.raises(ValidationError) as err:
validate_parameters(parameter_values, parameters, {})
assert_message_in_errors(
MESSAGES['enum']['invalid'],
err.value.detail,
'id.enum',
)
|
|
#!/usr/bin/python3
import unittest
from pymamba import Database, Table, _debug, xIndexMissing, xWriteFail, xTableMissing, size_mb, size_gb
from subprocess import call
class UnitTests(unittest.TestCase):
_db_name = 'databases/unit-db'
_tb_name = 'demo1'
_debug = True
_data = [
{'name': 'Gareth Bult', 'age': 21, 'admin': True, 'cat': 'A'},
{'name': 'Squizzey', 'age': 3000, 'cat': 'A'},
{'name': 'Fred Bloggs', 'age': 45, 'cat': 'A'},
{'name': 'John Doe', 'age': 40, 'admin': True, 'cat': 'B'},
{'name': 'John Smith', 'age': 40, 'cat': 'B'},
{'name': 'Jim Smith', 'age': 40, 'cat': 'B'},
{'name': 'Gareth Bult1', 'age': 21, 'admin': True, 'cat': 'B'}
]
def setUp(self):
call(['rm', '-rf', self._db_name])
def tearDown(self):
pass
def generate_data(self, db, table_name):
table = db.table(table_name)
for row in self._data:
if '_id' in row:
del row['_id']
table.append(dict(row))
def generate_data1(self, db, table_name):
with db.env.begin(write=True) as txn:
table = db.table(table_name)
for row in self._data:
if '_id' in row:
del row['_id']
table.append(dict(row), txn=txn)
def generate_data2(self, db, table_name):
with db.begin():
table = db.table(table_name)
for row in self._data:
if '_id' in row:
del row['_id']
table.append(dict(row))
def test_00_debug(self):
_debug(self, "We are here!")
def test_01_open_database(self):
db = Database(self._db_name, size=size_mb(10))
self.assertTrue(isinstance(db, Database))
def test_02_create_table(self):
db = Database(self._db_name, size=size_gb(0.1))
table = db.table(self._tb_name)
self.assertTrue(isinstance(table, Table))
def test_03_tables(self):
db = Database(self._db_name)
db.table(self._tb_name)
self.assertEqual(db.tables, ['demo1'])
def test_04_exists(self):
db = Database(self._db_name)
db.table(self._tb_name)
self.assertTrue(db.exists('demo1'))
def test_05_drop(self):
db = Database(self._db_name)
db.table(self._tb_name)
db.drop(self._tb_name)
self.assertFalse(db.exists('demo1'))
self.assertEqual(db.tables, [])
def test_06_append(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
for doc in self._data:
table.append(dict(doc))
self.assertEqual(table.records, len(self._data))
def test_07_empty(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
for doc in self._data:
table.append(dict(doc))
self.assertEqual(table.records, len(self._data))
table.empty()
self.assertEqual(table.records, 0)
self.assertTrue(db.exists('demo1'))
def test_08_delete(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
for doc in self._data:
table.append(dict(doc))
doc = next(table.find(limit=1))
table.delete(doc)
self.assertEqual(table.records, len(self._data)-1)
def test_08_delete_binlog(self):
db = Database(self._db_name)
db.binlog(True)
self.generate_data(db, self._tb_name)
self.assertEqual(db.tables_all, ['__binlog__', '__metadata__', 'demo1'])
db.close()
db = Database(self._db_name)
db.drop(self._tb_name)
db.drop('__binlog__')
self.assertEqual(db.tables_all, ['__metadata__'])
with self.assertRaises(xTableMissing):
db.drop('fred')
def test_09_create_drop_index(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_name', '{name}')
table.index('by_age', '{age:03}', duplicates=True)
self.assertEqual(table.indexes, ['by_age', 'by_name'])
db.drop(self._tb_name)
self.assertEqual(db.tables, [])
def test_10_put_data(self):
people = {}
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_name', '{name}')
table.index('by_age', '{age:03}', duplicates=True)
for item in self._data:
table.append(item)
people[item['name']] = item
for item in table.find():
key = item.get('name', None)
self.assertIsNotNone(key)
if key:
person = people.get(key, None)
self.assertIsNotNone(person)
if person:
self.assertEqual(person['age'], item['age'])
self.assertEqual(person['_id'], item['_id'])
last = ''
for item in table.find('by_name'):
key = item.get('name', None)
self.assertIsNotNone(key)
if key:
person = people.get(key, None)
self.assertIsNotNone(person)
if person:
self.assertEqual(person['age'], item['age'])
self.assertEqual(person['_id'], item['_id'])
self.assertGreaterEqual(person['name'], last)
last = person['name']
last = 0
for item in table.find('by_age'):
key = item.get('name', None)
self.assertIsNotNone(key)
if key:
person = people.get(key, None)
self.assertIsNotNone(person)
if person:
self.assertEqual(person['age'], item['age'])
self.assertEqual(person['_id'], item['_id'])
self.assertGreaterEqual(person['age'], last)
last = person['age']
self.assertEqual(table.records, len(self._data))
self.assertEqual(table.index('by_name').count(), len(self._data))
self.assertEqual(table.index('by_age').count(), len(self._data))
for record in table.find('by_age', limit=3):
table.delete(record['_id'])
self.assertEqual(table.records, len(self._data) - 3)
self.assertEqual(table.index('by_name').count(), len(self._data) - 3)
self.assertEqual(table.index('by_age').count(), len(self._data) - 3)
last = 0
for item in table.find('by_age'):
key = item.get('name', None)
self.assertIsNotNone(key)
if key:
person = people.get(key, None)
self.assertIsNotNone(person)
if person:
self.assertEqual(person['age'], item['age'])
self.assertEqual(person['_id'], item['_id'])
self.assertGreaterEqual(person['age'], last)
last = person['age']
db.drop(self._tb_name)
self.assertEqual(db.tables, [])
def test_11_compound_index(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_age_name', '{age:03}{name}')
self.generate_data(db, self._tb_name)
ages = [doc['age'] for doc in self._data]
ages.sort()
ages.reverse()
for row in table.find('by_age_name'):
self.assertEqual(row['age'], ages.pop())
with self.assertRaises(ValueError):
table.index('broken', '{')
def test_12_table_reopen(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_age_name', '{age:03}{name}')
table.index('by_name', '{name}')
table.index('by_age', '{age:03}', duplicates=True)
self.generate_data(db, self._tb_name)
db.close()
db = Database(self._db_name)
table = db.table(self._tb_name)
self.assertEqual(['by_age', 'by_age_name', 'by_name'], table.indexes)
def test_13_index_exists(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_age_name', '{age:03}{name}')
table.index('by_name', '{name}')
table.index('by_age', '{age:03}', duplicates=True)
self.assertTrue(table.exists('by_name'))
db.close()
db = Database(self._db_name)
table = db.table(self._tb_name)
self.assertTrue(table.exists('by_name'))
def test_14_table_empty(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_age_name', '{age:03}{name}')
table.index('by_name', '{name}')
table.index('by_age', '{age:03}', duplicates=True)
self.generate_data(db, self._tb_name)
self.assertEqual(table.records, len(self._data))
table.empty()
table = db.table(self._tb_name)
self.assertEqual(table.records, 0)
def test_15_check_append_exception(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_age_name', '{age:03}{name}')
table.index('by_name', '{name}')
table.index('by_age', '{age:03}', duplicates=True)
self.generate_data(db, self._tb_name)
table._indexes = 10
before = table.records
with self.assertRaises(xWriteFail):
table.append({'_id': -1})
after = table.records
self.assertEqual(before, after)
def test_16_check_delete_exception(self):
class f(object):
pass
db = Database(self._db_name)
table = db.table(self._tb_name)
with self.assertRaises(TypeError):
table.delete([f])
def test_17_check_drop_exception(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
with self.assertRaises(xTableMissing):
db.drop('no table')
def test_18_check_unindex_exception(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
with self.assertRaises(xIndexMissing):
table.drop_index('fred')
table.index('by_name', '{name}')
self.assertTrue('by_name' in table.indexes)
table.drop_index('by_name')
self.assertFalse('by_name' in table.indexes)
table.index('duff', '{name}')
table._indexes['duff'] = None
with self.assertRaises(AttributeError):
table.drop_index('duff')
def test_19_count_with_txn(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_age_name', '{age:03}{name}')
table.index('by_name', '{name}')
table.index('by_age', '{age:03}', duplicates=True)
self.generate_data(db, self._tb_name)
with db.env.begin() as txn:
index = table.index('by_name')
self.assertTrue(index.count(txn=txn), 7)
def test_20_index_get(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_age_name', '{age:03}{name}')
table.index('by_name', '{name}')
table.index('by_age', '{age:03}', duplicates=True)
self.generate_data(db, self._tb_name)
with db._env.begin() as txn:
index = table.index('by_name')
_id = index.get(txn, {'name': 'Squizzey'})
doc = table.get(_id)
self.assertTrue(doc['age'], 3000)
self.assertTrue(doc['name'], 'Squizzey')
with self.assertRaises(xIndexMissing):
list(table.find('fred', 'fred'))
def test_21_filters(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
table.index('by_age_name', '{age:03}{name}')
table.index('by_name', '{name}')
self.generate_data(db, self._tb_name)
result = list(table.find(expression=lambda doc: doc['age'] == 3000))[0]
self.assertEqual(result['age'], 3000)
self.assertEqual(result['name'], 'Squizzey')
result = list(table.find('by_name', expression=lambda doc: doc['age'] == 21))[0]
self.assertEqual(result['age'], 21)
self.assertEqual(result['name'], 'Gareth Bult')
result = list(table.find('by_name', expression=lambda doc: doc['name'] == 'John Doe'))[0]
self.assertEqual(result['age'], 40)
self.assertEqual(result['name'], 'John Doe')
def test_22_reindex(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
self.generate_data(db, self._tb_name)
by_age_name = table.index('by_age_name', '{age:03}{name}')
by_name = table.index('by_name', '{name}')
by_age = table.index('by_age', '{age:03}', duplicates=True)
self.assertEqual(by_age_name.count(), 7)
self.assertEqual(by_name.count(), 7)
self.assertEqual(by_age.count(), 7)
table.reindex()
self.assertEqual(by_age_name.count(), 7)
self.assertEqual(by_name.count(), 7)
self.assertEqual(by_age.count(), 7)
def test_23_function_index(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
self.generate_data(db, self._tb_name)
table.index('by_compound', '{cat}|{name}', duplicates=True)
table.index('by_age', '{age:03}', duplicates=True)
results = []
for doc in table.find('by_compound'):
results.append(doc['cat'])
self.assertEqual(results, ['A', 'A', 'A', 'B', 'B', 'B', 'B'])
table.empty()
table = db.table(self._tb_name)
self.generate_data(db, self._tb_name)
results = []
for doc in table.find('by_compound'):
results.append(doc['cat'])
self.assertEqual(results, ['A', 'A', 'A', 'B', 'B', 'B', 'B'])
for i in table.seek('by_compound', {'cat': 'A', 'name': 'Squizzey'}):
#print("}}}",i)
self.assertEqual(i['age'], 3000)
for i in table.seek('by_compound', {'cat': 'B', 'name': 'John Doe'}):
self.assertEqual(i['age'], 40)
self.assertEqual(list(table.seek('by_compound', {'cat': 'C', 'name': 'Squizzey'})), [])
lower = {'cat': 'A', 'name': 'Squizzey'}
upper = {'cat': 'B', 'name': 'Gareth Bult1'}
iter = table.range('by_compound', lower, upper)
results = list(iter)
self.assertEqual(results[0]['name'], 'Squizzey')
self.assertEqual(results[1]['name'], 'Gareth Bult1')
#print(results[0])
results[0]['name'] = '!Squizzey'
results[0]['age'] = 1
table.save(results[0])
table._indexes['duff'] = None
with self.assertRaises(AttributeError):
table.save(results[0])
self.assertEqual(list(table.find('by_compound'))[0]['name'], '!Squizzey')
self.assertEqual(list(table.find('by_age'))[0]['age'], 1)
def test_24_partial_index(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
self.generate_data(db, self._tb_name)
table.index('by_admin', '{admin}', duplicates=True)
try:
for doc in table.find('by_admin'): pass
#print("> {admin}".format(**doc), doc)
except Exception as error:
self.fail('partial key failure')
raise error
self.assertEqual(table.index('by_admin').count(), 3)
with self.assertRaises(AttributeError):
table.unindex('by_admin', 123)
def test_25_seek_one(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
self.generate_data(db, self._tb_name)
table.index('by_age', '{age:03}', duplicates=True)
doc = table.seek_one('by_age', {'age': 3000})
self.assertEqual(doc['age'], 3000)
self.assertEqual(doc['name'], 'Squizzey')
def test_26_drop_reuse(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
self.generate_data(db, self._tb_name)
db.drop(self._tb_name)
table = db.table(self._tb_name)
self.generate_data(db, self._tb_name)
table.index('by_age', '{age:03}', duplicates=True)
doc = table.seek_one('by_age', {'age': 3000})
self.assertEqual(doc['age'], 3000)
self.assertEqual(doc['name'], 'Squizzey')
for doc in table.find():
_id = doc['_id']
name = doc['name']
break
with db.begin():
db.restructure(self._tb_name)
table = db.table(self._tb_name)
for doc in table.find():
#print(doc)
self.assertEqual(doc['name'], name)
self.assertEqual(doc['_id'], _id)
break
def test_28_range(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
data = [
{'code': 'F', 'name': 'Tom'},
{'code': 'E', 'name': 'Dick'},
{'code': 'E', 'name': 'Dick1'},
{'code': 'D', 'name': 'Harry'},
{'code': 'C', 'name': 'Fred'},
{'code': 'B', 'name': 'John'},
{'code': 'B', 'name': 'John1'},
{'code': 'A', 'name': 'Sam'},
]
for row in data:
table.append(row)
table.index('by_code', '{code}', duplicates=True)
res = list(table.find('by_code'))
self.assertEqual(res[0]['code'], 'A')
self.assertEqual(res[-1]['code'], 'F')
res = list(table.find())
lower = res[0]['_id']
upper = res[-1]['_id']
#print('Lower={} Upper={}'.format(lower, upper))
natural = list(table.range(None, {'_id': lower}, {'_id': upper}))
self.assertEqual(natural[0]['code'], 'F')
self.assertEqual(natural[-1]['code'], 'A')
res = list(table.find())
lower = res[0]['_id']
upper = res[-2]['_id']
#print('Lower={} Upper={}'.format(lower, upper))
natural = list(table.range(None, {'_id': lower}, {'_id': upper}))
#for doc in natural:
# print(doc)
self.assertEqual(natural[0]['code'], 'F')
self.assertEqual(natural[-1]['code'], 'B')
res = list(table.find())
lower = res[0]['_id']
upper = res[-1]['_id']
#print('Lower={} Upper={}'.format(lower, upper))
natural = list(table.range(None, {'_id': lower}, {'_id': upper}, inclusive=False))
#for doc in natural:
# print(doc)
self.assertEqual(natural[0]['code'], 'E')
self.assertEqual(natural[-1]['code'], 'B')
res = list(table.find())
lower = None
upper = res[-1]['_id']
#print('Lower={} Upper={}'.format(lower, upper))
natural = list(table.range(None, {'_id': lower}, {'_id': upper}))
self.assertEqual(natural[0]['code'], 'F')
self.assertEqual(natural[-1]['code'], 'A')
res = list(table.find())
lower = res[0]['_id']
upper = None
natural = list(table.range(None, {'_id': lower}, {'_id': upper}))
self.assertEqual(natural[0]['code'], 'F')
self.assertEqual(natural[-1]['code'], 'A')
res = list(table.find())
lower = None
upper = None
natural = list(table.range(None, {'_id': lower}, {'_id': upper}))
self.assertEqual(natural[0]['code'], 'F')
self.assertEqual(natural[-1]['code'], 'A')
lower = res[0]['_id']
upper = res[0]['_id']
natural = list(table.range(None, {'_id': lower}, {'_id': upper}))
self.assertEqual(natural[0]['code'], 'F')
lower = res[-1]['_id']
upper = res[-1]['_id']
natural = list(table.range(None, {'_id': lower}, {'_id': upper}))
self.assertEqual(natural[0]['code'], 'A')
lower = res[0]['_id']
upper = res[0]['_id']
natural = list(table.range(None, {'_id': lower}, {'_id': upper}, inclusive=False))
self.assertEqual(natural, [])
lower = res[-1]['_id']
upper = res[-1]['_id']
natural = list(table.range(None, {'_id': lower}, {'_id': upper}, inclusive=False))
self.assertEqual(natural, [])
lower = res[0]['_id']
upper = res[1]['_id']
natural = list(table.range(None, {'_id': lower}, {'_id': upper}, inclusive=False))
self.assertEqual(natural, [])
lower = res[0]['_id']
upper = res[2]['_id']
natural = list(table.range(None, {'_id': lower}, {'_id': upper}, inclusive=False))
self.assertEqual(natural[0]['_id'], res[1]['_id'])
table.index('by_code', '{code}')
res = list(table.range('by_code', {'code': '0'}, {'code': 'Z'}))
self.assertEqual(res[0]['code'], 'A')
self.assertEqual(res[-1]['code'], 'F')
res = list(table.range('by_code', {'code': 'B'}, {'code': 'E'}))
self.assertEqual(res[0]['code'], 'B')
self.assertEqual(res[-1]['code'], 'E')
res = list(table.range('by_code', {'code': 'B'}, {'code': 'E'}, inclusive=False))
self.assertEqual(res[0]['code'], 'C')
self.assertEqual(res[-1]['code'], 'D')
res = list(table.range('by_code', {'code': 'A'}, {'code': 'F'}, inclusive=False))
self.assertEqual(res[0]['code'], 'B')
self.assertEqual(res[-1]['code'], 'E')
res = list(table.range('by_code', None, None))
self.assertEqual(res[0]['code'], 'A')
self.assertEqual(res[-1]['code'], 'F')
def test_29_with_txn(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
self.generate_data1(db, self._tb_name)
self.generate_data2(db, self._tb_name)
with db.begin():
with self.assertRaises(xIndexMissing):
next(table.find('123'))
with db.begin():
idx = table.index('by_name', '{name}', duplicates=True)
idx.empty(db.transaction.txn)
self.generate_data1(db, self._tb_name)
with db.begin():
table.drop_index('by_name')
table.index('by_name', '{name}', duplicates=True)
docs = list(table.find())
doc = docs[0]
table.delete(doc)
doc = docs[1]
doc['age'] += 1
table.save(doc)
docs = list(table.find())
doc = docs[0]
self.assertEqual(doc['age'], 3001)
all = db.tables_all
cmp = ['__binlog__','__metadata__', '_demo1_by_name', 'demo1']
all.sort()
cmp.sort()
self.assertEqual(all, cmp)
db.binlog(False)
all = db.tables_all
all.sort()
cmp = ['__metadata__', '_demo1_by_name', 'demo1']
cmp.sort()
self.assertEqual(all, cmp)
db.drop('demo1')
with self.assertRaises(Exception):
with db.begin():
raise Exception('catch this')
self.assertEqual(db.tables_all, ['__metadata__'])
db.binlog(False)
db.sync(True)
db = Database(self._db_name, binlog=False)
db.binlog()
db.binlog(False)
def test_30_ensure(self):
db = Database(self._db_name)
table = db.table(self._tb_name)
self.generate_data1(db, self._tb_name)
with db.begin():
index = table.ensure('by_name', '{name}', True, False)
index = table.ensure('by_name', '{name}', True, False)
index = table.ensure('by_name', '{name}', True, True)
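# Minimal command-line entry point (a sketch, not present in the original file);
# test runners such as pytest or nose discover UnitTests without it.
if __name__ == '__main__':
    unittest.main()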
|
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from __future__ import print_function
import six.moves.cPickle as pickle
from traits.api import Bool, Any, Instance, Button, Property, Event, on_trait_change
from traitsui.api import View, Item, Handler, HGroup
# ============= standard library imports ========================
# from threading import Thread
from threading import Event as TEvent
from numpy import linspace, argmin, argmax, random, asarray
import time
import os
# ============= local library imports ==========================
from pychron.core.time_series.time_series import smooth
from pychron.image.cv_wrapper import grayspace, crop, get_focus_measure
# from pychron.image.cvwrapper import grayspace, get_focus_measure, crop, resize
from scipy.ndimage.measurements import variance
from scipy.ndimage.filters import generic_gradient_magnitude, sobel
from scipy.ndimage import sum as ndsum
from pychron.paths import paths
from pychron.managers.manager import Manager
from pychron.image.image import Image
# from pychron.machine_vision.focus_parameters import FocusParameters
# from pychron.image.image_editor import ImageEditor
from pychron.graph.graph import Graph
from pychron.mv.focus.focus_parameters import FocusParameters
from pychron.core.ui.image_editor import ImageEditor
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.core.ui.thread import Thread
class ConfigureHandler(Handler):
def closed(self, info, isok):
if isok:
info.object.dump_parameters()
class AutoFocusManager(Manager):
"""
currently uses passive focus techniques
see
http://en.wikipedia.org/wiki/Autofocus
"""
video = Any
laser_manager = Any
stage_controller = Any
canvas = Any
parameters = Instance(FocusParameters)
configure_button = Button('configure')
autofocus_button = Event
autofocus_label = Property(depends_on='autofocusing')
autofocusing = Bool
# threading event for cancel signal
_evt_autofocusing = None
image = Instance(Image, ())
graph = None
def dump_parameters(self):
p = os.path.join(paths.hidden_dir, 'autofocus_configure')
self.info('dumping parameters to {}'.format(p))
with open(p, 'wb') as f:
pickle.dump(self.parameters, f)
def load_parameter(self):
p = os.path.join(paths.hidden_dir, 'autofocus_configure')
if os.path.isfile(p):
with open(p, 'rb') as f:
try:
params = pickle.load(f)
self.info('loading parameters from {}'.format(p))
if not isinstance(params, FocusParameters):
self.info('out of date parameters file. using default')
params = FocusParameters()
return params
except Exception as e:
print('autofocus load parameter', e)
return FocusParameters()
else:
return FocusParameters()
def passive_focus(self, block=False, **kw):
self._evt_autofocusing = TEvent()
self._evt_autofocusing.clear()
# manager = self.laser_manager
oper = self.parameters.operator
self.info('passive focus. operator = {}'.format(oper))
g = self.graph
if not g:
g = Graph(plotcontainer_dict=dict(padding=10),
window_x=0.70,
window_y=20,
window_width=325,
window_height=325,
window_title='Autofocus'
)
self.graph = g
g.clear()
g.new_plot(padding=[40, 10, 10, 40],
xtitle='Z (mm)',
ytitle='Focus Measure ({})'.format(oper)
)
g.new_series()
g.new_series()
invoke_in_main_thread(self._open_graph)
target = self._passive_focus
self._passive_focus_thread = Thread(name='autofocus', target=target,
args=(self._evt_autofocusing,
),
kwargs=kw
)
self._passive_focus_thread.start()
if block:
# while 1:
# if not self._passive_focus_thread.isRunning():
# break
# time.sleep(0.25)
self._passive_focus_thread.join()
def _open_graph(self):
ui = self.graph.edit_traits()
self.add_window(ui)
def stop_focus(self):
if self.stage_controller:
self.stage_controller.stop()
self.info('autofocusing stopped by user')
def _passive_focus(self, stop_signal, set_zoom=True):
'''
sweep z looking for max focus measure
FMgrad= roberts or sobel (sobel removes noise)
FMvar = intensity variance
'''
self.autofocusing = True
manager = self.laser_manager
fstart = self.parameters.fstart
fend = self.parameters.fend
step_scalar = self.parameters.step_scalar
zoom = self.parameters.zoom
operator = self.parameters.operator
steps = step_scalar * (max(fend, fstart) - min(fend, fstart)) + 1
prev_zoom = None
if set_zoom and \
manager is not None and \
zoom:
motor = manager.get_motor('zoom')
if motor:
prev_zoom = motor.data_position
self.info('setting zoom: {}'.format(zoom))
manager.set_motor('zoom', zoom, block=True)
time.sleep(1.5)
args = self._do_focusing(fstart, fend, steps, operator)
if manager is not None:
if prev_zoom is not None:
self.info('returning to previous zoom: {}'.format(prev_zoom))
manager.set_motor('zoom', prev_zoom, block=True)
if args:
mi, fmi, ma, fma = args
self.info('''passive focus results:Operator={}
ImageGradmin={} (z={})
ImageGradmax={}, (z={})'''.format(operator, mi, fmi, ma, fma))
focus_pos = fma
self.graph.add_vertical_rule(focus_pos)
self.graph.redraw()
# self.graph.add_vertical_rule(fma)
self.info('calculated focus z= {}'.format(focus_pos))
# if set_z:
controller = self.stage_controller
if controller is not None:
if not stop_signal.isSet():
controller.single_axis_move('z', focus_pos, block=True)
controller._z_position = focus_pos
controller.z_progress = focus_pos
self.autofocusing = False
def _cancel_sweep(self, vo):
if self._evt_autofocusing.isSet():
# return to original velocity
self.autofocusing = False
self._reset_velocity(vo)
return True
def _reset_velocity(self, vo):
if self.stage_controller:
pdict = dict(velocity=vo, key='z')
self.stage_controller.set_single_axis_motion_parameters(pdict=pdict)
def _do_focusing(self, start, end, steps, operator):
screen_roi = self._get_roi()
self._add_focus_area_rect(*screen_roi)
src = self._load_source()
src = asarray(src)
h, w, _d = src.shape
cx = w / 2.
cy = h / 2.
cw = self.parameters.crop_width
ch = self.parameters.crop_height
roi = cx, cy, cw, ch
'''
start the z in motion and take pictures as you go
query stage_controller to get current z
'''
self.info('focus sweep start={} end={}'.format(start, end))
# move to start position
controller = self.stage_controller
if controller:
vo = controller.axes['z'].velocity
if self._cancel_sweep(vo):
return
self.graph.set_x_limits(min(start, end), max(start, end), pad=2)
# sweep 1 and velocity 1
self._do_sweep(start, end, velocity=self.parameters.velocity_scalar1)
fms, focussteps = self._collect_focus_measures(operator, roi)
if not (fms and focussteps):
return
# reached end of sweep
# calculate a nominal focal point
args = self._calculate_nominal_focal_point(fms, focussteps)
if not args:
return
nfocal = args[3]
nwin = self.parameters.negative_window
pwin = self.parameters.positive_window
if self._cancel_sweep(vo):
return
nstart, nend = max(0, nfocal - nwin), nfocal + pwin
# mi = min(min(nstart, nend), min(start, end))
# ma = max(max(nstart, nend), max(start, end))
# self.graph.set_x_limits(mi, ma, pad=2)
time.sleep(1)
# do a slow tight sweep around the nominal focal point
self._do_sweep(nstart, nend, velocity=self.parameters.velocity_scalar2)
fms, focussteps = self._collect_focus_measures(operator, roi, series=1)
self._reset_velocity(vo)
else:
focussteps = linspace(0, 10, 11)
fms = -(focussteps - 5) ** 2 + 10 + random.random(11)
self.info('frames analyzed {}'.format(len(fms)))
# self.canvas.markupcontainer.pop('croprect')
return self._calculate_nominal_focal_point(fms, focussteps)
def _do_sweep(self, start, end, velocity=None):
controller = self.stage_controller
controller.single_axis_move('z', start, block=True)
# time.sleep(0.1)
# explicitly check for motion
# controller.block(axis='z')
if velocity:
vo = controller.axes['z'].velocity
controller.set_single_axis_motion_parameters(pdict=dict(velocity=vo * velocity,
key='z'))
self.info('starting sweep from {}'.format(controller.z_progress))
# pause before moving to end
time.sleep(0.25)
controller.single_axis_move('z', end, update=100, immediate=True)
def _collect_focus_measures(self, operator, roi, series=0):
controller = self.stage_controller
focussteps = []
fms = []
if controller.timer:
p = controller.timer.get_interval()
self.debug('controller timer period {}'.format(p))
pz = controller.z_progress
while 1:
src = self._load_source()
x = controller.z_progress
if x != pz:
y = self._calculate_focus_measure(src, operator, roi)
self.graph.add_datum((x, y), series=series)
focussteps.append(x)
fms.append(y)
pz = x
if not (controller.timer.isActive() and \
not self._evt_autofocusing.isSet()):
break
time.sleep(p)
self.debug('sweep finished')
return fms, focussteps
def _calculate_nominal_focal_point(self, fms, focussteps):
if fms:
sfms = smooth(fms)
if sfms is not None:
self.graph.new_series(focussteps, sfms)
self.graph.redraw()
fmi = focussteps[argmin(sfms)]
fma = focussteps[argmax(sfms)]
mi = min(sfms)
ma = max(sfms)
return mi, fmi, ma, fma
def _calculate_focus_measure(self, src, operator, roi):
'''
see
IMPLEMENTATION OF A PASSIVE AUTOMATIC FOCUSING ALGORITHM
FOR DIGITAL STILL CAMERA
DOI 10.1109/30.468047
and
http://cybertron.cg.tu-berlin.de/pdci10/frankencam/#autofocus
'''
# need to resize to 640,480. this is the space the roi is in
# s = resize(grayspace(pychron), 640, 480)
src = grayspace(src)
v = crop(src, *roi)
di = dict(var=lambda x:variance(x),
laplace=lambda x: get_focus_measure(x, 'laplace'),
sobel=lambda x: ndsum(generic_gradient_magnitude(x, sobel, mode='nearest'))
)
func = di[operator]
return func(v)
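    # Illustrative sketch (comments only, not part of the original class): each
    # operator above reduces the cropped grayscale frame to a single sharpness
    # score, e.g. for a synthetic frame:
    #
    #   frame = random.random((480, 640))          # stand-in for a grayscale frame
    #   fm_var = variance(frame)                   # 'var': intensity variance
    #   fm_sobel = ndsum(generic_gradient_magnitude(frame, sobel, mode='nearest'))
    #
    # A sharper image gives a larger score, which is why _passive_focus keeps the
    # z position at the maximum of the smoothed measure series.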
def image_view(self):
v = View(Item('image', show_label=False, editor=ImageEditor(),
width=640,
height=480,
style='custom'))
return v
def traits_view(self):
v = View(
HGroup(self._button_factory('autofocus_button', 'autofocus_label'),
Item('configure_button', show_label=False),
show_border=True,
label='Autofocus'
)
)
return v
def configure_view(self):
v = View(Item('parameters', style='custom', show_label=False),
handler=ConfigureHandler,
buttons=['OK', 'Cancel'],
kind='livemodal',
title='Configure Autofocus',
x=0.80,
y=0.05
)
return v
def _load_source(self):
src = self.video.get_frame()
return src
# if pychron:
# return Image.new_frame(pychron)
# self.image.load(pychron)
# return self.image.source_frame
def _get_roi(self):
w = self.parameters.crop_width
h = self.parameters.crop_height
cx, cy = self.canvas.get_center_rect_position(w, h)
# cw, ch = self.canvas.outer_bounds
# print w, h, cw, ch
# cx = cw / 2. - w / 2.
# cy = ch / 2. - h / 2.
# cx = (cw - w) / 2.
# cy = (ch - h) / 2.
# cx = (640 * self.canvas.scaling - w) / 2
# cy = (480 * self.canvas.scaling - h) / 2
roi = cx, cy, w, h
return roi
def _add_focus_area_rect(self, cx, cy, w, h):
# pl = self.canvas.padding_left
# pb = self.canvas.padding_bottom
self.canvas.remove_item('croprect')
self.canvas.add_markup_rect(cx, cy, w, h, identifier='croprect')
def _autofocus_button_fired(self):
if not self.autofocusing:
self.autofocusing = True
self.passive_focus()
else:
self.autofocusing = False
self._evt_autofocusing.set()
self.stop_focus()
def _configure_button_fired(self):
self._crop_rect_update()
self.edit_traits(view='configure_view', kind='livemodal')
self.canvas.remove_item('croprect')
# try:
# self.canvas.markupcontainer.pop('croprect')
# except KeyError:
# pass
@on_trait_change('parameters:[_crop_width,_crop_height]')
def _crop_rect_update(self):
roi = self._get_roi()
self._add_focus_area_rect(*roi)
def _get_autofocus_label(self):
return 'Autofocus' if not self.autofocusing else 'Stop'
def _parameters_default(self):
return self.load_parameter()
def _autofocusing_changed(self, new):
if not new:
self.canvas.remove_item('croprect')
# ===============================================================================
# Deprecated
# ===============================================================================
# ============= EOF =====================================
|
|
# -*- coding: utf-8 -*-
"""The SleuthKit (TSK) file system implementation."""
import pytsk3
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.lib import tsk_image
from dfvfs.path import tsk_path_spec
from dfvfs.resolver import resolver
from dfvfs.vfs import file_system
from dfvfs.vfs import tsk_file_entry
class TSKFileSystem(file_system.FileSystem):
"""File system that uses pytsk3."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_TSK
def __init__(self, resolver_context, path_spec):
"""Initializes a file system.
Args:
resolver_context (Context): resolver context.
path_spec (PathSpec): a path specification.
"""
super(TSKFileSystem, self).__init__(resolver_context, path_spec)
self._file_object = None
self._tsk_file_system = None
self._tsk_fs_type = None
def _Close(self):
"""Closes a file system.
Raises:
IOError: if the close failed.
"""
self._tsk_file_system = None
self._file_object = None
def _Open(self, mode='rb'):
"""Opens the file system object defined by path specification.
Args:
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file system object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not self._path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
self._path_spec.parent, resolver_context=self._resolver_context)
tsk_image_object = tsk_image.TSKFileSystemImage(file_object)
tsk_file_system = pytsk3.FS_Info(tsk_image_object)
self._file_object = file_object
self._tsk_file_system = tsk_file_system
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists.
"""
# Opening a file by inode number is faster than opening a file by location.
tsk_file = None
inode = getattr(path_spec, 'inode', None)
location = getattr(path_spec, 'location', None)
try:
if inode is not None:
tsk_file = self._tsk_file_system.open_meta(inode=inode)
elif location is not None:
tsk_file = self._tsk_file_system.open(location)
except IOError:
pass
return tsk_file is not None
def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
TSKFileEntry: a file entry or None if not available.
"""
# Opening a file by inode number is faster than opening a file by location.
tsk_file = None
inode = getattr(path_spec, 'inode', None)
location = getattr(path_spec, 'location', None)
root_inode = self.GetRootInode()
if (location == self.LOCATION_ROOT or
(inode is not None and root_inode is not None and inode == root_inode)):
tsk_file = self._tsk_file_system.open(self.LOCATION_ROOT)
return tsk_file_entry.TSKFileEntry(
self._resolver_context, self, path_spec, tsk_file=tsk_file,
is_root=True)
try:
if inode is not None:
tsk_file = self._tsk_file_system.open_meta(inode=inode)
elif location is not None:
tsk_file = self._tsk_file_system.open(location)
except IOError:
pass
if tsk_file is None:
return None
# TODO: is there a way to determine the parent inode number here?
return tsk_file_entry.TSKFileEntry(
self._resolver_context, self, path_spec, tsk_file=tsk_file)
def GetFsInfo(self):
"""Retrieves the file system info.
Returns:
pytsk3.FS_Info: file system info.
"""
return self._tsk_file_system
def GetFsType(self):
"""Retrieves the file system type.
Returns:
pytsk3.TSK_FS_TYPE_ENUM: file system type.
"""
if self._tsk_fs_type is None:
self._tsk_fs_type = pytsk3.TSK_FS_TYPE_UNSUPP
if (not self._tsk_file_system or
not hasattr(self._tsk_file_system, 'info')):
return self._tsk_fs_type
self._tsk_fs_type = getattr(
self._tsk_file_system.info, 'ftype', pytsk3.TSK_FS_TYPE_UNSUPP)
return self._tsk_fs_type
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
TSKFileEntry: a file entry.
"""
kwargs = {}
root_inode = self.GetRootInode()
if root_inode is not None:
kwargs['inode'] = root_inode
kwargs['location'] = self.LOCATION_ROOT
kwargs['parent'] = self._path_spec.parent
path_spec = tsk_path_spec.TSKPathSpec(**kwargs)
return self.GetFileEntryByPathSpec(path_spec)
def GetRootInode(self):
"""Retrieves the root inode.
Returns:
int: inode number or None if not available.
"""
# Note that because pytsk3.FS_Info does not explicitly define info
# we need to check if the attribute exists and has a value other
# than None
if getattr(self._tsk_file_system, 'info', None) is None:
return None
# Note that because pytsk3.TSK_FS_INFO does not explicitly define
# root_inum we need to check if the attribute exists and has a value
# other than None
return getattr(self._tsk_file_system.info, 'root_inum', None)
def GetTSKFileByPathSpec(self, path_spec):
"""Retrieves the SleuthKit file object for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pytsk3.File: TSK file.
Raises:
PathSpecError: if the path specification is missing inode and location.
"""
# Opening a file by inode number is faster than opening a file
# by location.
inode = getattr(path_spec, 'inode', None)
location = getattr(path_spec, 'location', None)
if inode is not None:
tsk_file = self._tsk_file_system.open_meta(inode=inode)
elif location is not None:
tsk_file = self._tsk_file_system.open(location)
else:
raise errors.PathSpecError(
'Path specification missing inode and location.')
return tsk_file
def IsExt(self):
"""Determines if the file system is ext2, ext3 or ext4.
Returns:
bool: True if the file system is ext.
"""
tsk_fs_type = self.GetFsType()
return tsk_fs_type in [
pytsk3.TSK_FS_TYPE_EXT2, pytsk3.TSK_FS_TYPE_EXT3,
pytsk3.TSK_FS_TYPE_EXT4, pytsk3.TSK_FS_TYPE_EXT_DETECT]
def IsHFS(self):
"""Determines if the file system is HFS, HFS+ or HFSX.
Returns:
bool: True if the file system is HFS.
"""
tsk_fs_type = self.GetFsType()
return tsk_fs_type in [
pytsk3.TSK_FS_TYPE_HFS, pytsk3.TSK_FS_TYPE_HFS_DETECT]
def IsNTFS(self):
"""Determines if the file system is NTFS.
Returns:
bool: True if the file system is NTFS.
"""
tsk_fs_type = self.GetFsType()
return tsk_fs_type in [
pytsk3.TSK_FS_TYPE_NTFS, pytsk3.TSK_FS_TYPE_NTFS_DETECT]
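# Illustrative usage sketch (comments only, not part of the original module; the
# image path is a placeholder and os_path_spec is imported from dfvfs.path
# elsewhere). A TSKFileSystem is normally obtained through the resolver with a
# path specification that has a parent:
#
#   os_spec = os_path_spec.OSPathSpec(location='/tmp/image.raw')
#   spec = tsk_path_spec.TSKPathSpec(location='/', parent=os_spec)
#   file_system = resolver.Resolver.OpenFileSystem(spec)
#   root_entry = file_system.GetRootFileEntry()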
|
|
#!/usr/bin/env python
# encoding: utf-8
r"""
Data Module
Contains the general class definition and the subclasses of the Clawpack data
objects.
:Authors:
Kyle T. Mandli and Randall J. LeVeque (2008-08-07) Initial version
Randall J. LeVeque (2008-08-07) Plotting data objects
Alan McIntyre (2009-01-01) Speed ups and rebuilding of Data
Kyle T. Mandli (2009-04-01) Stripped down and improved version
"""
# ============================================================================
# Copyright (C) 2008 Kyle T. Mandli <mandli@amath.washington.edu>
# Copyright (C) 2008 Randall J. LeVeque <rjl@amath.washington.edu>
# Copyright (C) 2009 Alan McIntyre <amcin001@amath.washington.edu>
#
# Distributed under the terms of the Berkeley Software Distribution (BSD)
# license
# http://www.opensource.org/licenses/
# ============================================================================
import shutil
import os
import copy
import re
import logging
# ========== Parse Value Utility Function ====================================
def _parse_value(value):
r"""
Attempt to make sense of a value string from a config file. If the
value is not obviously an integer, float, or boolean, it is returned as
a string stripped of leading and trailing whitespace.
:Input:
- *value* - (string) Value string to be parsed
:Output:
- (id) - Appropriate object based on *value*
"""
value = value.strip()
if not value:
return None
# assume that values containing spaces are lists of values
if len(value.split()) > 1:
return [_parse_value(vv) for vv in value.split()]
try:
# see if it's an integer
value = int(value)
except ValueError:
try:
# see if it's a float
value = float(value)
except ValueError:
# see if it's a bool
if value[0] == 'T':
value = True
elif value[0] == 'F':
value = False
return value
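# Illustrative (untested) examples of how _parse_value interprets strings:
#   _parse_value('3')        -> 3           (integer)
#   _parse_value('0.5')      -> 0.5         (float)
#   _parse_value('T')        -> True        (leading 'T'/'F' treated as boolean)
#   _parse_value('1 2 3')    -> [1, 2, 3]   (whitespace separated values become a list)
#   _parse_value('plotdata') -> 'plotdata'  (falls back to the stripped string)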
# ============================================================================
# General Data Class
# ============================================================================
class Data(object):
r"""
Generalized clawpack data object
Generalized class for Clawpack data. Contains generic methods for reading
and writing data to and from a data file.
:Initialization:
Input:
- *data_files* - (List of strings) Paths to data files to be read in,
an empty data object can be created by providing no data files.
- *attributes* - (List of strings) List of required attribute names
which will be initialized to None.
:Version: 1.2 (2009-04-01)
"""
__name__ = 'Data'
def __init__(self, data_files=[], attributes=None):
"""
Initialize a Data object
See :class:`Data` for more info.
"""
# Internal bookkeeping variables
self.__attributes = []
self.__owners = {}
# Setup data logger
self.logger = logging.getLogger('data')
# Initialize from attribute list provided
if attributes:
for attr in attributes:
self.add_attribute(attr,None,None)
# Read data files from data_files list
if isinstance(data_files, basestring):
data_files = [data_files]
elif not isinstance(data_files, list):
raise Exception("data_files must be a list of strings")
if len(data_files) > 0:
self.read(data_files)
# ========== Return string representation of this Data Object ========
def __str__(self):
output = "%s%s%s\n" % ("Name".ljust(25),"Value".ljust(12),
"Owner".ljust(12))
for (k,v) in self.iteritems():
output += "%s%s%s\n" % (str(k).ljust(25),
str(v).ljust(12),
str(self.__owners[k]).ljust(12))
return output
# ========== Access Methods ==============================================
def add_attribute(self, name, value=None, owner=None):
r"""
Adds an attribute called name to the data object
If an attribute needs to be added to the object, this routine must be
called or the attribute will not be written out.
:Input:
- *name* - (string) Name of the data attribute
- *value* - (id) Value to set *name* to, defaults to None
- *owner* - (id) Owner of this particular attribute
"""
setattr(self,name,value)
self.__owners[name] = owner
if name not in self.__attributes:
self.__attributes.append(name)
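# Sketch of the intended use (names are illustrative): attributes must be
# registered through add_attribute so that write() knows about them; plain
# attribute assignment alone would not be written out to any data file.
#   data = Data()
#   data.add_attribute('tfinal', 1.0)
#   data.tfinal = 2.0   # updating an already registered attribute is fine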
def remove_attributes(self, arg_list):
r"""
Remove the listed attributes.
"""
# Convert to list if args is not already a list
if not isinstance(arg_list,list):
arg_list = [arg_list]
for arg in arg_list:
self.__owners.pop(arg)
self.__attributes.remove(arg)
delattr(self,arg)
def attributes():
def fget(self): return self.__attributes
return locals()
attributes = property(**attributes())
def has_attribute(self,name):
r"""
Check if this data object has the given attribute
:Input:
- *name* - (string) Name of attribute
:Output:
- (bool) - True if data object contains a data attribute name
"""
return name in self.__attributes
def set_owner(self,name,owner):
r"""
Sets the owner of the given data
:Input:
- *name* - (string) Name of attribute
- *owner* - (id) Owner of the attribute
"""
if name not in self.__attributes:
raise KeyError("No attribute named %s" % name)
self.__owners[name] = owner
def get_owner(self,name):
r"""
Returns the owner of the data attribute name
:Input:
- *name* - (string) Name of attribute
:Output:
- (id) - Owner of attribute
"""
return self.__owners[name]
def get_owners(self,supplementary_file=None):
r"""
Returns a list of owners excluding the owner None
If supplementary_file is provided, None is replaced by that owner.
:Input:
- *supplementary_file* - (string) Supplementary file, defaults to
None.
:Output:
- (list) - Returns a list of owners
"""
owners = []
for (key,owner) in self.__owners.iteritems():
if owner is None:
self.__owners[key] = supplementary_file
# This simultaneously finds one instance of an owner and tests
# to see if the supplementary_file is not None
owner = self.__owners[key]
if owner not in owners and owner is not None:
owners.append(owner)
return owners
def iteritems(self):
r"""
Returns a list of (key, value) pairs for the registered attributes
:Output:
- (list) List of (key, value) tuples
"""
return [(k,getattr(self,k)) for k in self.__attributes]
# ========== Read in a collection of data files ==========================
def read(self,data_paths):
r"""
Read data in from a clawpack style data file.
Any lines of the form::
values =: name
are used to set attributes of self.
INPUT
data_paths : Path to a data file to be read in, can also be a list
of files to be read in.
"""
if isinstance(data_paths, basestring):
data_paths = [data_paths]
for filename in data_paths:
filename = os.path.abspath(filename)
if not os.path.exists(filename):
raise Exception("No such data file: %s" % filename)
# self.logger.info("Reading from %s" % filename)
for lineno, line in enumerate(file(filename)):
if '=:' not in line:
continue
value, tail = line.split('=:')
varname = tail.split()[0]
oldval = getattr(self, varname, None)
newval = _parse_value(value)
if oldval is not None:
vals = "(old=%r,new=%r)" % (oldval, newval)
# self.logger.debug("Overwriting %s %s" % (varname, vals))
# if newval is None:
# self.logger.warning("Empty value for %s" % varname)
self.add_attribute(varname, newval, filename)
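# A minimal sketch of the file format read() accepts; only lines containing
# '=:' are parsed, with the value to the left and the attribute name as the
# first token to the right (anything after the name is ignored):
#
#   100   =: mx           (cells in x direction)
#   0.9   =: cfl_desired
#   T     =: dt_variable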
# ========== Write out the data from this object =========================
def write(self,data_files=None,supplementary_file=None):
r"""
Write out the contents of the data object
This method writes out the current required attributes of the data
object to a file or list of files. The format for the output will be
in the form::
values =: name
The order is either retained from the files in the data list or
written in the order they are in the list of attributes
The behavior is determined by the arguments passed into the routine:
No arguments:
The contents of the object will be written out to the files listed
in data_files only if each attribute is contained in the file.
This implies that if an attribute is not located in any of the
files in data_files then it will not be written out to file.
If data_files is provided and data_files is a valid owner:
Write out the attributes appropriate to that file
If data_files is provided and not(data_files in owner):
Write all attributes to the data_file given
If supplementary_file is provided:
Write out any attributes without an owner to this file, all owned
attributes will be written out to the appropriate files
"""
# Expand supplementary_file if it is not None
if supplementary_file is not None:
supplementary_file = os.path.abspath(supplementary_file)
# Create list of owners
owners = self.get_owners(supplementary_file=supplementary_file)
#print 'in write: data_files = ',data_files
# Write to the entire owner list
if data_files is None:
file_list = owners
elif isinstance(data_files,str):
#print '<p>in write'; import sys; sys.exit(0)
path = os.path.abspath(data_files)
if path not in owners:
# Create temporary data file to store all data in the object
try:
temp_file = open(path,'w')
for key in self.__attributes:
temp_file.write("1 =: %s\n" % key)
temp_file.close()
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
raise
except:
raise
# Add the path to the file_list
file_list = [path]
# Check to make sure all the paths are in the owner list
elif isinstance(data_files,list):
for path in data_files:
path = os.path.abspath(path)
if path not in owners:
print "%s is not a registered owner!" % path
return
file_list = data_files
else:
raise Exception("Invalid argument list given to write().")
# Create temporary supplementary file if requested
if supplementary_file is not None:
try:
sup_file = open(supplementary_file,'w')
for attr in self.__attributes:
if self.__owners[attr] is supplementary_file:
sup_file.write("-1 =: %s\n" % attr)
self.__owners[attr] = supplementary_file
sup_file.close()
except:
raise
# Regular expression for searching each file
regexp = re.compile(r"(?P<values>.*)=:(?P<name>.*)")
# Loop over each file
for data_path in file_list:
# Open the data file and temporary file
try:
data_file = open(data_path,'r')
except(IOError):
raise
try:
temp_path = os.path.join(os.path.dirname(data_path), \
'temp.' + os.path.basename(data_path))
temp_file = open(temp_path,'w')
except(IOError):
print "IOERROR"
raise
try:
for line in data_file:
result = regexp.search(line)
if result:
name = re.split(r'\s+', result.group('name').strip())[0]
values = re.split(r'\s+', result.group('values').strip())
if len(values) == 0:
line = ''
elif self.__owners[name] == data_path or \
data_path not in self.__owners:
newvalues = getattr(self,name)
# Convert newvalues to an appropriate string repr
if isinstance(newvalues, (tuple, list)):
# Remove [], (), and ','
newstring = repr(newvalues)[1:-1]
newstring = newstring.replace(',','')
elif isinstance(newvalues,bool):
if newvalues:
newstring = 'T'
else:
newstring = 'F'
else:
newstring = repr(newvalues)
newstart = str.ljust(newstring,25)
line = line.replace(result.group('values') + "=:", \
newstart + " =:")
else:
print "Error writing out %s" % name
raise AttributeError, name
# Write the new line
temp_file.write(line)
except:
raise
# Close files
data_file.close()
temp_file.close()
# Rename the temporary file to the data_path name
try:
shutil.move(temp_path,data_path)
except:
raise
#-----------------------------------------------------------------------
# New classes and functions for dealing with data in setrun function.
class ClawInputData(Data):
r"""
Object that will be written out to claw.data.
"""
def __init__(self, ndim):
super(ClawInputData,self).__init__()
self.add_attribute('ndim',ndim)
# Set default values:
if ndim == 1:
self.add_attribute('mx',100)
self.add_attribute('nout',5)
self.add_attribute('outstyle',1)
self.add_attribute('tfinal',1.0)
self.add_attribute('dt_initial',1.e-5)
self.add_attribute('dt_max',1.e99)
self.add_attribute('cfl_desired',0.9)
self.add_attribute('cfl_max',1.0)
self.add_attribute('max_steps',5000)
self.add_attribute('dt_variable',1)
self.add_attribute('order',2)
self.add_attribute('order_trans',0)
self.add_attribute('verbosity',0)
self.add_attribute('src_split',0)
self.add_attribute('mcapa',0)
self.add_attribute('maux',0)
self.add_attribute('meqn',1)
self.add_attribute('mwaves',1)
self.add_attribute('mthlim',[4])
self.add_attribute('t0',0.)
self.add_attribute('xlower',0.)
self.add_attribute('xupper',1.)
self.add_attribute('mbc',2)
self.add_attribute('mthbc_xlower',1)
self.add_attribute('mthbc_xupper',1)
self.add_attribute('restart',0)
self.add_attribute('N_restart',0)
elif ndim == 2:
self.add_attribute('mx',100)
self.add_attribute('my',100)
self.add_attribute('nout',5)
self.add_attribute('outstyle',1)
self.add_attribute('tfinal',1.0)
self.add_attribute('dt_initial',1.e-5)
self.add_attribute('dt_max',1.e99)
self.add_attribute('cfl_desired',0.9)
self.add_attribute('cfl_max',1.0)
self.add_attribute('max_steps',5000)
self.add_attribute('dt_variable',1)
self.add_attribute('order',2)
self.add_attribute('order_trans',2)
self.add_attribute('verbosity',0)
self.add_attribute('src_split',0)
self.add_attribute('mcapa',0)
self.add_attribute('maux',0)
self.add_attribute('meqn',1)
self.add_attribute('mwaves',1)
self.add_attribute('mthlim',[4])
self.add_attribute('t0',0.)
self.add_attribute('xlower',0.)
self.add_attribute('xupper',1.)
self.add_attribute('ylower',0.)
self.add_attribute('yupper',1.)
self.add_attribute('mbc',2)
self.add_attribute('mthbc_xlower',1)
self.add_attribute('mthbc_xupper',1)
self.add_attribute('mthbc_ylower',1)
self.add_attribute('mthbc_yupper',1)
self.add_attribute('restart',0)
self.add_attribute('N_restart',0)
else:
raise AttributeError("Only ndim=1 or 2 supported so far")
def write(self):
print 'Creating data file claw.data for use with xclaw'
make_clawdatafile(self)
class AmrclawInputData(Data):
r"""
Object that will be written out to amr2ez.data.
"""
def __init__(self, ndim):
super(AmrclawInputData,self).__init__()
self.add_attribute('ndim',ndim)
# Set default values:
if ndim == 1:
self.add_attribute('mx',100)
self.add_attribute('nout',5)
self.add_attribute('outstyle',1)
self.add_attribute('tfinal',1.0)
self.add_attribute('dt_initial',1.e-5)
self.add_attribute('dt_max',1.e99)
self.add_attribute('cfl_desired',0.9)
self.add_attribute('cfl_max',1.0)
self.add_attribute('max_steps',5000)
self.add_attribute('dt_variable',1)
self.add_attribute('order',2)
self.add_attribute('order_trans',0)
self.add_attribute('verbosity',0)
self.add_attribute('src_split',0)
self.add_attribute('mcapa',0)
self.add_attribute('maux',0)
self.add_attribute('meqn',1)
self.add_attribute('mwaves',1)
self.add_attribute('mthlim',[4])
self.add_attribute('t0',0.)
self.add_attribute('xlower',0.)
self.add_attribute('xupper',1.)
self.add_attribute('mbc',2)
self.add_attribute('mthbc_xlower',1)
self.add_attribute('mthbc_xupper',1)
self.add_attribute('restart',0)
self.add_attribute('N_restart',0)
# attributes needed only because AMR is done using 2d amrclaw:
self.add_attribute('my',1)
self.add_attribute('ylower',0.)
self.add_attribute('yupper',1.)
self.add_attribute('mthbc_ylower',1)
self.add_attribute('mthbc_yupper',1)
self.add_attribute('inraty',[1,1,1,1,1,1])
elif ndim == 2:
self.add_attribute('mx',100)
self.add_attribute('my',100)
self.add_attribute('nout',5)
self.add_attribute('outstyle',1)
self.add_attribute('tfinal',1.0)
self.add_attribute('dt_initial',1.e-5)
self.add_attribute('dt_max',1.e99)
self.add_attribute('cfl_desired',0.9)
self.add_attribute('cfl_max',1.0)
self.add_attribute('max_steps',5000)
self.add_attribute('dt_variable',1)
self.add_attribute('order',2)
self.add_attribute('order_trans',2)
self.add_attribute('verbosity',0)
self.add_attribute('src_split',0)
self.add_attribute('mcapa',0)
self.add_attribute('maux',0)
self.add_attribute('meqn',1)
self.add_attribute('mwaves',1)
self.add_attribute('mthlim',[4])
self.add_attribute('t0',0.)
self.add_attribute('xlower',0.)
self.add_attribute('xupper',1.)
self.add_attribute('ylower',0.)
self.add_attribute('yupper',1.)
self.add_attribute('mbc',2)
self.add_attribute('mthbc_xlower',1)
self.add_attribute('mthbc_xupper',1)
self.add_attribute('mthbc_ylower',1)
self.add_attribute('mthbc_yupper',1)
self.add_attribute('restart',0)
self.add_attribute('N_restart',0)
self.add_attribute('inraty',[1])
if ndim <= 2:
# AMR parameters:
self.add_attribute('mxnest',-1)
self.add_attribute('inratx',[1])
self.add_attribute('inratt',[1])
self.add_attribute('auxtype',[])
self.add_attribute('restart',False)
self.add_attribute('checkpt_iousr',1000)
self.add_attribute('tchk',[])
self.add_attribute('tol',-1.0)
self.add_attribute('tolsp',0.05)
self.add_attribute('kcheck',2)
self.add_attribute('ibuff',3)
self.add_attribute('cutoff',0.7)
self.add_attribute('PRINT',False)
self.add_attribute('NCAR',False)
self.add_attribute('fortq',True)
self.add_attribute('dprint',False)
self.add_attribute('eprint',False)
self.add_attribute('edebug',False)
self.add_attribute('gprint',False)
self.add_attribute('nprint',False)
self.add_attribute('pprint',False)
self.add_attribute('rprint',False)
self.add_attribute('sprint',False)
self.add_attribute('tprint',False)
self.add_attribute('uprint',False)
else:
print '*** Error: only ndim=1 or 2 supported so far ***'
raise AttributeError("Only ndim=1 or 2 supported so far")
def write(self):
print 'Creating data file amr2ez.data for use with xamr'
make_amrclawdatafile(self)
make_setgauges_datafile(self)
class SharpclawInputData(Data):
r"""
Object that will be written out to sharpclaw.data.
"""
def __init__(self, ndim):
super(SharpclawInputData,self).__init__()
self.add_attribute('ndim',ndim)
# Set default values:
if ndim == 1:
self.add_attribute('mx',100)
self.add_attribute('nout',5)
self.add_attribute('outstyle',1)
self.add_attribute('tfinal',1.0)
self.add_attribute('dt_initial',1.e-5)
self.add_attribute('dt_max',1.e99)
self.add_attribute('cfl_desired',0.9)
self.add_attribute('cfl_max',1.0)
self.add_attribute('max_steps',5000)
self.add_attribute('dt_variable',1)
self.add_attribute('verbosity',0)
self.add_attribute('mcapa',0)
self.add_attribute('maux',0)
self.add_attribute('meqn',1)
self.add_attribute('mwaves',1)
self.add_attribute('mthlim',[5])
self.add_attribute('t0',0.)
self.add_attribute('xlower',0.)
self.add_attribute('xupper',1.)
self.add_attribute('mbc',3)
self.add_attribute('mthbc_xlower',1)
self.add_attribute('mthbc_xupper',1)
self.add_attribute('restart',0)
self.add_attribute('N_restart',0)
self.add_attribute('time_integrator',2)
self.add_attribute('tfluct_solver',0)
self.add_attribute('char_decomp',0)
self.add_attribute('lim_type',2)
self.add_attribute('src_term',0)
elif ndim == 2:
self.add_attribute('mx',100)
self.add_attribute('my',100)
self.add_attribute('nout',5)
self.add_attribute('outstyle',1)
self.add_attribute('tfinal',1.0)
self.add_attribute('dt_initial',1.e-5)
self.add_attribute('dt_max',1.e99)
self.add_attribute('cfl_desired',0.9)
self.add_attribute('cfl_max',1.0)
self.add_attribute('max_steps',5000)
self.add_attribute('dt_variable',1)
self.add_attribute('verbosity',0)
self.add_attribute('mcapa',0)
self.add_attribute('maux',0)
self.add_attribute('meqn',1)
self.add_attribute('mwaves',1)
self.add_attribute('mthlim',[5])
self.add_attribute('t0',0.)
self.add_attribute('xlower',0.)
self.add_attribute('xupper',1.)
self.add_attribute('ylower',0.)
self.add_attribute('yupper',1.)
self.add_attribute('mbc',3)
self.add_attribute('mthbc_xlower',1)
self.add_attribute('mthbc_xupper',1)
self.add_attribute('mthbc_ylower',1)
self.add_attribute('mthbc_yupper',1)
self.add_attribute('restart',0)
self.add_attribute('N_restart',0)
self.add_attribute('time_integrator',2)
self.add_attribute('tfluct_solver',0)
self.add_attribute('char_decomp',0)
self.add_attribute('lim_type',2)
self.add_attribute('src_term',0)
else:
raise AttributeError("Only ndim=1 or 2 supported so far")
def write(self):
print 'Creating data file sharpclaw.data for use with xsclaw'
make_sharpclawdatafile(self)
def open_datafile(name, datasource='setrun.py'):
"""
Open a data file and write a warning header.
Warning header starts with '#' character. These lines are skipped if
data file is opened using the library routine opendatafile.
:Input:
- *name* - (string) Name of data file
- *datasource* - (string) Source for the data
:Output:
- (file) - file object
"""
import string
source = string.ljust(datasource,25)
file = open(name, 'w')
file.write('########################################################\n')
file.write('### DO NOT EDIT THIS FILE: GENERATED AUTOMATICALLY ####\n')
file.write('### To modify data, edit %s ####\n' % source)
file.write('### and then "make .data" ####\n')
file.write('########################################################\n\n')
return file
def data_write(file, dataobj, name=None, descr=''):
r"""
Write out value to data file, in the form ::
value =: name descr
Remove brackets and commas from lists, and replace booleans by T/F.
Also convert numpy array to a list first.
:Input:
- *name* - (string) normally a string defining the variable,
``if name==None``, write a blank line.
- *descr* - (string) A short description to appear on the line
"""
import string
if name is None:
file.write('\n')
else:
try:
value = getattr(dataobj, name)
except:
print "Variable missing: ",name
print " from dataobj = ", dataobj
raise
# Convert value to an appropriate string repr
import numpy
if isinstance(value,numpy.ndarray):
value = list(value)
if isinstance(value, (tuple, list)):
# Remove [], (), and ','
string_value = repr(value)[1:-1]
string_value = string_value.replace(',','')
elif isinstance(value,bool):
if value:
string_value = 'T'
else:
string_value = 'F'
else:
string_value = repr(value)
padded_value = string.ljust(string_value, 25)
padded_name = string.ljust(name, 12)
file.write('%s =: %s %s\n' % (padded_value, padded_name, descr))
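# Hedged example of the line produced by data_write (value padded to 25
# characters, name to 12, then the description):
#
#   data_write(file, clawdata, 'mx', '(cells in x direction)')
#   # writes: "100                       =: mx           (cells in x direction)"
#
# Lists lose their brackets and commas, booleans become T/F, and calling
# data_write(file, clawdata, None) writes a blank line.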
def make_clawdatafile(clawdata):
r"""
Take the data specified in clawdata and write it to claw.data in the
form required by the Fortran code lib/main.f95.
"""
# open file and write a warning header:
file = open_datafile('claw.data')
ndim = clawdata.ndim
data_write(file, clawdata, 'ndim', '(number of dimensions)')
data_write(file, clawdata, 'mx', '(cells in x direction)')
if ndim > 1:
data_write(file, clawdata, 'my', '(cells in y direction)')
if ndim == 3:
data_write(file, clawdata, 'mz', '(cells in z direction)')
data_write(file, clawdata, None) # writes blank line
data_write(file, clawdata, 'nout', '(number of output times)')
data_write(file, clawdata, 'outstyle', '(style of specifying output times)')
if clawdata.outstyle == 1:
data_write(file, clawdata, 'tfinal', '(final time)')
elif clawdata.outstyle == 2:
data_write(file, clawdata, 'tout', '(output times)')
elif clawdata.outstyle == 3:
data_write(file, clawdata, 'iout', '(output every iout steps)')
elif clawdata.outstyle == 4:
data_write(file, clawdata, 'output_time_interval', '(between outputs)')
data_write(file, clawdata, 'tfinal', '(final time)')
else:
print '*** Error: unrecognized outstyle'
raise ValueError("Unrecognized outstyle: %s" % clawdata.outstyle)
data_write(file, clawdata, None)
data_write(file, clawdata, 'dt_initial', '(initial time step dt)')
data_write(file, clawdata, 'dt_max', '(max allowable dt)')
data_write(file, clawdata, 'cfl_max', '(max allowable Courant number)')
data_write(file, clawdata, 'cfl_desired', '(desired Courant number)')
data_write(file, clawdata, 'max_steps', '(max time steps per call to claw)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'dt_variable', '(1 for variable dt, 0 for fixed)')
data_write(file, clawdata, 'order', '(1 or 2)')
if ndim == 1:
data_write(file, clawdata, 'order_trans', '(not used in 1d)')
else:
data_write(file, clawdata, 'order_trans', '(transverse order)')
data_write(file, clawdata, 'verbosity', '(verbosity of output)')
data_write(file, clawdata, 'src_split', '(source term splitting)')
data_write(file, clawdata, 'mcapa', '(aux index for capacity fcn)')
data_write(file, clawdata, 'maux', '(number of aux variables)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'meqn', '(number of equations)')
data_write(file, clawdata, 'mwaves', '(number of waves)')
data_write(file, clawdata, 'mthlim', '(limiter choice for each wave)')
data_write(file, clawdata, None)
data_write(file, clawdata, 't0', '(initial time)')
data_write(file, clawdata, 'xlower', '(xlower)')
data_write(file, clawdata, 'xupper', '(xupper)')
if ndim > 1:
data_write(file, clawdata, 'ylower', '(ylower)')
data_write(file, clawdata, 'yupper', '(yupper)')
if ndim == 3:
data_write(file, clawdata, 'zlower', '(zlower)')
data_write(file, clawdata, 'zupper', '(zupper)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'mbc', '(number of ghost cells)')
data_write(file, clawdata, 'mthbc_xlower', '(type of BC at xlower)')
data_write(file, clawdata, 'mthbc_xupper', '(type of BC at xupper)')
if ndim > 1:
data_write(file, clawdata, 'mthbc_ylower', '(type of BC at ylower)')
data_write(file, clawdata, 'mthbc_yupper', '(type of BC at yupper)')
if ndim == 3:
data_write(file, clawdata, 'mthbc_zlower', '(type of BC at zlower)')
data_write(file, clawdata, 'mthbc_zupper', '(type of BC at zupper)')
data_write(file, clawdata, 'restart', '(1 to restart from a past run)')
data_write(file, clawdata, 'N_restart', '(which frame to restart from)')
data_write(file, clawdata, None)
file.close()
def make_amrclawdatafile(clawdata):
r"""
Take the data specified in clawdata and write it to amr2ez.data in the
form required by the Fortran code lib/main.f95.
"""
# open file and write a warning header:
file = open_datafile('amr2ez.data')
ndim = clawdata.ndim
#data_write(file, clawdata, 'ndim', '(number of dimensions)')
data_write(file, clawdata, 'mx', '(cells in x direction)')
data_write(file, clawdata, 'my', '(cells in y direction)')
if ndim == 3:
data_write(file, clawdata, 'mz', '(cells in z direction)')
data_write(file, clawdata, 'mxnest', '(max number of grid levels)')
if len(clawdata.inratx) < max(abs(clawdata.mxnest)-1, 1):
raise ValueError("*** Error in data parameter: " + \
"require len(inratx) >= %s " % max(abs(clawdata.mxnest) - 1, 1))
data_write(file, clawdata, 'inratx', '(refinement ratios)')
if clawdata.mxnest < 0:
# negative mxnest indicates anisotropic refinement
if len(clawdata.inraty) < max(abs(clawdata.mxnest)-1, 1):
raise ValueError("*** Error in data parameter: " + \
"require len(inraty) >= %s " % max(abs(clawdata.mxnest) - 1, 1))
data_write(file, clawdata, 'inraty', '(refinement ratios)')
if ndim == 3:
if len(clawdata.inratz) < max(abs(clawdata.mxnest)-1, 1):
raise ValueError("*** Error in data parameter: " + \
"require len(inratz) >= %s " % max(abs(clawdata.mxnest) - 1, 1))
data_write(file, clawdata, 'inratz', '(refinement ratios)')
if len(clawdata.inratt) < max(abs(clawdata.mxnest)-1, 1):
raise ValueError("*** Error in data parameter: " + \
"require len(inratt) >= %s " % max(abs(clawdata.mxnest) - 1, 1))
data_write(file, clawdata, 'inratt', '(refinement ratios)')
data_write(file, clawdata, None) # writes blank line
data_write(file, clawdata, 'nout', '(number of output times)')
data_write(file, clawdata, 'outstyle', '(style of specifying output times)')
if clawdata.outstyle == 1:
data_write(file, clawdata, 'tfinal', '(final time)')
elif clawdata.outstyle == 2:
data_write(file, clawdata, 'tout', '(output times)')
elif clawdata.outstyle == 3:
data_write(file, clawdata, 'iout', '(output every iout steps)')
elif clawdata.outstyle == 4:
data_write(file, clawdata, 'output_time_interval', '(between outputs)')
data_write(file, clawdata, 'tfinal', '(final time)')
else:
print '*** Error: unrecognized outstyle'
raise ValueError("Unrecognized outstyle: %s" % clawdata.outstyle)
data_write(file, clawdata, None)
data_write(file, clawdata, 'dt_initial', '(initial time step dt)')
data_write(file, clawdata, 'dt_max', '(max allowable dt)')
data_write(file, clawdata, 'cfl_max', '(max allowable Courant number)')
data_write(file, clawdata, 'cfl_desired', '(desired Courant number)')
data_write(file, clawdata, 'max_steps', '(max time steps per call to claw)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'dt_variable', '(1 for variable dt, 0 for fixed)')
data_write(file, clawdata, 'order', '(1 or 2)')
if ndim == 1:
data_write(file, clawdata, 'order_trans', '(not used in 1d)')
else:
data_write(file, clawdata, 'order_trans', '(transverse order)')
data_write(file, clawdata, 'verbosity', '(verbosity of output)')
data_write(file, clawdata, 'src_split', '(source term splitting)')
data_write(file, clawdata, 'mcapa', '(aux index for capacity fcn)')
data_write(file, clawdata, 'maux', '(number of aux variables)')
if len(clawdata.auxtype) != clawdata.maux:
file.close()
print "*** Error: An auxtype array must be specified of length maux"
raise AttributeError, "require len(clawdata.auxtype) == clawdata.maux"
for i in range(clawdata.maux):
file.write("'%s'\n" % clawdata.auxtype[i])
data_write(file, clawdata, None)
data_write(file, clawdata, 'meqn', '(number of equations)')
data_write(file, clawdata, 'mwaves', '(number of waves)')
data_write(file, clawdata, 'mthlim', '(limiter choice for each wave)')
data_write(file, clawdata, None)
data_write(file, clawdata, 't0', '(initial time)')
data_write(file, clawdata, 'xlower', '(xlower)')
data_write(file, clawdata, 'xupper', '(xupper)')
data_write(file, clawdata, 'ylower', '(ylower)')
data_write(file, clawdata, 'yupper', '(yupper)')
if ndim == 3:
data_write(file, clawdata, 'zlower', '(zlower)')
data_write(file, clawdata, 'zupper', '(zupper)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'mbc', '(number of ghost cells)')
data_write(file, clawdata, 'mthbc_xlower', '(type of BC at xlower)')
data_write(file, clawdata, 'mthbc_xupper', '(type of BC at xupper)')
data_write(file, clawdata, 'mthbc_ylower', '(type of BC at ylower)')
data_write(file, clawdata, 'mthbc_yupper', '(type of BC at yupper)')
if ndim == 3:
data_write(file, clawdata, 'mthbc_zlower', '(type of BC at zlower)')
data_write(file, clawdata, 'mthbc_zupper', '(type of BC at zupper)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'restart', '(1 to restart from a past run)')
data_write(file, clawdata, 'checkpt_iousr', '(how often to checkpoint)')
if clawdata.checkpt_iousr < 0:
data_write(file, clawdata, 'tchk', '(checkpoint times)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'tol', '(tolerance for Richardson extrap)')
data_write(file, clawdata, 'tolsp', '(tolerance used in flag2refine)')
data_write(file, clawdata, 'kcheck', '(how often to regrid)')
data_write(file, clawdata, 'ibuff', '(buffer zone around flagged pts)')
data_write(file, clawdata, 'cutoff', '(efficiency cutoff for grid gen.)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'PRINT', '(print to fort.amr)')
data_write(file, clawdata, 'NCAR', '(obsolete!)')
data_write(file, clawdata, 'fortq', '(Output to fort.q* files)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'dprint', '(print domain flags)')
data_write(file, clawdata, 'eprint', '(print err est flags)')
data_write(file, clawdata, 'edebug', '(even more err est flags)')
data_write(file, clawdata, 'gprint', '(grid bisection/clustering)')
data_write(file, clawdata, 'nprint', '(proper nesting output)')
data_write(file, clawdata, 'pprint', '(proj. of tagged points)')
data_write(file, clawdata, 'rprint', '(print regridding summary)')
data_write(file, clawdata, 'sprint', '(space/memory output)')
data_write(file, clawdata, 'tprint', '(time step reporting each level)')
data_write(file, clawdata, 'uprint', '(update/upbnd reporting)')
file.close()
def make_sharpclawdatafile(clawdata):
r"""
Take the data specified in clawdata and write it to sharpclaw.data in the
form required by the Fortran code lib/main.f95.
"""
# open file and write a warning header:
file = open_datafile('sharpclaw.data')
ndim = clawdata.ndim
data_write(file, clawdata, 'ndim', '(number of dimensions)')
data_write(file, clawdata, 'mx', '(cells in x direction)')
if ndim > 1:
data_write(file, clawdata, 'my', '(cells in y direction)')
if ndim == 3:
data_write(file, clawdata, 'mz', '(cells in z direction)')
data_write(file, clawdata, None) # writes blank line
data_write(file, clawdata, 'nout', '(number of output times)')
data_write(file, clawdata, 'outstyle', '(style of specifying output times)')
if clawdata.outstyle == 1:
data_write(file, clawdata, 'tfinal', '(final time)')
elif clawdata.outstyle == 2:
data_write(file, clawdata, 'tout', '(output times)')
elif clawdata.outstyle == 3:
data_write(file, clawdata, 'iout', '(output every iout steps)')
else:
print '*** Error: unrecognized outstyle'
raise ValueError("Unrecognized outstyle: %s" % clawdata.outstyle)
data_write(file, clawdata, None)
data_write(file, clawdata, 'dt_initial', '(initial time step dt)')
data_write(file, clawdata, 'dt_max', '(max allowable dt)')
data_write(file, clawdata, 'cfl_max', '(max allowable Courant number)')
data_write(file, clawdata, 'cfl_desired', '(desired Courant number)')
data_write(file, clawdata, 'max_steps', '(max time steps per call to claw)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'dt_variable', '(1 for variable dt, 0 for fixed)')
data_write(file, clawdata, 'time_integrator', '(time stepping scheme)')
data_write(file, clawdata, 'verbosity', '(verbosity of output)')
data_write(file, clawdata, 'src_term', '(source term present)')
data_write(file, clawdata, 'mcapa', '(aux index for capacity fcn)')
data_write(file, clawdata, 'maux', '(number of aux variables)')
data_write(file, clawdata, 'tfluct_solver', '(total fluctuation solver)')
data_write(file, clawdata, 'char_decomp', '(characteristic decomposition)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'meqn', '(number of equations)')
data_write(file, clawdata, 'mwaves', '(number of waves)')
data_write(file, clawdata, 'lim_type', '(0=None, 1=TVD, 2=WENO)')
data_write(file, clawdata, 'mthlim', '(limiter choice for each wave)')
data_write(file, clawdata, None)
data_write(file, clawdata, 't0', '(initial time)')
data_write(file, clawdata, 'xlower', '(xlower)')
data_write(file, clawdata, 'xupper', '(xupper)')
if ndim > 1:
data_write(file, clawdata, 'ylower', '(ylower)')
data_write(file, clawdata, 'yupper', '(yupper)')
if ndim == 3:
data_write(file, clawdata, 'zlower', '(zlower)')
data_write(file, clawdata, 'zupper', '(zupper)')
data_write(file, clawdata, None)
data_write(file, clawdata, 'mbc', '(number of ghost cells)')
data_write(file, clawdata, 'mthbc_xlower', '(type of BC at xlower)')
data_write(file, clawdata, 'mthbc_xupper', '(type of BC at xupper)')
if ndim > 1:
data_write(file, clawdata, 'mthbc_ylower', '(type of BC at ylower)')
data_write(file, clawdata, 'mthbc_yupper', '(type of BC at yupper)')
if ndim == 3:
data_write(file, clawdata, 'mthbc_zlower', '(type of BC at zlower)')
data_write(file, clawdata, 'mthbc_zupper', '(type of BC at zupper)')
data_write(file, clawdata, 'restart', '(1 to restart from a past run)')
data_write(file, clawdata, 'N_restart', '(which frame to restart from)')
data_write(file, clawdata, None)
file.close()
def make_userdatafile(userdata):
r"""
Create the data file using the parameters in userdata.
The parameters will be written to this file in the same order they were
specified using userdata.add_attribute.
Presumably the user will read these in using a Fortran routine, such as
setprob.f95, and the order is important.
"""
# open file and write a warning header:
file = open_datafile(userdata._UserData__fname)
# write all the parameters:
for param in userdata.attributes:
data_write(file, userdata, param, \
userdata._UserData__descr[param])
file.close()
def make_setgauges_datafile(clawdata):
"""
Create setgauges.data using gauges attribute of clawdata.
"""
gauges = getattr(clawdata,'gauges',[])
ngauges = len(gauges)
print 'Creating data file setgauges.data'
# open file and write a warning header:
file = open_datafile('setgauges.data')
file.write("%4i =: ngauges\n" % ngauges)
gaugeno_used = []
for gauge in gauges:
gaugeno = gauge[0]
if gaugeno in gaugeno_used:
print "*** Gauge number %s used more than once! " % gaugeno
raise Exception("Repeated gauge number")
else:
gaugeno_used.append(gauge[0])
file.write("%4i %19.10e %17.10e %13.6e %13.6e\n" % tuple(gauge))
# or use this variant with =:
#gauge.append(gaugeno)
#file.write("%4i %19.10e %17.10e %13.6e %13.6e =: gauge%s\n" % tuple(gauge))
file.close()
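# Sketch of the gauges attribute this expects (assumed layout, matching the
# format string above): one entry per gauge, [gaugeno, x, y, t_start, t_end].
#
#   clawdata.gauges = [[1, 0.25, 0.10, 0.0, 1.0e9],
#                      [2, 0.75, 0.10, 0.0, 1.0e9]]
#
# A repeated gauge number raises an exception when the file is written.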
#-----------------------------------------------------
# New version 6/30/09
class ClawRunData(Data):
r"""
Container for all data objects needed for a Clawpack run; its write method writes out each of the individual data files.
"""
def __init__(self, pkg, ndim):
super(ClawRunData,self).__init__()
self.add_attribute('pkg',pkg)
self.add_attribute('ndim',ndim)
self.add_attribute('datalist',[])
if pkg.lower() in ['classic', 'classicclaw']:
self.add_attribute('xclawcmd', 'xclaw')
# Required data set for basic run parameters:
clawdata = ClawInputData(ndim)
self.add_attribute('clawdata', clawdata)
self.datalist.append(clawdata)
elif pkg.lower() in ['amrclaw', 'amr']:
self.add_attribute('xclawcmd', 'xamr')
# Required data set for basic run parameters:
clawdata = AmrclawInputData(ndim)
self.add_attribute('clawdata', clawdata)
self.datalist.append(clawdata)
elif pkg.lower() in ['geoclaw']:
self.add_attribute('xclawcmd', 'xgeoclaw')
# Required data set for basic run parameters:
clawdata = AmrclawInputData(ndim)
self.add_attribute('clawdata', clawdata)
self.datalist.append(clawdata)
geodata = GeoclawInputData(ndim)
self.add_attribute('geodata', geodata)
self.datalist.append(geodata)
elif pkg.lower() in ['sharpclaw']:
self.add_attribute('xclawcmd', 'xsclaw')
# Required data set for basic run parameters:
clawdata = SharpclawInputData(ndim)
self.add_attribute('clawdata', clawdata)
self.datalist.append(clawdata)
else:
raise AttributeError("Unrecognized Clawpack pkg = %s" % pkg)
def new_UserData(self,name,fname):
r"""
Create a new attribute called name
for application specific data to be written
to the data file fname.
"""
userdata = UserData(fname)
self.datalist.append(userdata)
setattr(self, name, userdata)
return userdata
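# Hedged usage sketch (the attribute and file names are only illustrative):
#
#   probdata = rundata.new_UserData(name='probdata', fname='setprob.data')
#   probdata.add_param('u', 1.0, 'advection velocity')
#
# rundata.write() then also creates setprob.data next to the other data files.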
def add_GaugeData(self):
r"""
Create a gaugedata attribute for writing to gauges.data.
"""
gaugedata = GaugeData(self.ndim)
self.datalist.append(gaugedata)
self.gaugedata = gaugedata
return gaugedata
def write(self):
for d in self.datalist:
d.write()
class UserData(Data):
r"""
Object that will be written out to user file such as setprob.data, as
determined by the fname attribute.
"""
def __init__(self, fname):
super(UserData,self).__init__()
self.__fname = fname # file to be read by Fortran for this data
self.__descr = {} # dictionary to hold descriptions
def add_param(self,name,value,descr=''):
self.add_attribute(name,value)
self.__descr[name] = descr
def write(self):
print 'Creating data file %s' % self.__fname
make_userdatafile(self)
class GaugeData(Data):
r"""
Data to be written out to gauges.data specifying gauges.
DEPRECATED: Use GeoclawInputData instead.
"""
def __init__(self, ndim):
super(GaugeData,self).__init__()
self.add_attribute('ndim',ndim)
self.add_attribute('ngauges',0)
self.__gauge_dict = {}
def add_gauge(self,gaugeno,location,time_interval):
self.__gauge_dict[gaugeno] = (gaugeno, location, time_interval)
self.ngauges = len(self.__gauge_dict)
def write(self):
print 'Creating data file gauges.data'
# open file and write a warning header:
file = open_datafile('gauges.data')
data_write(file, self, 'ngauges', 'Number of gauges')
data_write(file, self, None)
ndim = self.ndim
# write a line for each gauge:
for (gaugeno, gdata) in self.__gauge_dict.iteritems():
tmin = gdata[2][0]
tmax = gdata[2][1]
if isinstance(gdata[1],(list,tuple)):
xyz = gdata[1]
x = xyz[0]
if ndim>1:
y = xyz[1]
if ndim>2:
z = xyz[2]
else:
x = gdata[1]
if ndim==1:
file.write('%i %e %e %e\n' % (gdata[0],x,tmin,tmax))
elif ndim==2:
file.write('%i %e %e %e %e\n' % (gdata[0],x,y,tmin,tmax))
elif ndim==3:
file.write('%i %e %e %e %e %e\n' % (gdata[0],x,y,z,tmin,tmax))
printxyz = {1: 'x ', 2: 'x y ', 3: 'x y z'}
file.write('\n\n# Format of each line: \n# gaugeno %s tmin tmax'\
% printxyz[ndim])
file.close()
class GeoclawInputData(Data):
r"""
Object that will be written out to the various GeoClaw data files.
"""
def __init__(self, ndim):
super(GeoclawInputData,self).__init__()
# Set default values:
self.add_attribute('igravity',1)
self.add_attribute('iqinit',0)
self.add_attribute('icoriolis',1)
self.add_attribute('Rearth',6367500.0)
self.add_attribute('variable_dt_refinement_ratios',False)
# NEED TO CONTINUE!
def write(self):
print 'Creating data file setgeo.data'
# open file and write a warning header:
file = open_datafile('setgeo.data')
data_write(file, self, 'igravity')
data_write(file, self, 'gravity')
data_write(file, self, 'icoordsys')
data_write(file, self, 'icoriolis')
data_write(file, self, 'Rearth')
data_write(file, self, 'variable_dt_refinement_ratios')
file.close()
print 'Creating data file settsunami.data'
# open file and write a warning header:
file = open_datafile('settsunami.data')
data_write(file, self, 'sealevel')
data_write(file, self, 'drytolerance')
data_write(file, self, 'wavetolerance')
data_write(file, self, 'depthdeep')
data_write(file, self, 'maxleveldeep')
data_write(file, self, 'ifriction')
data_write(file, self, 'coeffmanning')
data_write(file, self, 'frictiondepth')
file.close()
print 'Creating data file settopo.data'
# open file and write a warning header:
file = open_datafile('settopo.data')
self.ntopofiles = len(self.topofiles)
data_write(file, self, 'ntopofiles')
for tfile in self.topofiles:
try:
fname = os.path.abspath(tfile[-1])
except:
print "*** Error: file not found: ",tfile[-1]
raise MissingFile("file not found")
file.write("\n'%s' \n " % fname)
file.write("%3i %3i %3i %20.10e %20.10e \n" % tuple(tfile[:-1]))
file.close()
print 'Creating data file setdtopo.data'
# open file and write a warning header:
file = open_datafile('setdtopo.data')
self.mdtopofiles = len(self.dtopofiles)
data_write(file, self, 'mdtopofiles')
data_write(file, self, None)
for tfile in self.dtopofiles:
try:
fname = "'%s'" % os.path.abspath(tfile[-1])
except:
print "*** Error: file not found: ",tfile[-1]
raise MissingFile("file not found")
file.write("\n%s \n" % fname)
file.write("%3i %3i %3i\n" % tuple(tfile[:-1]))
file.close()
print 'Creating data file setqinit.data'
# open file and write a warning header:
file = open_datafile('setqinit.data')
# self.iqinit tells which component of q is perturbed!
data_write(file, self, 'iqinit')
data_write(file, self, None)
for tfile in self.qinitfiles:
try:
fname = "'%s'" % os.path.abspath(tfile[-1])
except:
print "*** Error: file not found: ",tfile[-1]
raise MissingFile("file not found")
file.write("\n%s \n" % fname)
file.write("%3i %3i \n" % tuple(tfile[:-1]))
file.close()
make_setgauges_datafile(self)
# print 'Creating data file setgauges.data'
# # open file and write a warning header:
# file = open_datafile('setgauges.data')
# self.ngauges = len(self.gauges)
# data_write(file, self, 'ngauges')
# data_write(file, self, None)
# gaugeno_used = []
# for gauge in self.gauges:
# gaugeno = gauge[0]
# if gaugeno in gaugeno_used:
# print "*** Gauge number %s used more than once! " % gaugeno
# raise Exception("Repeated gauge number")
# else:
# gaugeno_used.append(gauge[0])
# gauge.append(gaugeno)
# file.write("%3i %19.10e %19.10e %15.6e %15.6e =: gauge%s\n" % tuple(gauge))
# file.close()
print 'Creating data file setfixedgrids.data'
# open file and write a warning header:
file = open_datafile('setfixedgrids.data')
self.nfixedgrids = len(self.fixedgrids)
data_write(file, self, 'nfixedgrids')
data_write(file, self, None)
for fixedgrid in self.fixedgrids:
file.write(11*"%g " % tuple(fixedgrid) +"\n")
file.close()
print 'Creating data file setregions.data'
# open file and write a warning header:
file = open_datafile('setregions.data')
self.nregions = len(self.regions)
data_write(file, self, 'nregions')
data_write(file, self, None)
for regions in self.regions:
file.write(8*"%g " % tuple(regions) +"\n")
file.close()
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from argparse import ArgumentParser, _HelpAction
import copy
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.errors import ParseError, RegistrationError
from pants.option.help_formatter import PantsHelpFormatter
from pants.option.ranked_value import RankedValue
# Standard ArgumentParser prints usage and exits on error. We subclass so we can raise instead.
# Note that subclassing ArgumentParser for this purpose is allowed by the argparse API.
class CustomArgumentParser(ArgumentParser):
def error(self, message):
raise ParseError(message)
def walk_actions(self):
"""Iterates over the argparse.Action objects for options registered on this parser."""
for action_group in self._action_groups:
for action in action_group._group_actions:
if not isinstance(action, _HelpAction):
yield action
class Parser(object):
"""An argument parser in a hierarchy.
Each node in the hierarchy is a 'scope': the root is the global scope, and the parent of
a node is the scope it's immediately contained in. E.g., the 'compile.java' scope is
a child of the 'compile' scope, which is a child of the global scope.
Options registered on a parser are also registered transitively on all the scopes it encloses.
Registration must be in outside-in order: we forbid registering options on an outer scope if
we've already registered an option on one of its inner scopes. This is to ensure that
re-registering the same option name on an inner scope correctly replaces the identically-named
option from the outer scope.
:param env: a dict of environment variables.
:param config: data from a config file (must support config.get[list](section, name, default=)).
:param scope: the scope this parser acts for.
:param parent_parser: the parser for the scope immediately enclosing this one, or
None if this is the global scope.
"""
def __init__(self, env, config, scope, parent_parser):
self._env = env
self._config = config
self._scope = scope
# If True, no more registration is allowed on this parser.
self._frozen = False
# The argparser we use for actually parsing args.
self._argparser = CustomArgumentParser(conflict_handler='resolve')
# The argparser we use for formatting help messages.
# We don't use self._argparser for this as it will have all options from enclosing scopes
# registered on it too, which would create unnecessarily repetitive help messages.
self._help_argparser = CustomArgumentParser(conflict_handler='resolve',
formatter_class=PantsHelpFormatter)
# If True, we have at least one option to show help for.
self._has_help_options = False
# Map of external to internal dest names. See docstring for _set_dest below.
self._dest_forwardings = {}
# A Parser instance, or None for the global scope parser.
self._parent_parser = parent_parser
# List of Parser instances.
self._child_parsers = []
if self._parent_parser:
self._parent_parser._register_child_parser(self)
def parse_args(self, args, namespace):
"""Parse the given args and set their values onto the namespace object's attributes."""
namespace.add_forwardings(self._dest_forwardings)
new_args = self._argparser.parse_args(args)
namespace.update(vars(new_args))
return namespace
def format_help(self):
"""Return a help message for the options registered on this object."""
return self._help_argparser.format_help() if self._has_help_options else ''
def register(self, *args, **kwargs):
"""Register an option, using argparse params."""
if self._frozen:
raise RegistrationError('Cannot register option {0} in scope {1} after registering options '
'in any of its inner scopes.'.format(args[0], self._scope))
# Prevent further registration in enclosing scopes.
ancestor = self._parent_parser
while ancestor:
ancestor._freeze()
ancestor = ancestor._parent_parser
self._validate(args, kwargs)
dest = self._set_dest(args, kwargs)
# Is this a boolean flag?
if kwargs.get('action') in ('store_false', 'store_true'):
inverse_args = []
help_args = []
for flag in args:
if flag.startswith('--') and not flag.startswith('--no-'):
inverse_args.append('--no-' + flag[2:])
help_args.append('--[no-]{0}'.format(flag[2:]))
else:
help_args.append(flag)
else:
inverse_args = None
help_args = args
# Register the option, only on this scope, for the purpose of displaying help.
# Note that we'll only display the default value for this scope, even though the
# default may be overridden in inner scopes.
raw_default = self._compute_default(dest, kwargs).value
kwargs_with_default = dict(kwargs, default=raw_default)
self._help_argparser.add_argument(*help_args, **kwargs_with_default)
self._has_help_options = True
# Register the option for the purpose of parsing, on this and all enclosed scopes.
if inverse_args:
inverse_kwargs = self._create_inverse_kwargs(kwargs)
self._register_boolean(dest, args, kwargs, inverse_args, inverse_kwargs)
else:
self._register(dest, args, kwargs)
def _register(self, dest, args, kwargs):
"""Recursively register the option for parsing."""
ranked_default = self._compute_default(dest, kwargs)
kwargs_with_default = dict(kwargs, default=ranked_default)
self._argparser.add_argument(*args, **kwargs_with_default)
# Propagate registration down to inner scopes.
for child_parser in self._child_parsers:
child_parser._register(dest, args, kwargs)
def _register_boolean(self, dest, args, kwargs, inverse_args, inverse_kwargs):
"""Recursively register the boolean option, and its inverse, for parsing."""
group = self._argparser.add_mutually_exclusive_group()
ranked_default = self._compute_default(dest, kwargs)
kwargs_with_default = dict(kwargs, default=ranked_default)
group.add_argument(*args, **kwargs_with_default)
group.add_argument(*inverse_args, **inverse_kwargs)
# Propagate registration down to inner scopes.
for child_parser in self._child_parsers:
child_parser._register_boolean(dest, args, kwargs, inverse_args, inverse_kwargs)
def _validate(self, args, kwargs):
"""Ensure that the caller isn't trying to use unsupported argparse features."""
for arg in args:
if not arg.startswith('-'):
raise RegistrationError('Option {0} in scope {1} must begin '
'with a dash.'.format(arg, self._scope))
if not arg.startswith('--') and len(arg) > 2:
raise RegistrationError('Multicharacter option {0} in scope {1} must begin '
'with a double-dash'.format(arg, self._scope))
if 'nargs' in kwargs and kwargs['nargs'] != '?':
raise RegistrationError('nargs={0} unsupported in registration of option {1} in '
'scope {2}.'.format(kwargs['nargs'], args, self._scope))
if 'required' in kwargs:
raise RegistrationError("'required' unsupported in registration of option {0} in "
'scope {1}.'.format(args, self._scope))
def _set_dest(self, args, kwargs):
"""Maps the externally-used dest to a scoped one only seen internally.
If an option is re-registered in an inner scope, it'll shadow the external dest but will
use a different internal one. This is important in the case that an option is registered
with two names (say -x, --xlong) and we only re-register one of them, say --xlong, in an
inner scope. In this case we no longer want them to write to the same dest, so we can
use both (now with different meanings) in the inner scope.
Note: Modifies kwargs.
"""
dest = self._select_dest(args, kwargs)
scoped_dest = '_{0}_{1}__'.format(self._scope or 'DEFAULT', dest)
# Make argparse write to the internal dest.
kwargs['dest'] = scoped_dest
# Make reads from the external dest forward to the internal one.
self._dest_forwardings[dest] = scoped_dest
# Also forward all option aliases, so we can reference -x (as options.x) in the example above.
for arg in args:
self._dest_forwardings[arg.lstrip('-').replace('-', '_')] = scoped_dest
return dest
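# Rough example of the forwarding this sets up (scope and option names are
# illustrative): registering --shlex-glob in scope 'compile.java' maps the
# external dest 'shlex_glob' to the internal dest '_compile.java_shlex_glob__',
# and reads of options.shlex_glob are forwarded to that scoped attribute.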
def _select_dest(self, args, kwargs):
"""Select the dest name for the option.
Replicated from the dest inference logic in argparse:
'--foo-bar' -> 'foo_bar' and '-x' -> 'x'.
"""
dest = kwargs.get('dest')
if dest:
return dest
arg = next((a for a in args if a.startswith('--')), args[0])
return arg.lstrip('-').replace('-', '_')
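# Examples of the inferred dest (mirrors argparse behaviour):
#   ('--foo-bar',)      -> 'foo_bar'
#   ('-x', '--x-long')  -> 'x_long'   (the first long flag wins)
#   ('-x',)             -> 'x'
# An explicit dest=... in kwargs always takes precedence.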
def _compute_default(self, dest, kwargs):
"""Compute the default value to use for an option's registration.
The source of the default value is chosen according to the ranking in RankedValue.
"""
config_section = 'DEFAULT' if self._scope == GLOBAL_SCOPE else self._scope
env_var = 'PANTS_{0}_{1}'.format(config_section.upper().replace('.', '_'), dest.upper())
value_type = kwargs.get('type', str)
env_val_str = self._env.get(env_var) if self._env else None
env_val = None if env_val_str is None else value_type(env_val_str)
if kwargs.get('action') == 'append':
config_val_strs = self._config.getlist(config_section, dest) if self._config else None
config_val = (None if config_val_strs is None else
[value_type(config_val_str) for config_val_str in config_val_strs])
default = []
else:
config_val_str = (self._config.get(config_section, dest, default=None)
if self._config else None)
config_val = None if config_val_str is None else value_type(config_val_str)
default = None
hardcoded_val = kwargs.get('default')
return RankedValue.choose(None, env_val, config_val, hardcoded_val, default)
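# Sketch of the resulting precedence, highest rank first (see RankedValue):
# command-line flag > PANTS_<SCOPE>_<DEST> environment variable > config
# file entry > hardcoded default passed to register() > implicit default
# (None, or [] for append-style options).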
def _create_inverse_kwargs(self, kwargs):
"""Create the kwargs for registering the inverse of a boolean flag."""
inverse_kwargs = copy.copy(kwargs)
inverse_action = 'store_true' if kwargs.get('action') == 'store_false' else 'store_false'
inverse_kwargs['action'] = inverse_action
inverse_kwargs.pop('default', None)
return inverse_kwargs
def _register_child_parser(self, child):
self._child_parsers.append(child)
def _freeze(self):
self._frozen = True
def __str__(self):
return 'Parser(%s)' % self._scope
|
|
"""This module contains all Cupy specific kernel_tuner functions"""
from __future__ import print_function
import logging
import time
import numpy as np
from kernel_tuner.observers import BenchmarkObserver
#embedded in try block to be able to generate documentation
#and run tests without cupy installed
try:
import cupy as cp
except ImportError:
cp = None
class CupyRuntimeObserver(BenchmarkObserver):
""" Observer that measures time using CUDA events during benchmarking """
def __init__(self, dev):
self.dev = dev
self.stream = dev.stream
self.start = dev.start
self.end = dev.end
self.times = []
def after_finish(self):
self.times.append(cp.cuda.get_elapsed_time(self.start, self.end)) #ms
def get_results(self):
results = {"time": np.average(self.times), "times": self.times.copy()}
self.times = []
return results
class CupyFunctions:
"""Class that groups the Cupy functions on maintains state about the device"""
def __init__(self, device=0, iterations=7, compiler_options=None, observers=None):
"""instantiate CudaFunctions object used for interacting with the CUDA device
Instantiating this object will inspect and store certain device properties at
runtime, which are used during compilation and/or execution of kernels by the
kernel tuner. It also maintains a reference to the most recently compiled
source module for copying data to constant memory before kernel launch.
:param device: Number of CUDA device to use for this context
:type device: int
:param iterations: Number of iterations used while benchmarking a kernel, 7 by default.
:type iterations: int
"""
self.allocations = []
self.texrefs = []
if not cp:
raise ImportError("Error: cupy not installed, please install e.g. " +
"using 'pip install cupy-cuda111', please check https://github.com/cupy/cupy.")
#select device
self.dev = dev = cp.cuda.Device(device).__enter__()
#inspect device properties
self.devprops = dev.attributes
self.cc = dev.compute_capability
self.max_threads = self.devprops['MaxThreadsPerBlock']
self.iterations = iterations
self.current_module = None
self.func = None
self.compiler_options = compiler_options or []
#create a stream and events
self.stream = cp.cuda.Stream()
self.start = cp.cuda.Event()
self.end = cp.cuda.Event()
#default dynamically allocated shared memory size, can be overwritten using smem_args
self.smem_size = 0
#setup observers
self.observers = observers or []
self.observers.append(CupyRuntimeObserver(self))
for obs in self.observers:
obs.register_device(self)
#collect environment information
env = dict()
cupy_info = str(cp._cupyx.get_runtime_info()).split("\n")[:-1]
info_dict = {s.split(":")[0].strip():s.split(":")[1].strip() for s in cupy_info}
env["device_name"] = info_dict[f'Device {device} Name']
env["cuda_version"] = cp.cuda.runtime.driverGetVersion()
env["compute_capability"] = self.cc
env["iterations"] = self.iterations
env["compiler_options"] = compiler_options
env["device_properties"] = self.devprops
self.env = env
self.name = env["device_name"]
def __enter__(self):
return self
def __exit__(self, *exc):
"""destroy the device context"""
self.dev.__exit__()
def ready_argument_list(self, arguments):
"""ready argument list to be passed to the kernel, allocates gpu mem
:param arguments: List of arguments to be passed to the kernel.
The order should match the argument list on the CUDA kernel.
Allowed values are numpy.ndarray, and/or numpy.int32, numpy.float32, and so on.
:type arguments: list(numpy objects)
:returns: A list of arguments that can be passed to a CUDA kernel.
:rtype: list( cupy.ndarray, numpy.int32, ... )
"""
gpu_args = []
for arg in arguments:
# if arg is a numpy array, copy it to the device
if isinstance(arg, np.ndarray):
alloc = cp.array(arg)
self.allocations.append(alloc)
gpu_args.append(alloc)
else: # if not a numpy array, just pass argument along
gpu_args.append(arg)
return gpu_args
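# Hedged example (argument names are made up): numpy arrays are copied to
# the device as cupy.ndarray, scalar values pass through unchanged.
#
#   x = np.random.randn(1024).astype(np.float32)
#   n = np.int32(1024)
#   gpu_args = dev.ready_argument_list([x, n])
#   # gpu_args == [cupy.ndarray copy of x, np.int32(1024)]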
def compile(self, kernel_instance):
"""call the CUDA compiler to compile the kernel, return the device function
:param kernel_name: The name of the kernel to be compiled, used to lookup the
function after compilation.
:type kernel_name: string
:param kernel_string: The CUDA kernel code that contains the function `kernel_name`
:type kernel_string: string
:returns: A CUDA kernel that can be called directly.
:rtype: cupy.RawKernel
"""
kernel_string = kernel_instance.kernel_string
kernel_name = kernel_instance.name
compiler_options = self.compiler_options
if not any(['--std=' in opt for opt in self.compiler_options]):
compiler_options = ['--std=c++11'] + self.compiler_options
options = tuple(compiler_options)
self.current_module = cp.RawModule(code=kernel_string, options=options,
name_expressions=[kernel_name])
self.func = self.current_module.get_function(kernel_name)
return self.func
def benchmark(self, func, gpu_args, threads, grid):
"""runs the kernel and measures time repeatedly, returns average time
Runs the kernel and measures kernel execution time repeatedly, number of
iterations is set during the creation of CudaFunctions. Benchmark returns
a robust average, from all measurements the fastest and slowest runs are
discarded and the rest is included in the returned average. The reason for
this is to be robust against initialization artifacts and other exceptional
cases.
:param func: A cupy kernel compiled for this specific kernel configuration
:type func: cupy.RawKernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( cupy.ndarray, numpy.int32, ...)
:param threads: A tuple listing the number of threads in each dimension of
the thread block
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of thread blocks in each dimension
of the grid
:type grid: tuple(int, int)
:returns: A dictionary with benchmark results.
:rtype: dict()
"""
result = dict()
self.dev.synchronize()
for _ in range(self.iterations):
for obs in self.observers:
obs.before_start()
self.dev.synchronize()
self.start.record(stream=self.stream)
self.run_kernel(func, gpu_args, threads, grid, stream=self.stream)
self.end.record(stream=self.stream)
for obs in self.observers:
obs.after_start()
while not self.end.done:
for obs in self.observers:
obs.during()
time.sleep(1e-6)
for obs in self.observers:
obs.after_finish()
for obs in self.observers:
result.update(obs.get_results())
return result
def copy_constant_memory_args(self, cmem_args):
"""adds constant memory arguments to the most recently compiled module
:param cmem_args: A dictionary containing the data to be passed to the
device constant memory. The format to be used is as follows: A
string key is used to name the constant memory symbol to which the
value needs to be copied. Similar to regular arguments, these need
to be numpy objects, such as numpy.ndarray or numpy.int32, and so on.
:type cmem_args: dict( string: numpy.ndarray, ... )
"""
for k, v in cmem_args.items():
symbol = self.current_module.get_global(k)
            constant_mem = cp.ndarray(v.shape, v.dtype, symbol)
constant_mem[:] = cp.asarray(v)
def copy_shared_memory_args(self, smem_args):
"""add shared memory arguments to the kernel"""
self.smem_size = smem_args["size"]
def copy_texture_memory_args(self, texmem_args):
"""adds texture memory arguments to the most recently compiled module
:param texmem_args: A dictionary containing the data to be passed to the
device texture memory. See tune_kernel().
:type texmem_args: dict
"""
raise NotImplementedError('CuPy backend does not yet support texture memory')
def run_kernel(self, func, gpu_args, threads, grid, stream=None):
"""runs the CUDA kernel passed as 'func'
:param func: A cupy kernel compiled for this specific kernel configuration
:type func: cupy.RawKernel
:param gpu_args: A list of arguments to the kernel, order should match the
order in the code. Allowed values are either variables in global memory
or single values passed by value.
:type gpu_args: list( cupy.ndarray, numpy.int32, ...)
:param threads: A tuple listing the number of threads in each dimension of
the thread block
:type threads: tuple(int, int, int)
:param grid: A tuple listing the number of thread blocks in each dimension
of the grid
:type grid: tuple(int, int)
"""
func(grid, threads, gpu_args, stream=stream, shared_mem=self.smem_size)
def memset(self, allocation, value, size):
"""set the memory in allocation to the value in value
:param allocation: A GPU memory allocation unit
:type allocation: cupy.ndarray
:param value: The value to set the memory to
:type value: a single 8-bit unsigned int
        :param size: The size of the allocation unit in bytes
:type size: int
"""
allocation[:] = value
def memcpy_dtoh(self, dest, src):
"""perform a device to host memory copy
:param dest: A numpy array in host memory to store the data
:type dest: numpy.ndarray
:param src: A GPU memory allocation unit
:type src: cupy.ndarray
"""
if isinstance(dest, np.ndarray):
tmp = cp.asnumpy(src)
np.copyto(dest, tmp)
elif isinstance(dest, cp.ndarray):
cp.copyto(dest, src)
else:
raise ValueError("dest type not supported")
def memcpy_htod(self, dest, src):
"""perform a host to device memory copy
:param dest: A GPU memory allocation unit
:type dest: cupy.ndarray
        :param src: A numpy array in host memory containing the data to copy
:type src: numpy.ndarray
"""
if isinstance(src, np.ndarray):
src = cp.asarray(src)
cp.copyto(dest, src)
units = {'time': 'ms'}
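
# --- Illustrative usage sketch (editor addition, hedged) ---
# A minimal sketch of driving the backend above directly. The backend class is
# passed in as an argument because its exact name is defined above this section;
# the kernel-instance stand-in only provides the `name` and `kernel_string`
# attributes that compile() actually reads. Kernel, sizes, and launch dimensions
# are made up for illustration.
def _example_backend_usage(backend_cls):
    from collections import namedtuple
    KernelInstance = namedtuple("KernelInstance", ["name", "kernel_string"])
    kernel = KernelInstance(
        name="vector_add",
        kernel_string="""
        __global__ void vector_add(float *c, const float *a, const float *b, int n) {
            int i = blockIdx.x * blockDim.x + threadIdx.x;
            if (i < n) c[i] = a[i] + b[i];
        }
        """)
    dev = backend_cls(device=0, iterations=7)
    n = np.int32(1024)
    a, b, c = (np.random.randn(1024).astype(np.float32) for _ in range(3))
    # order matches the kernel signature: output first, then inputs, then the size
    gpu_args = dev.ready_argument_list([c, a, b, n])
    func = dev.compile(kernel)
    # 8 blocks of 128 threads cover all 1024 elements
    return dev.benchmark(func, gpu_args, threads=(128, 1, 1), grid=(8, 1))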
|
|
import logging
from connexion import NoContent
from flask import request
from geoalchemy2 import WKTElement
import osmapi
import traceback
import api.models
from sqlalchemy import func
import datetime
from sqlalchemy import tuple_
from sqlalchemy.exc import SQLAlchemyError
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
db_session = api.models.init_db()
def get_missions(lat, lon, radius, limit, lang, user_id):
try:
location = WKTElement('POINT('+str(lon)+' '+str(lat)+')', srid=4326)
no_of_errors = 10
# get already solved error ids
already_solved = db_session.query(api.models.Solution.error_id). \
filter(api.models.Solution.user_id == user_id)
# get nearest neighbors candidates from location
q = db_session.query(api.models.kort_errors.schema, api.models.kort_errors.errorId) \
.filter((~api.models.kort_errors.errorId.in_(already_solved))) \
.order_by(api.models.kort_errors.geom.distance_centroid(location)) \
.limit(limit*no_of_errors).subquery()
# partition by error type
q = db_session.query(api.models.kort_errors, func.row_number().over(
partition_by=api.models.kort_errors.error_type).label("row_number")) \
.filter(tuple_(api.models.kort_errors.schema, api.models.kort_errors.errorId).in_(q))\
.filter(func.ST_DistanceSphere(api.models.kort_errors.geom, location) < radius).subquery()
# set max errors of each type
q = db_session.query(api.models.kort_errors).select_entity_from(q).filter(q.c.row_number <= limit/no_of_errors)
    except Exception:
        logger.error(traceback.format_exc())
        return []
    return [p.dump(lang) for p in q][:limit]
def put_mission_solution(schema_id, error_id, body):
s = body['solution']
user_id = s['userId']
secret = request.headers.get('Authorization')
user = db_session.query(api.models.User).filter(api.models.User.id == user_id). \
filter(api.models.User.secret == secret).one_or_none()
if not user:
return NoContent, 401
try:
q = db_session.query(api.models.kort_errors).filter(api.models.kort_errors.errorId == error_id).filter(
api.models.kort_errors.schema == schema_id)
answer = s['value']
solved = s['solved']
lang = s['lang']
if s['option']:
answer = s['option']
if q.count() == 1:
error = q.first()
error_type = error.error_type
koins = error.fix_koin_count
if s['stats_enabled']:
koins += 1
# write solution to db
new_solution = api.models.Solution(
userId=user_id,
create_date=datetime.datetime.utcnow(),
error_id=error_id,
error_type=error_type,
koin_count=koins if solved else 0,
schema=schema_id,
osmId=s['osm_id'],
solution=answer,
complete=False,
valid=solved)
db_session.add(new_solution)
db_session.commit()
# get new badges for this user if solved
return create_new_achievements(user_id=user_id, lang=lang, mission_type=error_type) if solved else []
else:
return NoContent, 404
except SQLAlchemyError as e:
logger.error(traceback.format_exc())
db_session.rollback()
return NoContent, 404
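
# --- Illustrative request payload (editor addition, hedged) ---
# Sketch of the JSON body put_mission_solution() expects, derived only from the
# keys read above ('userId', 'value', 'option', 'solved', 'lang', 'stats_enabled',
# 'osm_id'); the values are made up for illustration.
EXAMPLE_SOLUTION_BODY = {
    'solution': {
        'userId': 42,
        'value': 'yes',
        'option': None,
        'solved': True,
        'lang': 'en',
        'stats_enabled': True,
        'osm_id': 123456,
    }
}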
def create_new_achievements(user_id, lang, mission_type):
all_new_badges = []
# get user badges
user_badge_ids = db_session.query(api.models.UserBadge.badge_id).filter(api.models.UserBadge.user_id == user_id)
# get no of missions in general
q = db_session.query(api.models.Solution).filter(api.models.Solution.user_id == user_id)
no_of_missions = q.count()
all_new_badges.extend(
get_not_achieved_badges_no_of_missions(user_badge_ids=user_badge_ids, no_of_missions=no_of_missions))
# no of mission for this type of mission
q = db_session.query(api.models.Solution).filter(api.models.Solution.user_id == user_id).\
filter(api.models.Solution.error_type == mission_type)
no_of_missions_type = q.count()
all_new_badges.extend(
get_not_achieved_badges_type_of_mission(user_badge_ids=user_badge_ids, no_of_missions_type=no_of_missions_type,
mission_type=mission_type))
# per day achievements
q = db_session.query(func.count('*')).filter(api.models.Solution.user_id == user_id).\
group_by(func.to_char(api.models.Solution.create_date, "DD.MM.YYYY"))
if len(q.all()) != 0:
max_number_of_missions_per_day = max(q.all())[0]
if (max_number_of_missions_per_day == 6):
new_badge = db_session.query(api.models.Badge).\
filter(api.models.Badge.name.like('six_per_day')).all()
all_new_badges.extend(new_badge)
# highscore achievements
q_highscore = db_session.query(api.models.Highscore).filter(api.models.Highscore.user_id == user_id).first()
if q_highscore:
rank = q_highscore.rank
if rank >= 1 and rank <= 3:
all_new_badges.extend(
get_not_achieved_badges_highscore(user_badge_ids=user_badge_ids, rank=rank))
for row in all_new_badges:
logger.debug('new achievement '+row.title)
# insert badges
badgesAchieved = []
for badge in all_new_badges:
db_session.add(
api.models.UserBadge(user_id=user_id, badge_id=badge.id, create_date=datetime.datetime.utcnow())
)
badgesAchieved.append(badge.dump(language=lang, achieved=True, achievementDate=datetime.datetime.utcnow()))
db_session.commit()
return badgesAchieved
def get_not_achieved_badges_no_of_missions(user_badge_ids, no_of_missions):
new_badges = db_session.query(api.models.Badge).\
filter(api.models.Badge.name.like('total_fix_count_%')).\
filter(api.models.Badge.compare_value <= no_of_missions).\
filter(~api.models.Badge.id.in_(user_badge_ids)).all()
return new_badges
def get_not_achieved_badges_type_of_mission(user_badge_ids, no_of_missions_type, mission_type):
new_badges = db_session.query(api.models.Badge).\
filter(api.models.Badge.name.like('fix_count_'+mission_type+'_%')).\
filter(api.models.Badge.compare_value <= no_of_missions_type).\
filter(~api.models.Badge.id.in_(user_badge_ids)).all()
return new_badges
def get_not_achieved_badges_highscore(user_badge_ids, rank):
new_badges = db_session.query(api.models.Badge). \
filter(api.models.Badge.name.like('highscore_place_' + str(rank))). \
filter(~api.models.Badge.id.in_(user_badge_ids)).all()
return new_badges
def get_osm_geom(osm_type, osm_id):
osm_api = osmapi.OsmApi()
try:
if osm_type == 'way':
nodes = osm_api.WayFull(osm_id)
ordered_node_list = []
way_dict = {}
way_order = []
for item in nodes:
if item.get('type') == 'node':
node = item.get('data')
lat = node.get('lat')
lon = node.get('lon')
way_dict[node.get('id')] = [lat, lon]
elif item.get('type') == 'way':
way = item.get('data')
way_order = way.get('nd')
for node_id in way_order:
ordered_node_list.append(way_dict.get(node_id))
return ordered_node_list
elif osm_type == 'node':
node = osm_api.NodeGet(osm_id)
lat = node.get('lat')
lon = node.get('lon')
return [lat, lon]
elif osm_type == 'relation':
nodes = osm_api.RelationFull(osm_id)
ordered_node_list = []
way_dict = {}
ways = {}
members = []
for item in nodes:
if item.get('type') == 'node':
node = item.get('data')
lat = node.get('lat')
lon = node.get('lon')
way_dict[node.get('id')] = [lat, lon]
elif item.get('type') == 'way':
way = item.get('data')
ways[way.get('id')] = way.get('nd')
elif item.get('type') == 'relation':
relation = item.get('data')
members = relation.get('member')
# choose only outer member since mapbox does not support multipolygons
for rel_member in members:
if rel_member.get('role') == 'outer': member = rel_member
for node_id in ways.get(member.get('ref')):
ordered_node_list.append(way_dict.get(node_id))
return ordered_node_list
except Exception as e:
logger.error(traceback.format_exc())
return []
|
|
from gameplay.game import Game
class Checkers(Game):
player_mapping = {
1: 'r',
2: 'w',
}
def __init__(self, board=None, current_player=1):
self.board = board or self.initial_board()
self.current_player = current_player
self.moves_without_capture = 0
@staticmethod
def initial_board():
"""
Initial board state.
"""
rx = ' r r r rr r r r r r r r'
wx = 'w w w w w w w ww w w w '
return rx + ' ' * 16 + wx
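    # Board layout note (editor addition): the board is a flat 64-character string
    # indexed row-major, so square i sits at row i // 8, column i % 8. Simple
    # diagonal steps therefore differ by 7 or 9 in index, and capture jumps by 14
    # or 18, which is what apply_move(), captures_() and moves_() below rely on.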
def draw_board(self):
s = ''
for i in range(0,64,8):
s += self.board[i:i+8]
s += '\n'
s += '=' * 8
return s
def is_king(self, char):
return not char.islower()
def get_direction(self, piece):
if self.is_king(piece):
return None
elif piece == 'r':
return 1
elif piece == 'w':
return -1
def get_opponent(self, p):
"""
Returns char symbol for opponent.
"""
if p in 'Rr':
return 'w'
elif p in 'Ww':
return 'r'
else:
return None
def result(self):
bl = self.board.lower()
if 'w' not in bl:
return 1
elif 'r' not in bl:
return 2
elif self.moves_without_capture >50:
return -1
elif not self.moves(self.player_mapping[self.current_player]):
# Player is stuck; no more moves, lose
return 3 - self.current_player
else:
return 0
def apply_move(self, board, move):
"""
        Takes the string representation of the board
        and applies the move to it. Returns the resulting
        board. The move can be a multiple capture.
"""
start_pos, *visited_squares = move
for end_pos in visited_squares:
distance = abs(end_pos - start_pos)
board_list = list(board)
board_list[start_pos], board_list[end_pos] = board[end_pos], board[start_pos]
if distance > 9:
# Keep track of no-capture counter for tie games
self.moves_without_capture = 0
jumped_pos = (start_pos + end_pos) // 2
board_list[jumped_pos] = ' '
else:
self.moves_without_capture += 1
            # Make pawns that reached the other end of the board kings
for position in range(0, 8):
if board_list[position] == 'w':
board_list[position] = 'W'
for position in range(56, 64):
if board_list[position] == 'r':
board_list[position] = 'R'
board = ''.join(board_list)
start_pos = end_pos
return board
def transition(self, move, _): # No need to use the third argument (player) for checkers
self.board = self.apply_move(self.board, move)
self.current_player = 3 - self.current_player # Toggle between 1 and 2.
def move_legal(self, move):
"""
Verify that a move is legal.
"""
# Make sure move is a list of ints
if not isinstance(move, list) or not all([isinstance(i, int) for i in move]):
return False
# Check for out-of-bounds moves
if any([pos<0 or pos>63 for pos in move]):
return False
start_position, *visited_squares = move
player = self.player_mapping[self.current_player]
# Check if starting position is correct player
if self.board[start_position].lower() != player:
return False
# Check for forced jumps.
valid_captures = self.captures(player)
if valid_captures:
return move in valid_captures
else:
return move in self.moves_(start_position)
def captures(self, player):
"""
Returns all potential capture moves
"""
positions = [i for i, char in enumerate(self.board) if char.lower() == player]
captures = []
for position in positions:
paths = self.captures_([position], self.board)
            captures.extend(capture for capture in paths if len(capture) > 1)
return captures
def captures_(self, path, board):
"""
Returns all potential capture moves for a given start position and board
"""
start_pos = path[-1]
player = board[start_pos]
opponent = self.get_opponent(player)
direction = self.get_direction(player)
# Handle left and right borders
if start_pos % 8 in [0,1]:
end_positions = [start_pos + 18, start_pos - 14]
elif start_pos % 8 in [6,7]:
end_positions = [start_pos + 14, start_pos - 18]
else:
end_positions = [start_pos + 18, start_pos + 14, start_pos - 14, start_pos - 18]
# Handle bottom and top borders
end_positions = [pos for pos in end_positions if 0 <= pos <= 63]
# Make sure there is a captured pawn
end_positions = [pos for pos in end_positions if board[(start_pos+pos)//2].lower() == opponent]
        # Make sure the end position is an empty square
end_positions = [pos for pos in end_positions if board[pos].lower() == ' ']
# Handle direction of play
if direction == 1:
end_positions = [pos for pos in end_positions if pos > start_pos]
elif direction == -1:
end_positions = [pos for pos in end_positions if pos < start_pos]
        # Explore every possible jump continuation and collect the resulting paths
        paths = []
        for end_pos in end_positions:
            new_path = path + [end_pos]
            paths.extend(self.captures_(new_path, self.apply_move(self.board, new_path)))
        if paths:
            return paths
        return [path]
def moves(self, player):
"""
All potential non-capture moves (1 diagonal square away)
"""
positions = [i for i, char in enumerate(self.board) if char.lower() == player]
moves = []
for pos in positions:
moves += self.moves_(pos)
return moves
def moves_(self, start_position):
"""
All potential non-capture moves (1 diagonal square away)
for a given start position
"""
player = self.board[start_position]
direction = self.get_direction(player)
# Handle left and right borders
if start_position % 8 == 0:
end_positions = [start_position + 9, start_position - 7]
elif start_position % 8 == 7:
end_positions = [start_position + 7, start_position - 9]
else:
end_positions = [start_position + 7, start_position + 9, start_position - 7, start_position - 9]
# Handle bottom and top borders
end_positions = [pos for pos in end_positions if 0 <= pos <= 63]
# Make sure end position is free
end_positions = [pos for pos in end_positions if self.board[pos] == ' ']
# Handle direction of play
if direction == 1:
end_positions = [pos for pos in end_positions if pos > start_position]
elif direction == -1:
end_positions = [pos for pos in end_positions if pos < start_position]
return [[start_position, p] for p in end_positions]
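
# --- Illustrative usage sketch (editor addition, hedged) ---
# Minimal sketch of one round against the Checkers class above: draw the initial
# board, pick the first legal non-capture move for the current player, apply it,
# and report the result code (0 means the game is still running).
def _example_checkers_round():
    game = Checkers()
    print(game.draw_board())
    player_char = game.player_mapping[game.current_player]
    legal_moves = game.moves(player_char)
    if legal_moves and game.move_legal(legal_moves[0]):
        game.transition(legal_moves[0], game.current_player)
    return game.result()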
|
|
# django imports
from django.conf import settings
from django.contrib.auth.decorators import permission_required
from django.core.urlresolvers import reverse
from django.forms import ModelForm
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
# lfs imports
import lfs.core.utils
from lfs.caching.utils import lfs_get_object_or_404
from lfs.core.models import Shop
from lfs.core.signals import shop_changed
from lfs.core.utils import import_module
from lfs.core.utils import import_symbol
from lfs.core.utils import LazyEncoder
from lfs.core.widgets.image import LFSImageInput
from lfs.manage.views.lfs_portlets import portlets_inline
class ShopDataForm(ModelForm):
"""Form to edit shop data.
"""
def __init__(self, *args, **kwargs):
super(ShopDataForm, self).__init__(*args, **kwargs)
self.fields["image"].widget = LFSImageInput()
class Meta:
model = Shop
fields = ("name", "shop_owner", "from_email", "notification_emails",
"description", "image", "static_block", "checkout_type", "confirm_toc",
"google_analytics_id", "ga_site_tracking", "ga_ecommerce_tracking")
class ShopSEOForm(ModelForm):
"""Form to edit shop SEO data.
"""
class Meta:
model = Shop
fields = ("meta_title", "meta_keywords", "meta_description")
class ShopDefaultValuesForm(ModelForm):
"""Form to edit shop default values.
"""
class Meta:
model = Shop
fields = ("price_calculator", "product_cols", "product_rows", "category_cols",
"default_country", "invoice_countries", "shipping_countries", "default_locale", "use_international_currency_code")
@permission_required("core.manage_shop", login_url="/login/")
def manage_shop(request, template_name="manage/shop/shop.html"):
"""Displays the form to manage shop data.
"""
shop = lfs.core.utils.get_default_shop()
data_form = ShopDataForm(instance=shop)
seo_form = ShopSEOForm(instance=shop)
default_values_form = ShopDefaultValuesForm(instance=shop)
ong = lfs.core.utils.import_symbol(settings.LFS_ORDER_NUMBER_GENERATOR)
try:
order_number = ong.objects.get(id="order_number")
except ong.DoesNotExist:
order_number = ong.objects.create(id="order_number")
order_numbers_form = order_number.get_form(instance=order_number)
return render_to_response(template_name, RequestContext(request, {
"shop": shop,
"data": data_tab(request, shop, data_form),
"default_values": default_values_tab(request, shop, default_values_form),
"order_numbers": order_numbers_tab(request, shop, order_numbers_form),
"seo": seo_tab(request, shop, seo_form),
"portlets": portlets_inline(request, shop),
}))
# Parts
def data_tab(request, shop, form, template_name="manage/shop/data_tab.html"):
"""Renders the data tab of the shop.
"""
return render_to_string(template_name, RequestContext(request, {
"shop": shop,
"form": form,
}))
def order_numbers_tab(request, shop, form, template_name="manage/order_numbers/order_numbers_tab.html"):
"""Renders the ordern number tab of the shop.
"""
return render_to_string(template_name, RequestContext(request, {
"shop": shop,
"form": form,
}))
def default_values_tab(request, shop, form, template_name="manage/shop/default_values_tab.html"):
"""Renders the default value tab of the shop.
"""
return render_to_string(template_name, RequestContext(request, {
"shop": shop,
"form": form,
}))
def seo_tab(request, shop, form, template_name="manage/shop/seo_tab.html"):
"""Renders the SEO tab of the shop.
"""
return render_to_string(template_name, RequestContext(request, {
"shop": shop,
"form": form,
}))
# Actions
@permission_required("core.manage_shop", login_url="/login/")
@require_POST
def save_data_tab(request):
"""Saves the data tab of the default shop.
"""
shop = lfs.core.utils.get_default_shop()
form = ShopDataForm(instance=shop, data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
# Delete image
if request.POST.get("delete_image"):
shop.image.delete()
# reinitialize form in order to properly display uploaded image
form = ShopDataForm(instance=shop)
shop_changed.send(shop)
message = _(u"Shop data has been saved.")
else:
message = _(u"Please correct the indicated errors.")
result = simplejson.dumps({
"html": [["#data", data_tab(request, shop, form)]],
"message": message,
}, cls=LazyEncoder)
return HttpResponse(result)
@permission_required("core.manage_shop", login_url="/login/")
@require_POST
def save_default_values_tab(request):
"""Saves the default value part
"""
shop = lfs_get_object_or_404(Shop, pk=1)
form = ShopDefaultValuesForm(instance=shop, data=request.POST)
if form.is_valid():
shop = form.save()
shop_changed.send(shop)
message = _(u"Shop default values have been saved.")
else:
message = _(u"Please correct the indicated errors.")
result = simplejson.dumps({
"html": [["#default_values", default_values_tab(request, shop, form)]],
"message": message
}, cls=LazyEncoder)
return HttpResponse(result)
@permission_required("core.manage_shop", login_url="/login/")
@require_POST
def save_seo_tab(request):
"""Saves the seo tab of the default shop.
"""
shop = lfs.core.utils.get_default_shop()
form = ShopSEOForm(instance=shop, data=request.POST)
if form.is_valid():
form.save()
shop_changed.send(shop)
message = _(u"Shop SEO data has been saved.")
else:
message = _(u"Please correct the indicated errors.")
result = simplejson.dumps({
"html": [["#seo", seo_tab(request, shop, form)]],
"message": message,
}, cls=LazyEncoder)
return HttpResponse(result)
@permission_required("core.manage_shop", login_url="/login/")
@require_POST
def save_order_numbers_tab(request):
"""Saves the order number tab of the default shop.
"""
shop = lfs.core.utils.get_default_shop()
ong = import_symbol(settings.LFS_ORDER_NUMBER_GENERATOR)
order_number = ong.objects.get(id="order_number")
form = order_number.get_form(instance=order_number, data=request.POST)
if form.is_valid():
form.save()
shop_changed.send(shop)
message = _(u"Order numbers has been saved.")
else:
message = _(u"Please correct the indicated errors.")
result = simplejson.dumps({
"html": [["#order_numbers", order_numbers_tab(request, shop, form)]],
"message": message,
}, cls=LazyEncoder)
return HttpResponse(result)
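
# --- Illustrative response shape (editor addition, hedged) ---
# Sketch of the decoded JSON envelope that the save_* views above return to the
# AJAX caller; the rendered HTML is shortened here for illustration only.
EXAMPLE_SAVE_RESPONSE = {
    "html": [["#data", "<rendered data_tab markup>"]],
    "message": "Shop data has been saved.",
}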
|
|
'''
Copyright (c) 2016, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an
agency of the United States Government. Neither the United States
Government nor the United States Department of Energy, nor Battelle,
nor any of their employees, nor any jurisdiction or organization
that has cooperated in the development of these materials, makes
any warranty, express or implied, or assumes any legal liability
or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed,
or represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or
service by trade name, trademark, manufacturer, or otherwise does
not necessarily constitute or imply its endorsement, recommendation,
or favoring by the United States Government or any agency thereof,
or Battelle Memorial Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the
United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
'''
import datetime
import logging
import math
from copy import deepcopy
from .common import check_date, validation_builder, check_run_status, setpoint_control_check
from volttron.platform.agent.math_utils import mean
DUCT_STC_RCX = 'Duct Static Pressure Set Point Control Loop Dx'
DUCT_STC_RCX1 = 'Low Duct Static Pressure Dx'
DUCT_STC_RCX2 = 'High Duct Static Pressure Dx'
DX = '/diagnostic message'
CORRECT_STC_PR = 'suggested duct static pressure set point'
STCPR_VALIDATE = 'Duct Static Pressure ACCx'
VALIDATE_FILE_TOKEN = 'stcpr-rcx'
ST = 'state'
DATA = '/data/'
STCPR_NAME = 'duct static pressure'
def create_table_key(table_name, timestamp):
return '&'.join([table_name, timestamp.strftime('%m-%d-%y %H:%M')])
class DuctStaticRcx(object):
"""Air-side HVAC Self-Correcting Diagnostic: Detect and correct
duct static pressure problems.
"""
def __init__(self, no_req_data, auto_correct_flag, stpt_allowable_dev,
max_stcpr_stpt, stcpr_retuning, zone_high_dmpr_threshold,
zone_low_dmpr_threshold, hdzn_dmpr_thr, min_stcpr_stpt,
analysis, stcpr_stpt_cname):
# Initialize data arrays
self.table_key = None
self.file_key = None
self.zn_dmpr_arr = []
self.stcpr_stpt_arr = []
self.stcpr_arr = []
self.timestamp_arr = []
self.data = {}
self.dx_table = {}
# Initialize configurable thresholds
self.analysis = analysis + '-' + VALIDATE_FILE_TOKEN
self.file_name_id = analysis + '-' + VALIDATE_FILE_TOKEN
self.stcpr_stpt_cname = stcpr_stpt_cname
self.no_req_data = no_req_data
self.stpt_allowable_dev = float(stpt_allowable_dev)
self.max_stcpr_stpt = float(max_stcpr_stpt)
self.stcpr_retuning = float(stcpr_retuning)
self.zone_high_dmpr_threshold = float(zone_high_dmpr_threshold)
self.zone_low_dmpr_threshold = float(zone_low_dmpr_threshold)
self.sp_allowable_dev = float(stpt_allowable_dev)
self.auto_correct_flag = auto_correct_flag
self.min_stcpr_stpt = float(min_stcpr_stpt)
self.hdzn_dmpr_thr = float(hdzn_dmpr_thr)
self.token_offset = 0.0
self.low_msg = ('The supply fan is running at nearly 100% of full '
'speed, data corresponding to {} will not be used.')
self.high_msg = ('The supply fan is running at the minimum speed, '
'data corresponding to {} will not be used.')
def reinitialize(self):
"""Reinitialize data arrays"""
self.table_key = None
self.file_key = None
self.zn_dmpr_arr = []
self.stcpr_stpt_arr = []
self.stcpr_arr = []
self.timestamp_arr = []
self.data = {}
self.dx_table = {}
def duct_static(self, current_time, stcpr_stpt_data, stcpr_data,
zn_dmpr_data, low_dx_cond, high_dx_cond, dx_result,
validate):
"""Check duct static pressure RCx pre-requisites and assemble the
duct static pressure analysis data set.
"""
if check_date(current_time, self.timestamp_arr):
self.reinitialize()
return dx_result
if low_dx_cond:
dx_result.log(self.low_msg.format(current_time), logging.DEBUG)
return dx_result
if high_dx_cond:
dx_result.log(self.high_msg.format(current_time), logging.DEBUG)
return dx_result
file_key = create_table_key(VALIDATE_FILE_TOKEN, current_time)
data = validation_builder(validate, STCPR_VALIDATE, DATA)
run_status = check_run_status(self.timestamp_arr, current_time, self.no_req_data)
if run_status is None:
dx_result.log('Current analysis data set has insufficient data '
'to produce a valid diagnostic result.')
self.reinitialize()
return dx_result
if run_status:
self.table_key = create_table_key(self.analysis, self.timestamp_arr[-1])
avg_stcpr_stpt, dx_table = setpoint_control_check(self.stcpr_stpt_arr,
self.stcpr_arr,
self.stpt_allowable_dev,
DUCT_STC_RCX, DX,
STCPR_NAME, self.token_offset)
self.dx_table.update(dx_table)
dx_result = self.low_stcpr_dx(dx_result, avg_stcpr_stpt)
dx_result = self.high_stcpr_dx(dx_result, avg_stcpr_stpt)
dx_result.insert_table_row(self.table_key, self.dx_table)
self.data.update({STCPR_VALIDATE + DATA + ST: 1})
dx_result.insert_table_row(self.file_key, self.data)
self.reinitialize()
        self.stcpr_stpt_arr.append(mean(stcpr_stpt_data))
        self.stcpr_arr.append(mean(stcpr_data))
self.zn_dmpr_arr.append(mean(zn_dmpr_data))
self.timestamp_arr.append(current_time)
if self.data:
self.data.update({STCPR_VALIDATE + DATA + ST: 0})
dx_result.insert_table_row(self.file_key, self.data)
self.data = data
self.file_key = file_key
return dx_result
def low_stcpr_dx(self, dx_result, avg_stcpr_stpt):
"""Diagnostic to identify and correct low duct static pressure
(correction by modifying duct static pressure set point).
"""
zn_dmpr = deepcopy(self.zn_dmpr_arr)
zn_dmpr.sort(reverse=False)
zone_dmpr_lowtemp = zn_dmpr[:int(math.ceil(len(self.zn_dmpr_arr)*0.5)) if len(self.zn_dmpr_arr) != 1 else 1]
zn_dmpr_low_avg = mean(zone_dmpr_lowtemp)
zone_dmpr_hightemp = zn_dmpr[int(math.ceil(len(self.zn_dmpr_arr)*0.5)) - 1 if len(self.zn_dmpr_arr) != 1 else 0:]
zn_dmpr_high_avg = mean(zone_dmpr_hightemp)
if zn_dmpr_high_avg > self.zone_high_dmpr_threshold and zn_dmpr_low_avg > self.zone_low_dmpr_threshold:
if avg_stcpr_stpt is None:
# Create diagnostic message for fault
# when duct static pressure set point
# is not available.
                msg = ('The duct static pressure set point has been '
                       'detected to be too low but duct static pressure '
                       'set point data is not available.')
dx_msg = 14.1
elif self.auto_correct_flag:
auto_correct_stcpr_stpt = avg_stcpr_stpt + self.stcpr_retuning
if auto_correct_stcpr_stpt <= self.max_stcpr_stpt:
dx_result.command(self.stcpr_stpt_cname, auto_correct_stcpr_stpt)
new_stcpr_stpt = '%s' % float('%.2g' % auto_correct_stcpr_stpt)
new_stcpr_stpt = new_stcpr_stpt + ' in. w.g.'
msg = ('The duct static pressure was detected to be '
'too low. The duct static pressure has been '
'increased to: {}'
.format(new_stcpr_stpt))
dx_msg = 11.1
else:
dx_result.command(self.stcpr_stpt_cname, self.max_stcpr_stpt)
new_stcpr_stpt = '%s' % float('%.2g' % self.max_stcpr_stpt)
new_stcpr_stpt = new_stcpr_stpt + ' in. w.g.'
msg = ('The duct static pressure set point is at the '
'maximum value configured by the building '
                           'operator: {}.'.format(new_stcpr_stpt))
dx_msg = 12.1
else:
msg = ('The duct static pressure set point was detected '
'to be too low but auto-correction is not enabled.')
dx_msg = 13.1
else:
msg = ('No re-tuning opportunity was detected during the low duct '
'static pressure diagnostic.')
dx_msg = 10.0
self.dx_table.update({DUCT_STC_RCX1 + DX: dx_msg})
dx_result.log(msg, logging.INFO)
return dx_result
def high_stcpr_dx(self, dx_result, avg_stcpr_stpt):
"""Diagnostic to identify and correct high duct static pressure
(correction by modifying duct static pressure set point)
"""
zn_dmpr = deepcopy(self.zn_dmpr_arr)
zn_dmpr.sort(reverse=True)
        zn_dmpr = zn_dmpr[:int(math.ceil(len(self.zn_dmpr_arr)*0.5)) if len(self.zn_dmpr_arr) != 1 else 1]
avg_zone_damper = mean(zn_dmpr)
if avg_zone_damper <= self.hdzn_dmpr_thr:
if avg_stcpr_stpt is None:
# Create diagnostic message for fault
# when duct static pressure set point
# is not available.
                msg = ('The duct static pressure set point has been '
                       'detected to be too high but duct static '
                       'pressure set point data is not available.')
dx_msg = 24.1
elif self.auto_correct_flag:
auto_correct_stcpr_stpt = avg_stcpr_stpt - self.stcpr_retuning
if auto_correct_stcpr_stpt >= self.min_stcpr_stpt:
dx_result.command(self.stcpr_stpt_cname, auto_correct_stcpr_stpt)
new_stcpr_stpt = '%s' % float('%.2g' % auto_correct_stcpr_stpt)
new_stcpr_stpt = new_stcpr_stpt + ' in. w.g.'
msg = ('The duct static pressure was detected to be '
'too high. The duct static pressure set point '
'has been reduced to: {}'
.format(new_stcpr_stpt))
dx_msg = 21.1
else:
dx_result.command(self.stcpr_stpt_cname, self.min_stcpr_stpt)
new_stcpr_stpt = '%s' % float('%.2g' % self.min_stcpr_stpt)
new_stcpr_stpt = new_stcpr_stpt + ' in. w.g.'
msg = ('The duct static pressure set point is at the '
'minimum value configured by the building '
                           'operator: {}.'.format(new_stcpr_stpt))
dx_msg = 22.1
else:
msg = ('Duct static pressure set point was detected to be '
'too high but auto-correction is not enabled.')
dx_msg = 23.1
else:
msg = ('No re-tuning opportunity was detected during the high duct '
'static pressure diagnostic.')
dx_msg = 20.0
self.dx_table.update({DUCT_STC_RCX2 + DX: dx_msg})
dx_result.log(msg, logging.INFO)
return dx_result
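
# --- Illustrative configuration sketch (editor addition, hedged) ---
# Example of instantiating the diagnostic above. The numeric thresholds are
# placeholders chosen only to show the expected argument types and units
# (pressures in in. w.g., damper thresholds in percent), not recommended tuning
# values; 'AHU1' and 'StaticPressureSetPoint' are made-up names.
def _example_duct_static_rcx():
    return DuctStaticRcx(
        no_req_data=10,
        auto_correct_flag=True,
        stpt_allowable_dev=10.0,
        max_stcpr_stpt=2.5,
        stcpr_retuning=0.15,
        zone_high_dmpr_threshold=90.0,
        zone_low_dmpr_threshold=25.0,
        hdzn_dmpr_thr=30.0,
        min_stcpr_stpt=0.5,
        analysis='AHU1',
        stcpr_stpt_cname='StaticPressureSetPoint')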
|
|
# -*- coding: utf-8 -*-
"""
Evaluation functions
"""
import logging
import numpy as np
from sklearn.cluster import AgglomerativeClustering, KMeans
from .datasets.similarity import fetch_MEN, fetch_WS353, fetch_SimLex999, fetch_MTurk, fetch_RG65, fetch_RW, fetch_TR9856
from .datasets.categorization import fetch_AP, fetch_battig, fetch_BLESS, fetch_ESSLI_1a, fetch_ESSLI_2b, \
fetch_ESSLI_2c
from web.analogy import *
from six import iteritems
from web.embedding import Embedding
logger = logging.getLogger(__name__)
def calculate_purity(y_true, y_pred):
"""
Calculate purity for given true and predicted cluster labels.
Parameters
----------
y_true: array, shape: (n_samples, 1)
True cluster labels
y_pred: array, shape: (n_samples, 1)
        Cluster assignment.
Returns
-------
purity: float
Calculated purity.
"""
assert len(y_true) == len(y_pred)
true_clusters = np.zeros(shape=(len(set(y_true)), len(y_true)))
pred_clusters = np.zeros_like(true_clusters)
for id, cl in enumerate(set(y_true)):
true_clusters[id] = (y_true == cl).astype("int")
for id, cl in enumerate(set(y_pred)):
pred_clusters[id] = (y_pred == cl).astype("int")
M = pred_clusters.dot(true_clusters.T)
return 1. / len(y_true) * np.sum(np.max(M, axis=1))
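# Worked example (editor addition): with y_true = np.array([0, 0, 1, 1]) and
# y_pred = np.array([0, 1, 1, 1]) the overlap matrix M is [[1, 0], [1, 2]];
# taking the best true cluster for each predicted cluster gives 1 + 2 = 3
# correctly grouped samples, so purity = 3 / 4 = 0.75.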
def evaluate_categorization(w, X, y, method="all", seed=None):
"""
Evaluate embeddings on categorization task.
Parameters
----------
w: Embedding or dict
Embedding to test.
X: vector, shape: (n_samples, )
Vector of words.
y: vector, shape: (n_samples, )
Vector of cluster assignments.
method: string, default: "all"
        What method to use. Possible values are "agglomerative", "kmeans", "all".
If "agglomerative" is passed, method will fit AgglomerativeClustering (with very crude
hyperparameter tuning to avoid overfitting).
If "kmeans" is passed, method will fit KMeans.
In both cases number of clusters is preset to the correct value.
seed: int, default: None
Seed passed to KMeans.
Returns
-------
purity: float
Purity of the best obtained clustering.
Notes
-----
    KMedoids method was excluded as it empirically didn't improve over KMeans (for the
    categorization tasks available in the package).
"""
if isinstance(w, dict):
w = Embedding.from_dict(w)
assert method in ["all", "kmeans", "agglomerative"], "Uncrecognized method"
mean_vector = np.mean(w.vectors, axis=0, keepdims=True)
words = np.vstack(w.get(word, mean_vector) for word in X.flatten())
ids = np.random.RandomState(seed).choice(range(len(X)), len(X), replace=False)
# Evaluate clustering on several hyperparameters of AgglomerativeClustering and
# KMeans
best_purity = 0
if method == "all" or method == "agglomerative":
best_purity = calculate_purity(y[ids], AgglomerativeClustering(n_clusters=len(set(y)),
affinity="euclidean",
linkage="ward").fit_predict(words[ids]))
logger.debug("Purity={:.3f} using affinity={} linkage={}".format(best_purity, 'euclidean', 'ward'))
for affinity in ["cosine", "euclidean"]:
for linkage in ["average", "complete"]:
purity = calculate_purity(y[ids], AgglomerativeClustering(n_clusters=len(set(y)),
affinity=affinity,
linkage=linkage).fit_predict(words[ids]))
logger.debug("Purity={:.3f} using affinity={} linkage={}".format(purity, affinity, linkage))
best_purity = max(best_purity, purity)
if method == "all" or method == "kmeans":
purity = calculate_purity(y[ids], KMeans(random_state=seed, n_init=10, n_clusters=len(set(y))).
fit_predict(words[ids]))
logger.debug("Purity={:.3f} using KMeans".format(purity))
best_purity = max(purity, best_purity)
return best_purity
def evaluate_on_semeval_2012_2(w):
"""
    Score embedding on the SemEval 2012 Task 2 relational similarity dataset
Parameters
----------
w : Embedding or dict
Embedding or dict instance.
Returns
-------
result: pandas.DataFrame
Results with spearman correlation per broad category with special key "all" for summary
spearman correlation
"""
if isinstance(w, dict):
w = Embedding.from_dict(w)
data = fetch_semeval_2012_2()
mean_vector = np.mean(w.vectors, axis=0, keepdims=True)
categories = data.y.keys()
results = defaultdict(list)
for c in categories:
# Get mean of left and right vector
prototypes = data.X_prot[c]
prot_left = np.mean(np.vstack(w.get(word, mean_vector) for word in prototypes[:, 0]), axis=0)
prot_right = np.mean(np.vstack(w.get(word, mean_vector) for word in prototypes[:, 1]), axis=0)
questions = data.X[c]
question_left, question_right = np.vstack(w.get(word, mean_vector) for word in questions[:, 0]), \
np.vstack(w.get(word, mean_vector) for word in questions[:, 1])
scores = np.dot(prot_left - prot_right, (question_left - question_right).T)
c_name = data.categories_names[c].split("_")[0]
# NaN happens when there are only 0s, which might happen for very rare words or
# very insufficient word vocabulary
cor = scipy.stats.spearmanr(scores, data.y[c]).correlation
results[c_name].append(0 if np.isnan(cor) else cor)
final_results = OrderedDict()
final_results['all'] = sum(sum(v) for v in results.values()) / len(categories)
for k in results:
final_results[k] = sum(results[k]) / len(results[k])
return pd.Series(final_results)
def evaluate_analogy(w, X, y, method="add", k=None, category=None, batch_size=100):
"""
Simple method to score embedding using SimpleAnalogySolver
Parameters
----------
w : Embedding or dict
Embedding or dict instance.
method : {"add", "mul"}
Method to use when finding analogy answer, see "Improving Distributional Similarity
with Lessons Learned from Word Embeddings"
X : array-like, shape (n_samples, 3)
Analogy questions.
y : array-like, shape (n_samples, )
Analogy answers.
k : int, default: None
If not None will select k top most frequent words from embedding
batch_size : int, default: 100
Increase to increase memory consumption and decrease running time
category : list, default: None
Category of each example, if passed function returns accuracy per category
in addition to the overall performance.
Analogy datasets have "category" field that can be supplied here.
Returns
-------
result: dict
Results, where each key is for given category and special empty key "" stores
summarized accuracy across categories
"""
if isinstance(w, dict):
w = Embedding.from_dict(w)
assert category is None or len(category) == y.shape[0], "Passed incorrect category list"
solver = SimpleAnalogySolver(w=w, method=method, batch_size=batch_size, k=k)
y_pred = solver.predict(X)
if category is not None:
results = OrderedDict({"all": np.mean(y_pred == y)})
count = OrderedDict({"all": len(y_pred)})
correct = OrderedDict({"all": np.sum(y_pred == y)})
for cat in set(category):
results[cat] = np.mean(y_pred[category == cat] == y[category == cat])
count[cat] = np.sum(category == cat)
correct[cat] = np.sum(y_pred[category == cat] == y[category == cat])
return pd.concat([pd.Series(results, name="accuracy"),
pd.Series(correct, name="correct"),
pd.Series(count, name="count")],
axis=1)
else:
return np.mean(y_pred == y)
def evaluate_on_WordRep(w, max_pairs=1000, solver_kwargs={}):
"""
Evaluate on WordRep dataset
Parameters
----------
w : Embedding or dict
Embedding or dict instance.
max_pairs: int, default: 1000
Each category will be constrained to maximum of max_pairs pairs
        (which results in max_pairs * (max_pairs - 1) examples)
solver_kwargs: dict, default: {}
Arguments passed to SimpleAnalogySolver. It is suggested to limit number of words
in the dictionary.
References
----------
Bin Gao, Jiang Bian, Tie-Yan Liu (2015)
"WordRep: A Benchmark for Research on Learning Word Representations"
"""
if isinstance(w, dict):
w = Embedding.from_dict(w)
data = fetch_wordrep()
categories = set(data.category)
accuracy = {}
correct = {}
count = {}
for cat in categories:
X_cat = data.X[data.category == cat]
X_cat = X_cat[0:max_pairs]
logger.info("Processing {} with {} pairs, {} questions".format(cat, X_cat.shape[0]
, X_cat.shape[0] * (X_cat.shape[0] - 1)))
# For each category construct question-answer pairs
size = X_cat.shape[0] * (X_cat.shape[0] - 1)
X = np.zeros(shape=(size, 3), dtype="object")
y = np.zeros(shape=(size,), dtype="object")
id = 0
for left, right in product(X_cat, X_cat):
if not np.array_equal(left, right):
X[id, 0:2] = left
X[id, 2] = right[0]
y[id] = right[1]
id += 1
# Run solver
solver = SimpleAnalogySolver(w=w, **solver_kwargs)
y_pred = solver.predict(X)
correct[cat] = float(np.sum(y_pred == y))
count[cat] = size
accuracy[cat] = float(np.sum(y_pred == y)) / size
# Add summary results
correct['wikipedia'] = sum(correct[c] for c in categories if c in data.wikipedia_categories)
correct['all'] = sum(correct[c] for c in categories)
correct['wordnet'] = sum(correct[c] for c in categories if c in data.wordnet_categories)
count['wikipedia'] = sum(count[c] for c in categories if c in data.wikipedia_categories)
count['all'] = sum(count[c] for c in categories)
count['wordnet'] = sum(count[c] for c in categories if c in data.wordnet_categories)
accuracy['wikipedia'] = correct['wikipedia'] / count['wikipedia']
accuracy['all'] = correct['all'] / count['all']
accuracy['wordnet'] = correct['wordnet'] / count['wordnet']
return pd.concat([pd.Series(accuracy, name="accuracy"),
pd.Series(correct, name="correct"),
pd.Series(count, name="count")], axis=1)
def evaluate_similarity(w, X, y):
"""
Calculate Spearman correlation between cosine similarity of the model
and human rated similarity of word pairs
Parameters
----------
w : Embedding or dict
Embedding or dict instance.
X: array, shape: (n_samples, 2)
Word pairs
y: vector, shape: (n_samples,)
Human ratings
Returns
-------
cor: float
Spearman correlation
"""
if isinstance(w, dict):
w = Embedding.from_dict(w)
missing_words = 0
words = w.vocabulary.word_id
for query in X:
for query_word in query:
if query_word not in words:
missing_words += 1
if missing_words > 0:
logger.warning("Missing {} words. Will replace them with mean vector".format(missing_words))
mean_vector = np.mean(w.vectors, axis=0, keepdims=True)
A = np.vstack(w.get(word, mean_vector) for word in X[:, 0])
B = np.vstack(w.get(word, mean_vector) for word in X[:, 1])
scores = np.array([v1.dot(v2.T)/(np.linalg.norm(v1)*np.linalg.norm(v2)) for v1, v2 in zip(A, B)])
return scipy.stats.spearmanr(scores, y).correlation
def evaluate_on_all(w):
"""
Evaluate Embedding on all fast-running benchmarks
Parameters
----------
w: Embedding or dict
Embedding to evaluate.
Returns
-------
results: pandas.DataFrame
DataFrame with results, one per column.
"""
if isinstance(w, dict):
w = Embedding.from_dict(w)
# Calculate results on similarity
logger.info("Calculating similarity benchmarks")
similarity_tasks = {
"MEN": fetch_MEN(),
"WS353": fetch_WS353(),
"WS353R": fetch_WS353(which="relatedness"),
"WS353S": fetch_WS353(which="similarity"),
"SimLex999": fetch_SimLex999(),
"RW": fetch_RW(),
"RG65": fetch_RG65(),
"MTurk": fetch_MTurk(),
}
similarity_results = {}
for name, data in iteritems(similarity_tasks):
similarity_results[name] = evaluate_similarity(w, data.X, data.y)
logger.info("Spearman correlation of scores on {} {}".format(name, similarity_results[name]))
# Calculate results on analogy
logger.info("Calculating analogy benchmarks")
analogy_tasks = {
"Google": fetch_google_analogy(),
"MSR": fetch_msr_analogy()
}
analogy_results = {}
for name, data in iteritems(analogy_tasks):
analogy_results[name] = evaluate_analogy(w, data.X, data.y)
logger.info("Analogy prediction accuracy on {} {}".format(name, analogy_results[name]))
analogy_results["SemEval2012_2"] = evaluate_on_semeval_2012_2(w)['all']
logger.info("Analogy prediction accuracy on {} {}".format("SemEval2012", analogy_results["SemEval2012_2"]))
# Calculate results on categorization
logger.info("Calculating categorization benchmarks")
categorization_tasks = {
"AP": fetch_AP(),
"BLESS": fetch_BLESS(),
"Battig": fetch_battig(),
"ESSLI_2c": fetch_ESSLI_2c(),
"ESSLI_2b": fetch_ESSLI_2b(),
"ESSLI_1a": fetch_ESSLI_1a()
}
categorization_results = {}
# Calculate results using helper function
for name, data in iteritems(categorization_tasks):
categorization_results[name] = evaluate_categorization(w, data.X, data.y)
logger.info("Cluster purity on {} {}".format(name, categorization_results[name]))
# Construct pd table
cat = pd.DataFrame([categorization_results])
analogy = pd.DataFrame([analogy_results])
sim = pd.DataFrame([similarity_results])
results = cat.join(sim).join(analogy)
return results
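
# --- Illustrative usage sketch (editor addition, hedged) ---
# Sketch of scoring a tiny made-up embedding on a two-pair similarity set with
# evaluate_similarity(); the vocabulary and the human ratings are invented purely
# to show the expected input shapes.
def _example_similarity_eval():
    toy_embedding = {
        "cat": np.array([1.0, 0.0]),
        "dog": np.array([0.9, 0.1]),
        "car": np.array([0.0, 1.0]),
    }
    word_pairs = np.array([["cat", "dog"], ["cat", "car"]])
    human_scores = np.array([9.0, 1.0])
    return evaluate_similarity(toy_embedding, word_pairs, human_scores)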
|
|
from distutils.command.config import config
import numpy
import copy
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from bumps import options
from bumps import fitters
import sas.qtgui.Utilities.LocalConfig as LocalConfig
import sas.qtgui.Utilities.ObjectLibrary as ObjectLibrary
import sas.qtgui.Utilities.GuiUtils as GuiUtils
from sas.qtgui.Perspectives.Fitting.Constraint import Constraint
from sas.qtgui.Perspectives.Fitting.FittingWidget import FittingWidget
from sas.qtgui.Perspectives.Fitting.ConstraintWidget import ConstraintWidget
from sas.qtgui.Perspectives.Fitting.FittingOptions import FittingOptions
from sas.qtgui.Perspectives.Fitting.GPUOptions import GPUOptions
class FittingWindow(QtWidgets.QTabWidget):
"""
"""
tabsModifiedSignal = QtCore.pyqtSignal()
fittingStartedSignal = QtCore.pyqtSignal(list)
fittingStoppedSignal = QtCore.pyqtSignal(list)
name = "Fitting" # For displaying in the combo box in DataExplorer
ext = "fitv" # Extension used for saving analyses
def __init__(self, parent=None, data=None):
super(FittingWindow, self).__init__()
self.parent = parent
self._data = data
# List of active fits
self.tabs = []
# Max index for adding new, non-clashing tab names
self.maxIndex = 1
# The default optimizer
self.optimizer = 'Levenberg-Marquardt'
# Dataset index -> Fitting tab mapping
self.dataToFitTab = {}
# The tabs need to be closeable
self.setTabsClosable(True)
        # The tabs need to be movable
self.setMovable(True)
self.communicate = self.parent.communicator()
# Initialize the first tab
self.addFit(None)
# Deal with signals
self.tabCloseRequested.connect(self.tabCloses)
self.communicate.dataDeletedSignal.connect(self.dataDeleted)
self.fittingStartedSignal.connect(self.onFittingStarted)
self.fittingStoppedSignal.connect(self.onFittingStopped)
self.communicate.copyFitParamsSignal.connect(self.onParamCopy)
self.communicate.pasteFitParamsSignal.connect(self.onParamPaste)
self.communicate.copyExcelFitParamsSignal.connect(self.onExcelCopy)
self.communicate.copyLatexFitParamsSignal.connect(self.onLatexCopy)
self.communicate.SaveFitParamsSignal.connect(self.onParamSave)
# Perspective window not allowed to close by default
self._allow_close = False
# Fit options - uniform for all tabs
self.fit_options = options.FIT_CONFIG
self.fit_options_widget = FittingOptions(self, config=self.fit_options)
self.fit_options.selected_id = fitters.LevenbergMarquardtFit.id
# Listen to GUI Manager signal updating fit options
self.fit_options_widget.fit_option_changed.connect(self.onFittingOptionsChange)
# GPU Options
self.gpu_options_widget = GPUOptions(self)
self.updateWindowTitle()
# Add new tab mini-button
self.plusButton = QtWidgets.QToolButton(self)
self.plusButton.setText("+")
self.setCornerWidget(self.plusButton)
self.plusButton.setToolTip("Add a new Fit Page")
self.plusButton.clicked.connect(lambda: self.addFit(None))
def updateWindowTitle(self):
"""
Update the window title with the current optimizer name
"""
self.optimizer = self.fit_options.selected_name
self.setWindowTitle('Fit panel - Active Fitting Optimizer: %s' % self.optimizer)
def setClosable(self, value=True):
"""
Allow outsiders to close this widget
"""
assert isinstance(value, bool)
self._allow_close = value
def onParamCopy(self):
self.currentTab.onCopyToClipboard("")
def onParamPaste(self):
self.currentTab.onParameterPaste()
def onExcelCopy(self):
self.currentTab.onCopyToClipboard("Excel")
def onLatexCopy(self):
self.currentTab.onCopyToClipboard("Latex")
def serializeAll(self):
return self.serializeAllFitpage()
def serializeAllFitpage(self):
# serialize all active fitpages and return
# a dictionary: {data_id: fitpage_state}
state = {}
for i, tab in enumerate(self.tabs):
tab_state = self.getSerializedFitpage(tab)
for key, value in tab_state.items():
if key in state:
state[key].update(value)
else:
state[key] = value
return state
def serializeCurrentPage(self):
# serialize current(active) fitpage
return self.getSerializedFitpage(self.currentTab)
def getSerializedFitpage(self, tab):
"""
        Serialize the requested fit tab
"""
state = {}
fitpage_state = tab.getFitPage()
fitpage_state += tab.getFitModel()
# put the text into dictionary
line_dict = {}
for line in fitpage_state:
#content = line.split(',')
if len(line) > 1:
line_dict[line[0]] = line[1:]
if 'data_id' not in line_dict: return state
id = line_dict['data_id'][0]
if not isinstance(id, list):
id = [id]
for i in id:
if 'is_constraint' in line_dict.keys():
state[i] = line_dict
            elif i in state and 'fit_params' in state[i]:
                state[i]['fit_params'].append(line_dict)
else:
state[i] = {'fit_params': [line_dict]}
return state
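    # Note on the serialized shape (editor addition): getSerializedFitpage() keys
    # the returned dict by data id; each value is either the raw line_dict for a
    # constraint page or {'fit_params': [line_dict]} for a regular fit page, which
    # is what serializeAllFitpage() merges across tabs.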
def currentTabDataId(self):
"""
Returns the data ID of the current tab
"""
tab_id = []
if not self.currentTab.data:
return tab_id
for item in self.currentTab.all_data:
data = GuiUtils.dataFromItem(item)
tab_id.append(data.id)
return tab_id
def updateFromParameters(self, parameters):
"""
Pass the update parameters to the current fit page
"""
self.currentTab.createPageForParameters(parameters)
def updateFromConstraints(self, constraint_dict):
"""
Updates all tabs with constraints present in *constraint_dict*, where
*constraint_dict* keys are the fit page name, and the value is a
list of constraints. A constraint is represented by a list [value,
param, value_ex, validate, function] of attributes of a Constraint
object
"""
for fit_page_name, constraint_list in constraint_dict.items():
tab = self.getTabByName(fit_page_name)
for constraint_param in constraint_list:
if constraint_param is not None and len(constraint_param) == 5:
constraint = Constraint()
constraint.value = constraint_param[0]
constraint.func = constraint_param[4]
constraint.param = constraint_param[1]
constraint.value_ex = constraint_param[2]
constraint.validate = constraint_param[3]
tab.addConstraintToRow(constraint=constraint,
row=tab.getRowFromName(
constraint_param[1]))
def onParamSave(self):
self.currentTab.onCopyToClipboard("Save")
def closeEvent(self, event):
"""
Overwrite QDialog close method to allow for custom widget close
"""
# Invoke fit page events
if self._allow_close:
# reset the closability flag
self.setClosable(value=False)
# Tell the MdiArea to close the container if it is visible
if self.parentWidget():
self.parentWidget().close()
event.accept()
else:
# Maybe we should just minimize
self.setWindowState(QtCore.Qt.WindowMinimized)
event.ignore()
def addFit(self, data, is_batch=False, tab_index=None):
"""
Add a new tab for passed data
"""
if tab_index is None:
tab_index = self.maxIndex
else:
self.maxIndex = tab_index
tab = FittingWidget(parent=self.parent, data=data, tab_id=tab_index)
tab.is_batch_fitting = is_batch
# Add this tab to the object library so it can be retrieved by scripting/jupyter
tab_name = self.getTabName(is_batch=is_batch)
ObjectLibrary.addObject(tab_name, tab)
self.tabs.append(tab)
if data:
self.updateFitDict(data, tab_name)
self.maxIndex = max([tab.tab_id for tab in self.tabs], default=0) + 1
icon = QtGui.QIcon()
if is_batch:
icon.addPixmap(QtGui.QPixmap("src/sas/qtgui/images/icons/layers.svg"))
self.addTab(tab, icon, tab_name)
# Show the new tab
self.setCurrentWidget(tab)
# Notify listeners
self.tabsModifiedSignal.emit()
def addConstraintTab(self):
"""
Add a new C&S fitting tab
"""
tabs = [isinstance(tab, ConstraintWidget) for tab in self.tabs]
if any(tabs):
# We already have a C&S tab: show it
self.setCurrentIndex(tabs.index(True))
return
tab = ConstraintWidget(parent=self)
# Add this tab to the object library so it can be retrieved by scripting/jupyter
tab_name = self.getCSTabName() # TODO update the tab name scheme
ObjectLibrary.addObject(tab_name, tab)
self.tabs.append(tab)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("src/sas/qtgui/images/icons/link.svg"))
self.addTab(tab, icon, tab_name)
# This will be the last tab, so set the index accordingly
self.setCurrentIndex(self.count()-1)
def updateFitDict(self, item_key, tab_name):
"""
Create a list if none exists and append if there's already a list
"""
item_key_str = str(item_key)
if item_key_str in list(self.dataToFitTab.keys()):
self.dataToFitTab[item_key_str].append(tab_name)
else:
self.dataToFitTab[item_key_str] = [tab_name]
def getTabName(self, is_batch=False):
"""
Get the new tab name, based on the number of fitting tabs so far
"""
page_name = "BatchPage" if is_batch else "FitPage"
page_name = page_name + str(self.maxIndex)
return page_name
def getCSTabName(self):
"""
        Get the name of the constrained & simultaneous fit tab
"""
page_name = "Const. & Simul. Fit"
return page_name
def closeTabByIndex(self, index):
"""
Close/delete a tab with the given index.
No checks on validity of the index.
"""
try:
ObjectLibrary.deleteObjectByRef(self.tabs[index])
self.removeTab(index)
del self.tabs[index]
self.tabsModifiedSignal.emit()
except IndexError:
# The tab might have already been deleted previously
pass
def resetTab(self, index):
"""
        Adds a new tab and removes the tab at the given index,
        as a way of resetting the fit tabs
"""
# If data on tab empty - do nothing
if index in self.tabs and not self.tabs[index].data:
return
        # Add a new, empty tab
self.addFit(None)
# Remove the previous last tab
self.tabCloses(index)
def tabCloses(self, index):
"""
Update local bookkeeping on tab close
"""
# don't remove the last tab
if len(self.tabs) <= 1:
self.resetTab(index)
return
self.closeTabByIndex(index)
def closeTabByName(self, tab_name):
"""
Given name of the fitting tab - close it
"""
for tab_index in range(len(self.tabs)):
if self.tabText(tab_index) == tab_name:
self.tabCloses(tab_index)
pass # debug hook
def dataDeleted(self, index_list):
"""
Delete fit tabs referencing given data
"""
if not index_list or not self.dataToFitTab:
return
for index_to_delete in index_list:
index_to_delete_str = str(index_to_delete)
orig_dict = copy.deepcopy(self.dataToFitTab)
for tab_key in orig_dict.keys():
if index_to_delete_str in tab_key:
for tab_name in orig_dict[tab_key]:
self.closeTabByName(tab_name)
self.dataToFitTab.pop(tab_key)
def allowBatch(self):
"""
Tell the caller that we accept multiple data instances
"""
return True
def allowSwap(self):
"""
Tell the caller that you can swap data
"""
return True
def isSerializable(self):
"""
Tell the caller that this perspective writes its state
"""
return True
def setData(self, data_item=None, is_batch=False, tab_index=None):
"""
Assign new dataset to the fitting instance
Obtain a QStandardItem object and dissect it to get Data1D/2D
Pass it over to the calculator
"""
assert data_item is not None
if not isinstance(data_item, list):
msg = "Incorrect type passed to the Fitting Perspective"
raise AttributeError(msg)
if not isinstance(data_item[0], QtGui.QStandardItem):
msg = "Incorrect type passed to the Fitting Perspective"
raise AttributeError(msg)
if is_batch:
# Just create a new fit tab. No empty batchFit tabs
self.addFit(data_item, is_batch=is_batch)
return
items = [data_item] if is_batch else data_item
for data in items:
# Find the first unassigned tab.
# If none, open a new tab.
available_tabs = [tab.acceptsData() for tab in self.tabs]
tab_ids = [tab.tab_id for tab in self.tabs]
if tab_index is not None:
if tab_index not in tab_ids:
self.addFit(data, is_batch=is_batch, tab_index=tab_index)
else:
self.setCurrentIndex(tab_index-1)
self.swapData(data)
return
if numpy.any(available_tabs):
first_good_tab = available_tabs.index(True)
self.tabs[first_good_tab].data = data
tab_name = str(self.tabText(first_good_tab))
self.updateFitDict(data, tab_name)
else:
self.addFit(data, is_batch=is_batch)
def swapData(self, data):
"""
Replace the data from the current fitting tab
"""
if not isinstance(self.currentWidget(), FittingWidget):
msg = "Current tab is not a fitting widget"
raise TypeError(msg)
if not isinstance(data, QtGui.QStandardItem):
msg = "Incorrect type passed to the Fitting Perspective"
raise AttributeError(msg)
if self.currentTab.is_batch_fitting:
msg = "Data in Batch Fitting cannot be swapped"
raise RuntimeError(msg)
self.currentTab.data = data
tab_name = str(self.tabText(self.currentIndex()))
self.updateFitDict(data, tab_name)
def onFittingOptionsChange(self, fit_engine):
"""
React to the fitting algorithm change by modifying window title
"""
fitter = [f.id for f in options.FITTERS if f.name == str(fit_engine)][0]
# set the optimizer
self.fit_options.selected_id = str(fitter)
# Update the title
self.updateWindowTitle()
def onFittingStarted(self, tabs_for_fitting=None):
"""
Notify tabs listed in tabs_for_fitting
that the fitting thread started
"""
assert(isinstance(tabs_for_fitting, list))
assert(len(tabs_for_fitting)>0)
for tab_object in self.tabs:
if not isinstance(tab_object, FittingWidget):
continue
page_name = "Page%s"%tab_object.tab_id
if any([page_name in tab for tab in tabs_for_fitting]):
tab_object.disableInteractiveElements()
pass
def onFittingStopped(self, tabs_for_fitting=None):
"""
Notify tabs listed in tabs_for_fitting
that the fitting thread stopped
"""
assert(isinstance(tabs_for_fitting, list))
assert(len(tabs_for_fitting)>0)
for tab_object in self.tabs:
if not isinstance(tab_object, FittingWidget):
continue
page_name = "Page%s"%tab_object.tab_id
if any([page_name in tab for tab in tabs_for_fitting]):
tab_object.enableInteractiveElements()
def getCurrentStateAsXml(self):
"""
Returns an XML version of the current state
"""
state = {}
        for tab in self.tabs:
            # Per-tab state serialization is not implemented here; this loop is a stub.
            pass
return state
@property
def currentTab(self):
"""
Returns the tab widget currently shown
"""
return self.currentWidget()
def getFitTabs(self):
"""
Returns the list of fitting tabs
"""
return [tab for tab in self.tabs if isinstance(tab, FittingWidget)]
def getActiveConstraintList(self):
"""
Returns a list of the constraints for all fitting tabs. Constraints
are a tuple of strings (parameter, expression) e.g. ('M1.scale',
'M2.scale + 2')
"""
constraints = []
for tab in self.getFitTabs():
tab_name = tab.modelName()
tab_constraints = tab.getConstraintsForModel()
constraints.extend((tab_name + "." + par, expr)
for par, expr in tab_constraints)
return constraints
def getSymbolDictForConstraints(self):
"""
Returns a dictionary containing all the symbols in all constrained tabs
and their values.
"""
symbol_dict = {}
for tab in self.getFitTabs():
symbol_dict.update(tab.getSymbolDict())
return symbol_dict
def getConstraintTab(self):
"""
Returns the constraint tab, or None if no constraint tab is active
"""
if any(isinstance(tab, ConstraintWidget) for tab in self.tabs):
constraint_tab = next(tab
for tab in self.tabs
if isinstance(tab, ConstraintWidget))
else:
constraint_tab = None
return constraint_tab
def getTabByName(self, name):
"""
        Returns the tab with attribute name *name*
"""
assert isinstance(name, str)
for tab in self.tabs:
if tab.modelName() == name:
return tab
return None
|
|
#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Global system tests for V8 test runners and fuzzers.
This hooks up the framework under tools/testrunner testing high-level scenarios
with different test suite extensions and build configurations.
"""
# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
# independent.
# TODO(machenbach): Move coverage recording to a global test entry point to
# include other unittest suites in the coverage report.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.
import collections
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from cStringIO import StringIO
TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')
Result = collections.namedtuple(
'Result', ['stdout', 'stderr', 'returncode'])
Result.__str__ = lambda self: (
'\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
(self.returncode, self.stdout, self.stderr))
@contextlib.contextmanager
def temp_dir():
"""Wrapper making a temporary directory available."""
path = None
try:
path = tempfile.mkdtemp('v8_test_')
yield path
finally:
if path:
shutil.rmtree(path)
@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
"""Wrapper that sets up a temporary V8 test root.
Args:
baseroot: The folder with the test root blueprint. Relevant files will be
copied to the temporary test root, to guarantee a fresh setup with no
dirty state.
"""
basedir = os.path.join(TEST_DATA_ROOT, baseroot)
with temp_dir() as tempbase:
builddir = os.path.join(tempbase, 'out', 'Release')
testroot = os.path.join(tempbase, 'test')
os.makedirs(builddir)
shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)
for suite in os.listdir(os.path.join(basedir, 'test')):
os.makedirs(os.path.join(testroot, suite))
for entry in os.listdir(os.path.join(basedir, 'test', suite)):
shutil.copy(
os.path.join(basedir, 'test', suite, entry),
os.path.join(testroot, suite))
yield tempbase
@contextlib.contextmanager
def capture():
"""Wrapper that replaces system stdout/stderr an provides the streams."""
oldout = sys.stdout
olderr = sys.stderr
try:
    stdout = StringIO()
    stderr = StringIO()
sys.stdout = stdout
sys.stderr = stderr
yield stdout, stderr
finally:
sys.stdout = oldout
sys.stderr = olderr
def run_tests(basedir, *args, **kwargs):
"""Executes the test runner with captured output."""
with capture() as (stdout, stderr):
sys_args = ['--command-prefix', sys.executable] + list(args)
if kwargs.get('infra_staging', False):
sys_args.append('--infra-staging')
else:
sys_args.append('--no-infra-staging')
code = standard_runner.StandardTestRunner(
basedir=basedir).execute(sys_args)
return Result(stdout.getvalue(), stderr.getvalue(), code)
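# Usage sketch (illustrative; the argument values mirror the tests below):
#   result = run_tests(basedir, '--mode=Release', '--progress=verbose',
#                      'sweet/bananas')
#   assert result.returncode == 0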
def override_build_config(basedir, **kwargs):
"""Override the build config with new values provided as kwargs."""
path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json')
with open(path) as f:
config = json.load(f)
config.update(kwargs)
with open(path, 'w') as f:
json.dump(config, f)
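# Usage sketch (illustrative, mirrors calls made in the tests below):
#   override_build_config(basedir, is_debug=True)
# rewrites out/Release/v8_build_config.json inside the temporary test root.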
class SystemTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Try to set up python coverage and run without it if not available.
cls._cov = None
try:
import coverage
if int(coverage.__version__.split('.')[0]) < 4:
cls._cov = None
print 'Python coverage version >= 4 required.'
raise ImportError()
cls._cov = coverage.Coverage(
source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
omit=['*unittest*', '*__init__.py'],
)
cls._cov.exclude('raise NotImplementedError')
cls._cov.exclude('if __name__ == .__main__.:')
cls._cov.exclude('except TestRunnerError:')
cls._cov.exclude('except KeyboardInterrupt:')
cls._cov.exclude('if options.verbose:')
cls._cov.exclude('if verbose:')
cls._cov.exclude('pass')
cls._cov.exclude('assert False')
cls._cov.start()
except ImportError:
print 'Running without python coverage.'
sys.path.append(TOOLS_ROOT)
global standard_runner
from testrunner import standard_runner
from testrunner.local import command
from testrunner.local import pool
command.setup_testing()
pool.setup_testing()
@classmethod
def tearDownClass(cls):
if cls._cov:
cls._cov.stop()
print ''
print cls._cov.report(show_missing=True)
def testPass(self):
"""Test running only passing tests in two variants.
Also test printing durations.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'--time',
'sweet/bananas',
'sweet/raspberries',
)
self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
# TODO(majeski): Implement for test processors
# self.assertIn('Total time:', result.stderr, result)
# self.assertIn('sweet/bananas', result.stderr, result)
self.assertEqual(0, result.returncode, result)
def testShardedProc(self):
with temp_base() as basedir:
for shard in [1, 2]:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'--shard-count=2',
'--shard-run=%d' % shard,
'sweet/bananas',
'sweet/raspberries',
infra_staging=True,
)
# One of the shards gets one variant of each test.
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
if shard == 1:
self.assertIn('Done running sweet/bananas', result.stdout, result)
else:
self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
@unittest.skip("incompatible with test processors")
def testSharded(self):
"""Test running a particular shard."""
with temp_base() as basedir:
for shard in [1, 2]:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'--shard-count=2',
'--shard-run=%d' % shard,
'sweet/bananas',
'sweet/raspberries',
)
# One of the shards gets one variant of each test.
self.assertIn('Running 2 tests', result.stdout, result)
self.assertIn('Done running sweet/bananas', result.stdout, result)
self.assertIn('Done running sweet/raspberries', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testFailProc(self):
self.testFail(infra_staging=True)
def testFail(self, infra_staging=True):
"""Test running only failing tests in two variants."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'sweet/strawberries',
infra_staging=infra_staging,
)
if not infra_staging:
self.assertIn('Running 2 tests', result.stdout, result)
else:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def check_cleaned_json_output(self, expected_results_name, actual_json):
# Check relevant properties of the json output.
with open(actual_json) as f:
json_output = json.load(f)[0]
pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
# Replace duration in actual output as it's non-deterministic. Also
# replace the python executable prefix as it has a different absolute
# path dependent on where this runs.
def replace_variable_data(data):
data['duration'] = 1
data['command'] = ' '.join(
['/usr/bin/python'] + data['command'].split()[1:])
for data in json_output['slowest_tests']:
replace_variable_data(data)
for data in json_output['results']:
replace_variable_data(data)
json_output['duration_mean'] = 1
with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
expected_test_results = json.load(f)
msg = None # Set to pretty_json for bootstrapping.
self.assertDictEqual(json_output, expected_test_results, msg)
def testFailWithRerunAndJSONProc(self):
self.testFailWithRerunAndJSON(infra_staging=True)
def testFailWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json."""
with temp_base() as basedir:
json_path = os.path.join(basedir, 'out.json')
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--rerun-failures-count=2',
'--random-seed=123',
'--json-test-results', json_path,
'sweet/strawberries',
infra_staging=infra_staging,
)
if not infra_staging:
self.assertIn('Running 1 tests', result.stdout, result)
else:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
if not infra_staging:
# We run one test, which fails and gets re-run twice.
self.assertIn('3 tests failed', result.stdout, result)
else:
# With test processors we don't count reruns as separated failures.
# TODO(majeski): fix it?
self.assertIn('1 tests failed', result.stdout, result)
self.assertEqual(0, result.returncode, result)
# TODO(majeski): Previously we only reported the variant flags in the
# flags field of the test result.
# After recent changes we report all flags, including the file names.
# This is redundant to the command. Needs investigation.
self.maxDiff = None
self.check_cleaned_json_output('expected_test_results1.json', json_path)
def testFlakeWithRerunAndJSONProc(self):
self.testFlakeWithRerunAndJSON(infra_staging=True)
def testFlakeWithRerunAndJSON(self, infra_staging=True):
"""Test re-running a failing test and output to json."""
with temp_base(baseroot='testroot2') as basedir:
json_path = os.path.join(basedir, 'out.json')
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--rerun-failures-count=2',
'--random-seed=123',
'--json-test-results', json_path,
'sweet',
infra_staging=infra_staging,
)
if not infra_staging:
self.assertIn('Running 1 tests', result.stdout, result)
self.assertIn(
'Done running sweet/bananaflakes: FAIL', result.stdout, result)
self.assertIn('1 tests failed', result.stdout, result)
else:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn(
'Done running sweet/bananaflakes: pass', result.stdout, result)
self.assertIn('All tests succeeded', result.stdout, result)
self.assertEqual(0, result.returncode, result)
self.maxDiff = None
self.check_cleaned_json_output('expected_test_results2.json', json_path)
def testAutoDetect(self):
"""Fake a build with several auto-detected options.
Using all those options at once doesn't really make much sense. This is
merely for getting coverage.
"""
with temp_base() as basedir:
override_build_config(
basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
v8_enable_i18n_support=False, v8_target_cpu='x86',
v8_use_snapshot=False)
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'sweet/bananas',
)
expect_text = (
'>>> Autodetected:\n'
'asan\n'
'cfi_vptr\n'
'dcheck_always_on\n'
'msan\n'
'no_i18n\n'
'no_snap\n'
'tsan\n'
'ubsan_vptr\n'
'>>> Running tests for ia32.release')
self.assertIn(expect_text, result.stdout, result)
self.assertEqual(0, result.returncode, result)
# TODO(machenbach): Test some more implications of the auto-detected
# options, e.g. that the right env variables are set.
def testSkipsProc(self):
self.testSkips(infra_staging=True)
def testSkips(self, infra_staging=True):
"""Test skipping tests in status file for a specific variant."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=nooptimization',
'sweet/strawberries',
infra_staging=infra_staging,
)
if not infra_staging:
self.assertIn('Running 0 tests', result.stdout, result)
else:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
def testDefaultProc(self):
self.testDefault(infra_staging=True)
def testDefault(self, infra_staging=True):
"""Test using default test suites, though no tests are run since they don't
exist in a test setting.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
infra_staging=infra_staging,
)
if not infra_staging:
self.assertIn('Warning: no tests were run!', result.stdout, result)
else:
self.assertIn('Running 0 base tests', result.stdout, result)
self.assertIn('0 tests ran', result.stdout, result)
self.assertEqual(2, result.returncode, result)
def testNoBuildConfig(self):
"""Test failing run when build config is not found."""
with temp_base() as basedir:
result = run_tests(basedir)
self.assertIn('Failed to load build config', result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testGNOption(self):
"""Test using gn option, but no gn build folder is found."""
with temp_base() as basedir:
# TODO(machenbach): This should fail gracefully.
with self.assertRaises(OSError):
run_tests(basedir, '--gn')
def testInconsistentMode(self):
"""Test failing run when attempting to wrongly override the mode."""
with temp_base() as basedir:
override_build_config(basedir, is_debug=True)
result = run_tests(basedir, '--mode=Release')
self.assertIn('execution mode (release) for release is inconsistent '
'with build config (debug)', result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testInconsistentArch(self):
"""Test failing run when attempting to wrongly override the arch."""
with temp_base() as basedir:
result = run_tests(basedir, '--mode=Release', '--arch=ia32')
self.assertIn(
'--arch value (ia32) inconsistent with build config (x64).',
result.stdout, result)
self.assertEqual(5, result.returncode, result)
def testWrongVariant(self):
"""Test using a bogus variant."""
with temp_base() as basedir:
result = run_tests(basedir, '--mode=Release', '--variants=meh')
self.assertEqual(5, result.returncode, result)
def testModeFromBuildConfig(self):
"""Test auto-detection of mode from build config."""
with temp_base() as basedir:
result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas')
self.assertIn('Running tests for x64.release', result.stdout, result)
self.assertEqual(0, result.returncode, result)
@unittest.skip("not available with test processors")
def testReport(self):
"""Test the report feature.
This also exercises various paths in statusfile logic.
"""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--variants=default',
'sweet',
'--report',
)
self.assertIn(
'3 tests are expected to fail that we should fix',
result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("not available with test processors")
def testWarnUnusedRules(self):
"""Test the unused-rules feature."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--variants=default,nooptimization',
'sweet',
'--warn-unused',
)
      self.assertIn('Unused rule: carrots', result.stdout, result)
      self.assertIn('Unused rule: regress/', result.stdout, result)
self.assertEqual(1, result.returncode, result)
@unittest.skip("not available with test processors")
def testCatNoSources(self):
"""Test printing sources, but the suite's tests have none available."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--variants=default',
'sweet/bananas',
'--cat',
)
self.assertIn('begin source: sweet/bananas', result.stdout, result)
self.assertIn('(no source available)', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testPredictableProc(self):
self.testPredictable(infra_staging=True)
def testPredictable(self, infra_staging=True):
"""Test running a test in verify-predictable mode.
The test will fail because of missing allocation output. We verify that and
that the predictable flags are passed and printed after failure.
"""
with temp_base() as basedir:
override_build_config(basedir, v8_enable_verify_predictable=True)
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'sweet/bananas',
infra_staging=infra_staging,
)
if not infra_staging:
self.assertIn('Running 1 tests', result.stdout, result)
else:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('1 tests ran', result.stdout, result)
self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
self.assertIn('Test had no allocation output', result.stdout, result)
self.assertIn('--predictable --verify_predictable', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testSlowArch(self):
"""Test timeout factor manipulation on slow architecture."""
with temp_base() as basedir:
override_build_config(basedir, v8_target_cpu='arm64')
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'sweet/bananas',
)
# TODO(machenbach): We don't have a way for testing if the correct
# timeout was used.
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithDefaultProc(self):
self.testRandomSeedStressWithDefault(infra_staging=True)
def testRandomSeedStressWithDefault(self, infra_staging=True):
"""Test using random-seed-stress feature has the right number of tests."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--random-seed-stress-count=2',
'sweet/bananas',
infra_staging=infra_staging,
)
if infra_staging:
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
else:
self.assertIn('Running 2 tests', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testRandomSeedStressWithSeed(self):
"""Test using random-seed-stress feature passing a random seed."""
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default',
'--random-seed-stress-count=2',
'--random-seed=123',
'sweet/strawberries',
)
self.assertIn('Running 1 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
# We use a failing test so that the command is printed and we can verify
# that the right random seed was passed.
self.assertIn('--random-seed=123', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testSpecificVariants(self):
"""Test using NO_VARIANTS modifiers in status files skips the desire tests.
The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
But the status file applies a modifier to each skipping one of the
variants.
"""
with temp_base() as basedir:
override_build_config(basedir, v8_use_snapshot=False)
result = run_tests(
basedir,
'--mode=Release',
'--progress=verbose',
'--variants=default,stress',
'sweet/bananas',
'sweet/raspberries',
)
# Both tests are either marked as running in only default or only
# slow variant.
self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
self.assertEqual(0, result.returncode, result)
def testStatusFilePresubmit(self):
"""Test that the fake status file is well-formed."""
with temp_base() as basedir:
from testrunner.local import statusfile
self.assertTrue(statusfile.PresubmitCheck(
os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
def testDotsProgressProc(self):
self.testDotsProgress(infra_staging=True)
def testDotsProgress(self, infra_staging=True):
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=dots',
'sweet/cherries',
'sweet/bananas',
'--no-sorting', '-j1', # make results order deterministic
infra_staging=infra_staging,
)
if not infra_staging:
self.assertIn('Running 2 tests', result.stdout, result)
else:
self.assertIn('Running 2 base tests', result.stdout, result)
self.assertIn('2 tests ran', result.stdout, result)
self.assertIn('F.', result.stdout, result)
self.assertEqual(1, result.returncode, result)
def testMonoProgressProc(self):
self._testCompactProgress('mono', True)
def testMonoProgress(self):
self._testCompactProgress('mono', False)
def testColorProgressProc(self):
self._testCompactProgress('color', True)
def testColorProgress(self):
self._testCompactProgress('color', False)
def _testCompactProgress(self, name, infra_staging):
with temp_base() as basedir:
result = run_tests(
basedir,
'--mode=Release',
'--progress=%s' % name,
'sweet/cherries',
'sweet/bananas',
infra_staging=infra_staging,
)
if name == 'color':
expected = ('\033[34m% 100\033[0m|'
'\033[32m+ 1\033[0m|'
'\033[31m- 1\033[0m]: Done')
else:
expected = '% 100|+ 1|- 1]: Done'
self.assertIn(expected, result.stdout)
self.assertIn('sweet/cherries', result.stdout)
self.assertIn('sweet/bananas', result.stdout)
self.assertEqual(1, result.returncode, result)
if __name__ == '__main__':
unittest.main()
|
|
import os
import pytest
import pandas as pd
import pandas.util.testing as tm
import ibis
import ibis.expr.datatypes as dt
pytest.importorskip('google.cloud.bigquery')
pytestmark = pytest.mark.bigquery
from ibis.bigquery.api import udf # noqa: E402
PROJECT_ID = os.environ.get('GOOGLE_BIGQUERY_PROJECT_ID', 'ibis-gbq')
DATASET_ID = 'testing'
@pytest.fixture(scope='module')
def client():
ga = pytest.importorskip('google.auth')
try:
return ibis.bigquery.connect(PROJECT_ID, DATASET_ID)
except ga.exceptions.DefaultCredentialsError:
pytest.skip("no credentials found, skipping")
@pytest.fixture(scope='module')
def alltypes(client):
t = client.table('functional_alltypes')
expr = t[t.bigint_col.isin([10, 20])].limit(10)
return expr
@pytest.fixture(scope='module')
def df(alltypes):
return alltypes.execute()
def test_udf(client, alltypes, df):
@udf(input_type=[dt.double, dt.double], output_type=dt.double)
def my_add(a, b):
return a + b
expr = my_add(alltypes.double_col, alltypes.double_col)
result = expr.execute()
assert not result.empty
expected = (df.double_col + df.double_col).rename('tmp')
tm.assert_series_equal(
result.value_counts().sort_index(),
expected.value_counts().sort_index()
)
def test_udf_with_struct(client, alltypes, df):
@udf(
input_type=[dt.double, dt.double],
output_type=dt.Struct.from_tuples([
('width', dt.double),
('height', dt.double)
])
)
def my_struct_thing(a, b):
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
return Rectangle(a, b)
assert my_struct_thing.js == '''\
CREATE TEMPORARY FUNCTION my_struct_thing_0(a FLOAT64, b FLOAT64)
RETURNS STRUCT<width FLOAT64, height FLOAT64>
LANGUAGE js AS """
'use strict';
function my_struct_thing(a, b) {
class Rectangle {
constructor(width, height) {
this.width = width;
this.height = height;
}
}
return (new Rectangle(a, b));
}
return my_struct_thing(a, b);
""";'''
expr = my_struct_thing(alltypes.double_col, alltypes.double_col)
result = expr.execute()
assert not result.empty
expected = pd.Series(
[{'width': c, 'height': c} for c in df.double_col],
name='tmp'
)
tm.assert_series_equal(result, expected)
def test_udf_compose(client, alltypes, df):
@udf([dt.double], dt.double)
def add_one(x):
return x + 1.0
@udf([dt.double], dt.double)
def times_two(x):
return x * 2.0
t = alltypes
expr = times_two(add_one(t.double_col))
result = expr.execute()
expected = ((df.double_col + 1.0) * 2.0).rename('tmp')
tm.assert_series_equal(result, expected)
def test_udf_scalar(client):
@udf([dt.double, dt.double], dt.double)
def my_add(x, y):
return x + y
expr = my_add(1, 2)
result = client.execute(expr)
assert result == 3
def test_multiple_calls_has_one_definition(client):
@udf([dt.string], dt.double)
def my_str_len(s):
return s.length
s = ibis.literal('abcd')
expr = my_str_len(s) + my_str_len(s)
sql = client.compile(expr)
expected = '''\
CREATE TEMPORARY FUNCTION my_str_len_0(s STRING)
RETURNS FLOAT64
LANGUAGE js AS """
'use strict';
function my_str_len(s) {
return s.length;
}
return my_str_len(s);
""";
SELECT my_str_len_0('abcd') + my_str_len_0('abcd') AS `tmp`'''
assert sql == expected
result = client.execute(expr)
assert result == 8.0
def test_udf_libraries(client):
@udf(
[dt.Array(dt.string)],
dt.double,
# whatever symbols are exported in the library are visible inside the
# UDF, in this case lodash defines _ and we use that here
libraries=['gs://ibis-testing-libraries/lodash.min.js']
)
def string_length(strings):
return _.sum(_.map(strings, lambda x: x.length)) # noqa: F821
raw_data = ['aaa', 'bb', 'c']
data = ibis.literal(raw_data)
expr = string_length(data)
result = client.execute(expr)
expected = sum(map(len, raw_data))
assert result == expected
def test_udf_with_len(client):
@udf([dt.string], dt.double)
def my_str_len(x):
return len(x)
@udf([dt.Array(dt.string)], dt.double)
def my_array_len(x):
return len(x)
assert client.execute(my_str_len('aaa')) == 3
assert client.execute(my_array_len(['aaa', 'bb'])) == 2
def test_multiple_calls_redefinition(client):
@udf([dt.string], dt.double)
def my_len(s):
return s.length
s = ibis.literal('abcd')
expr = my_len(s) + my_len(s)
@udf([dt.string], dt.double)
def my_len(s):
return s.length + 1
expr = expr + my_len(s)
sql = client.compile(expr)
expected = '''\
CREATE TEMPORARY FUNCTION my_len_0(s STRING)
RETURNS FLOAT64
LANGUAGE js AS """
'use strict';
function my_len(s) {
return s.length;
}
return my_len(s);
""";
CREATE TEMPORARY FUNCTION my_len_1(s STRING)
RETURNS FLOAT64
LANGUAGE js AS """
'use strict';
function my_len(s) {
return (s.length + 1);
}
return my_len(s);
""";
SELECT (my_len_0('abcd') + my_len_0('abcd')) + my_len_1('abcd') AS `tmp`'''
assert sql == expected
@pytest.mark.parametrize(
('argument_type', 'return_type'),
[
pytest.mark.xfail((dt.int64, dt.float64), raises=TypeError),
pytest.mark.xfail((dt.float64, dt.int64), raises=TypeError),
# complex argument type, valid return type
pytest.mark.xfail((dt.Array(dt.int64), dt.float64), raises=TypeError),
# valid argument type, complex invalid return type
pytest.mark.xfail(
(dt.float64, dt.Array(dt.int64)), raises=TypeError),
# both invalid
pytest.mark.xfail(
(dt.Array(dt.Array(dt.int64)), dt.int64), raises=TypeError),
# struct type with nested integer, valid return type
pytest.mark.xfail(
(dt.Struct.from_tuples([('x', dt.Array(dt.int64))]), dt.float64),
raises=TypeError,
)
]
)
def test_udf_int64(client, argument_type, return_type):
# invalid argument type, valid return type
@udf([argument_type], return_type)
def my_int64_add(x):
return 1.0
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
import numpy as np
from matplotlib.text import Text
from .frame import RectangularFrame
def sort_using(X, Y):
return [x for (y, x) in sorted(zip(Y, X))]
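# Illustrative example (assumption, not in the original code): sort_using
# orders X by the corresponding values in Y, e.g.
#   sort_using(['b', 'a', 'c'], [2, 1, 3])  ->  ['a', 'b', 'c']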
class TickLabels(Text):
def __init__(self, frame, *args, **kwargs):
self.clear()
self._frame = frame
super(TickLabels, self).__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.pad = 0.3
self._exclude_overlapping = False
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.text = {}
self.disp = {}
def add(self, axis, world, pixel, angle, text, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.text[axis] = [text]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.text[axis].append(text)
self.disp[axis].append(axis_displacement)
def sort(self):
"""
Sort by axis displacement, which allows us to figure out which parts
of labels to not repeat.
"""
for axis in self.world:
self.world[axis] = sort_using(self.world[axis], self.disp[axis])
self.pixel[axis] = sort_using(self.pixel[axis], self.disp[axis])
self.angle[axis] = sort_using(self.angle[axis], self.disp[axis])
self.text[axis] = sort_using(self.text[axis], self.disp[axis])
self.disp[axis] = sort_using(self.disp[axis], self.disp[axis])
def simplify_labels(self):
"""
Figure out which parts of labels can be dropped to avoid repetition.
"""
self.sort()
for axis in self.world:
t1 = self.text[axis][0]
for i in range(1, len(self.world[axis])):
t2 = self.text[axis][i]
if len(t1) != len(t2):
t1 = self.text[axis][i]
continue
start = 0
for j in range(len(t1)):
if t1[j] != t2[j]:
break
if t1[j] not in '-0123456789.':
start = j + 1
if start == 0:
t1 = self.text[axis][i]
else:
self.text[axis][i] = self.text[axis][i][start:]
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def set_exclude_overlapping(self, exclude_overlapping):
self._exclude_overlapping = exclude_overlapping
def draw(self, renderer, bboxes, ticklabels_bbox):
if not self.get_visible():
return
self.simplify_labels()
text_size = renderer.points_to_pixels(self.get_size())
for axis in self.get_visible_axes():
for i in range(len(self.world[axis])):
self.set_text(self.text[axis][i])
x, y = self.pixel[axis][i]
if isinstance(self._frame, RectangularFrame):
# This is just to preserve the current results, but can be
# removed next time the reference images are re-generated.
if np.abs(self.angle[axis][i]) < 45.:
ha = 'right'
va = 'bottom'
dx = - text_size * 0.5
dy = - text_size * 0.5
elif np.abs(self.angle[axis][i] - 90.) < 45:
ha = 'center'
va = 'bottom'
dx = 0
dy = - text_size * 1.5
elif np.abs(self.angle[axis][i] - 180.) < 45:
ha = 'left'
va = 'bottom'
dx = text_size * 0.5
dy = - text_size * 0.5
else:
ha = 'center'
va = 'bottom'
dx = 0
dy = text_size * 0.2
self.set_position((x + dx, y + dy))
self.set_ha(ha)
self.set_va(va)
else:
# This is the more general code for arbitrarily oriented
# axes
# Set initial position and find bounding box
self.set_position((x, y))
bb = super(TickLabels, self).get_window_extent(renderer)
# Find width and height, as well as angle at which we
# transition which side of the label we use to anchor the
# label.
width = bb.width
height = bb.height
# Project axis angle onto bounding box
ax = np.cos(np.radians(self.angle[axis][i]))
ay = np.sin(np.radians(self.angle[axis][i]))
# Set anchor point for label
if np.abs(self.angle[axis][i]) < 45.:
dx = width
dy = ay * height
elif np.abs(self.angle[axis][i] - 90.) < 45:
dx = ax * width
dy = height
elif np.abs(self.angle[axis][i] - 180.) < 45:
dx = -width
dy = ay * height
else:
dx = ax * width
dy = -height
dx *= 0.5
dy *= 0.5
# Find normalized vector along axis normal, so as to be
# able to nudge the label away by a constant padding factor
dist = np.hypot(dx, dy)
ddx = dx / dist
ddy = dy / dist
dx += ddx * text_size * self.pad
dy += ddy * text_size * self.pad
self.set_position((x - dx, y - dy))
self.set_ha('center')
self.set_va('center')
bb = super(TickLabels, self).get_window_extent(renderer)
# TODO: the problem here is that we might get rid of a label
# that has a key starting bit such as -0:30 where the -0
# might be dropped from all other labels.
if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0:
super(TickLabels, self).draw(renderer)
bboxes.append(bb)
ticklabels_bbox.append(bb)
|
|
from urllib import quote
from datetime import datetime
from django.test import Client
from questionnaire.forms.sections import SectionForm, SubSectionForm
from questionnaire.models import Questionnaire, Section, SubSection, Question, QuestionGroup, QuestionOption, \
MultiChoiceAnswer, NumericalAnswer, QuestionGroupOrder, AnswerGroup, Answer, Country, TextAnswer, DateAnswer, \
Region
from questionnaire.services.questionnaire_entry_form_service import QuestionnaireEntryFormService
from questionnaire.tests.base_test import BaseTest
from questionnaire.tests.factories.section_factory import SectionFactory
from questionnaire.tests.factories.questionnaire_factory import QuestionnaireFactory
from questionnaire.tests.factories.region_factory import RegionFactory
from questionnaire.tests.factories.answer_factory import NumericalAnswerFactory
class QuestionnaireEntrySaveDraftTest(BaseTest):
def setUp(self):
self.client = Client()
self.user = self.create_user(group=self.DATA_SUBMITTER, country="Uganda", region="AFRO")
self.country = self.user.user_profile.country
self.region = self.user.user_profile.country.regions.all()[0]
self.assign('can_submit_responses', self.user)
self.client.login(username=self.user.username, password='pass')
self.questionnaire = Questionnaire.objects.create(name="JRF 2013 Core English", status=Questionnaire.PUBLISHED,
description="From dropbox as given by Rouslan",
region=self.region)
self.section_1 = Section.objects.create(title="Reported Cases of Selected Vaccine Preventable Diseases (VPDs)",
order=1,
questionnaire=self.questionnaire, name="Reported Cases")
self.sub_section = SubSection.objects.create(title="Reported cases for the year 2013", order=1,
section=self.section_1)
self.question1 = Question.objects.create(text='Disease', UID='C00001', answer_type='MultiChoice',
is_primary=True)
self.question2 = Question.objects.create(text='B. Number of cases tested',
instructions="Enter the total number of cases for which specimens were collected, and tested in laboratory",
UID='C00003', answer_type='Number')
self.question3 = Question.objects.create(text='C. Number of cases positive',
instructions="Include only those cases found positive for the infectious agent.",
UID='C00004', answer_type='Number')
self.option1 = QuestionOption.objects.create(text='tusker lager', question=self.question1)
self.option2 = QuestionOption.objects.create(text='tusker lager1', question=self.question1)
self.option3 = QuestionOption.objects.create(text='tusker lager2', question=self.question1)
self.question_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1)
self.question_group.question.add(self.question1, self.question3, self.question2)
QuestionGroupOrder.objects.create(question_group=self.question_group, question=self.question1, order=1)
QuestionGroupOrder.objects.create(question_group=self.question_group, question=self.question2, order=2)
QuestionGroupOrder.objects.create(question_group=self.question_group, question=self.question3, order=3)
self.url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section_1.id)
self.data = {u'MultiChoice-MAX_NUM_FORMS': u'1', u'MultiChoice-TOTAL_FORMS': u'1',
u'MultiChoice-INITIAL_FORMS': u'1', u'MultiChoice-0-response': self.option1.id,
u'Number-INITIAL_FORMS': u'2', u'Number-TOTAL_FORMS': u'2', u'Number-MAX_NUM_FORMS': u'2',
u'Number-0-response': u'2', u'Number-1-response': u'33'}
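        # Note (explanatory, not part of the original test): the keys above follow
        # Django formset naming - each answer-type prefix carries the management
        # form fields (TOTAL_FORMS, INITIAL_FORMS, MAX_NUM_FORMS) plus the
        # indexed "<prefix>-<n>-response" fields consumed by the entry formsets.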
def test_get_questionnaire_entry_view(self):
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
templates = [template.name for template in response.templates]
self.assertIn('questionnaires/entry/index.html', templates)
self.assertEqual(self.questionnaire, response.context['questionnaire'])
self.assertEqual(self.section_1, response.context['section'])
self.assertIsInstance(response.context['formsets'], QuestionnaireEntryFormService)
def test_gets_ordered_sections_for_menu_breadcrumps_wizzard(self):
section2 = Section.objects.create(title="section 2", order=2, questionnaire=self.questionnaire)
section3 = Section.objects.create(title="section 3", order=3, questionnaire=self.questionnaire)
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
self.assertEqual(3, response.context['ordered_sections'].count())
self.assertEqual(self.section_1, response.context['ordered_sections'][0])
self.assertEqual(section2, response.context['ordered_sections'][1])
self.assertEqual(section3, response.context['ordered_sections'][2])
self.assertEqual(False, response.context['printable'])
self.assertEqual(False, response.context['preview'])
self.assertTrue('documents' in response.context)
self.assertIsInstance(response.context['section_form'], SectionForm)
self.assertEqual(response.context['new_section_action'],
'/questionnaire/entry/%s/section/new/' % self.questionnaire.id)
self.assertIsInstance(response.context['subsection_form'], SubSectionForm)
self.assertEqual(response.context['subsection_action'], '/questionnaire/entry/%s/section/%s/subsection/new/' %
(self.questionnaire.id, self.section_1.id))
def test_gets_version_if_in_get_params(self):
response = self.client.get(self.url)
self.assertEqual(1, response.context['the_version'])
url = '/questionnaire/entry/%d/section/%d/?version=1' % (self.questionnaire.id, self.section_1.id)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertEqual('1', response.context['the_version'])
self.assertEqual(self.country, response.context['country'])
def test_gets_ordered_sections_for_only_the_questionnaire_in_get_params(self):
questionnaire_2 = Questionnaire.objects.create(name="JRF 2013 Core English", status=Questionnaire.FINALIZED,
description="From dropbox as given by Rouslan")
questionnaire_2_section = Section.objects.create(title="section 3", order=3, questionnaire=questionnaire_2)
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
self.assertEqual(self.section_1, response.context['ordered_sections'][0])
self.assertNotIn(questionnaire_2_section, response.context['ordered_sections'])
def test_gets_printable_as_true_if_set_in_request(self):
url = '/questionnaire/entry/%d/section/%d/?printable=true&preview=1' % (
self.questionnaire.id, self.section_1.id)
response = self.client.get(url)
self.assertEqual(True, response.context['printable'])
self.assertEqual(True, response.context['preview'])
def test_login_required(self):
self.assert_login_required('/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section_1.id))
def test_permission_required(self):
self.assert_permission_required(
'/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section_1.id))
def test_POST_is_restricted_to_data_submitters_only(self):
user = self.create_user(username="Global", group=self.GLOBAL_ADMIN, org="WHO")
self.region = Region.objects.create(name="AFRO")
self.assign('can_edit_questionnaire', user)
self.client.logout()
self.client.login(username='ga', password='pass')
response = self.client.post(self.url, data=self.data)
self.assertRedirects(response, expected_url='/accounts/login/?next=%s' % quote(self.url),
status_code=302, target_status_code=200, msg_prefix='')
def test_post_saves_answers(self):
data = self.data
self.failIf(MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-1-response'])))
response = self.client.post(self.url, data=data)
self.assertEqual(200, response.status_code)
templates = [template.name for template in response.templates]
self.assertIn('questionnaires/entry/index.html', templates)
self.failUnless(
MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response']), question=self.question1))
self.failUnless(
NumericalAnswer.objects.filter(response=int(data['Number-0-response']), question=self.question2))
self.failUnless(
NumericalAnswer.objects.filter(response=int(data['Number-1-response']), question=self.question3))
def test_post_groups_rows_into_answer_groups(self):
Answer.objects.select_subclasses().delete()
data = self.data
self.failIf(MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-1-response'])))
response = self.client.post(self.url, data=data)
self.assertEqual(200, response.status_code)
primary = MultiChoiceAnswer.objects.get(response__id=int(data['MultiChoice-0-response']),
question=self.question1)
answer_1 = NumericalAnswer.objects.get(response=int(data['Number-0-response']), question=self.question2)
answer_2 = NumericalAnswer.objects.get(response=int(data['Number-1-response']), question=self.question3)
answer_group = AnswerGroup.objects.filter(grouped_question=self.question_group)
self.assertEqual(3, answer_group.count())
self.assertEqual(1, primary.answergroup.count())
self.assertEqual(1, answer_1.answergroup.count())
self.assertEqual(1, answer_2.answergroup.count())
def test_successful_post_shows_success_message(self):
data = self.data
self.failIf(MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-1-response'])))
response = self.client.post(self.url, data=data)
self.assertEqual(200, response.status_code)
expected_message = 'Draft saved.'
self.assertIn(expected_message, response.content)
def test_post_failure_does_not_save_answers_and_does_not_redirect(self):
data = self.data
data[u'MultiChoice-0-response'] = -1
self.failIf(MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-1-response'])))
response = self.client.post(self.url, data=data)
self.assertEqual(200, response.status_code)
templates = [template.name for template in response.templates]
self.assertIn('questionnaires/entry/index.html', templates)
self.failIf(MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-1-response'])))
self.failIf(AnswerGroup.objects.filter(grouped_question=self.question_group))
expected_message = 'Draft NOT saved. See errors below.'
self.assertIn(expected_message, response.content)
def test_post_on_section_with_draft_answers_modify_original_draft_answers_and_not_create_new_instance(self):
data = self.data
self.client.post(self.url, data=data)
old_primary = MultiChoiceAnswer.objects.get(response__id=int(data['MultiChoice-0-response']),
question=self.question1)
old_answer_1 = NumericalAnswer.objects.get(response=int(data['Number-0-response']), question=self.question2)
old_answer_2 = NumericalAnswer.objects.get(response=int(data['Number-1-response']), question=self.question3)
data_modified = data.copy()
data_modified['MultiChoice-0-response'] = self.option2.id
data_modified['Number-1-response'] = '3'
response = self.client.post(self.url, data=data_modified)
self.assertEqual(200, response.status_code)
primary = MultiChoiceAnswer.objects.get(response__id=int(data_modified['MultiChoice-0-response']),
question=self.question1, version=1)
answer_1 = NumericalAnswer.objects.get(response=int(data_modified['Number-0-response']),
question=self.question2, version=1)
answer_2 = NumericalAnswer.objects.get(response=int(data_modified['Number-1-response']),
question=self.question3, version=1)
self.assertEqual(old_primary.id, primary.id)
self.assertEqual(old_answer_1.id, answer_1.id)
self.assertEqual(old_answer_2.id, answer_2.id)
answer_group = AnswerGroup.objects.filter(grouped_question=self.question_group)
self.assertEqual(3, answer_group.count())
def test_post_after_submit_save_new_draft_version(self):
data = self.data.copy()
self.client.post(self.url, data=data)
self.client.post('/submit/%d' % self.questionnaire.id)
old_primary = MultiChoiceAnswer.objects.get(response__id=int(data['MultiChoice-0-response']),
question=self.question1, version=1)
old_answer_1 = NumericalAnswer.objects.get(response=int(data['Number-0-response']), question=self.question2,
version=1)
old_answer_2 = NumericalAnswer.objects.get(response=int(data['Number-1-response']), question=self.question3,
version=1)
self.assertEqual(Answer.SUBMITTED_STATUS, old_primary.status)
self.assertEqual(Answer.SUBMITTED_STATUS, old_answer_1.status)
self.assertEqual(Answer.SUBMITTED_STATUS, old_answer_2.status)
answer_group = AnswerGroup.objects.filter(grouped_question=self.question_group)
self.assertEqual(3, answer_group.count())
data = self.data
self.client.post(self.url, data=data)
primary = MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response']),
question=self.question1, version=2)
answer_1 = NumericalAnswer.objects.filter(response=int(data['Number-0-response']), question=self.question2,
version=2)
answer_2 = NumericalAnswer.objects.filter(response=int(data['Number-1-response']), question=self.question3,
version=2)
self.assertEqual(1, primary.count())
self.assertEqual(Answer.DRAFT_STATUS, primary[0].status)
self.assertEqual(2, primary[0].version)
self.assertEqual(1, answer_1.count())
self.assertEqual(Answer.DRAFT_STATUS, answer_1[0].status)
self.assertEqual(2, answer_1[0].version)
self.assertEqual(1, answer_2.count())
self.assertEqual(Answer.DRAFT_STATUS, answer_2[0].status)
self.assertEqual(2, answer_2[0].version)
def test_post_saves_answers_and_redirect_to_no_preview_if_given_redirect_url(self):
data = self.data
self.failIf(MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-0-response'])))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-1-response'])))
section_2 = Section.objects.create(name="haha", questionnaire=self.questionnaire, order=2)
data['redirect_url'] = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, section_2.id)
response = self.client.post(self.url + "?preview=1", data=data)
self.failUnless(
MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response']), question=self.question1))
self.failUnless(
NumericalAnswer.objects.filter(response=int(data['Number-0-response']), question=self.question2))
self.failUnless(
NumericalAnswer.objects.filter(response=int(data['Number-1-response']), question=self.question3))
self.assertRedirects(response, data['redirect_url'])
def test_get_preview_for_version(self):
version = 1
country = Country.objects.create(name="Kenya")
url = '/questionnaire/entry/%s/section/%d/?country=%s&version=%s' % (
self.questionnaire.id, self.section_1.id, country.id, version)
self.initial = {'country': country, 'status': 'Draft', 'version': 1, 'code': 'ABC123',
'questionnaire': self.questionnaire}
version_1_primary_answer = MultiChoiceAnswer.objects.create(response=self.option1, question=self.question1,
**self.initial)
version_1_answer_1 = NumericalAnswer.objects.create(response=4, question=self.question2, **self.initial)
version_1_answer_2 = NumericalAnswer.objects.create(response=2, question=self.question3, **self.initial)
answer_group = AnswerGroup.objects.create(grouped_question=self.question_group)
answer_group.answer.add(version_1_answer_1, version_1_answer_2, version_1_primary_answer)
response = self.client.get(url)
formsets = response.context['formsets']
self.assertIsInstance(formsets, QuestionnaireEntryFormService)
self.assertEqual(self.section_1, formsets.section)
section1_formsets = formsets.formsets
self.assertEqual(self.question1, section1_formsets['MultiChoice'][0].initial['question'])
self.assertEqual(self.question2, section1_formsets['Number'][0].initial['question'])
self.assertEqual(self.question3, section1_formsets['Number'][1].initial['question'])
self.assertEqual(version_1_primary_answer.response, section1_formsets['MultiChoice'][0].initial['response'])
self.assertEqual(str(version_1_answer_1), section1_formsets['Number'][0].initial['response'])
self.assertEqual(version_1_answer_2.response, int(section1_formsets['Number'][1].initial['response']))
self.assertEqual(version_1_answer_1, section1_formsets['Number'][0].initial['answer'])
self.assertEqual(version_1_answer_2, section1_formsets['Number'][1].initial['answer'])
self.assertEqual(version_1_primary_answer, section1_formsets['MultiChoice'][0].initial['answer'])
class SaveGridDraftQuestionGroupEntryTest(BaseTest):
def setUp(self):
AnswerGroup.objects.all().delete()
self.questionnaire = Questionnaire.objects.create(name="JRF 2013 Core English")
self.section1 = Section.objects.create(title="Reported Cases of Selected Vaccine", order=1,
questionnaire=self.questionnaire, name="Reported Cases")
self.sub_section = SubSection.objects.create(title="subsection 1", order=1, section=self.section1)
self.question_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1, grid=True,
display_all=True)
self.question1 = Question.objects.create(text='Favorite beer 1', UID='C00001', answer_type='MultiChoice',
is_primary=True)
self.option1 = QuestionOption.objects.create(text='tusker lager', question=self.question1)
self.option2 = QuestionOption.objects.create(text='tusker lager1', question=self.question1)
self.option3 = QuestionOption.objects.create(text='tusker lager2', question=self.question1)
self.question2 = Question.objects.create(text='question 2', instructions="instruction 2",
UID='C00002', answer_type='Text')
self.question3 = Question.objects.create(text='question 3', instructions="instruction 3",
UID='C00003', answer_type='Number')
self.question4 = Question.objects.create(text='question 4', instructions="instruction 2",
UID='C00005', answer_type='Date')
self.question_group.question.add(self.question1, self.question3, self.question2, self.question4)
QuestionGroupOrder.objects.create(question=self.question1, question_group=self.question_group, order=1)
QuestionGroupOrder.objects.create(question=self.question2, question_group=self.question_group, order=2)
QuestionGroupOrder.objects.create(question=self.question3, question_group=self.question_group, order=3)
QuestionGroupOrder.objects.create(question=self.question4, question_group=self.question_group, order=4)
self.country = Country.objects.create(name="Uganda")
self.version = 1
self.initial = {'country': self.country, 'status': 'Draft', 'version': self.version, 'code': 'ABC123',
'questionnaire': self.questionnaire}
self.data = {u'MultiChoice-MAX_NUM_FORMS': u'3', u'MultiChoice-TOTAL_FORMS': u'3',
u'MultiChoice-INITIAL_FORMS': u'3', u'MultiChoice-0-response': self.option1.id,
u'MultiChoice-1-response': self.option2.id, u'MultiChoice-2-response': self.option3.id,
u'Number-MAX_NUM_FORMS': u'3', u'Number-TOTAL_FORMS': u'3',
u'Number-INITIAL_FORMS': u'3', u'Number-0-response': '22',
u'Number-1-response': '44', u'Number-2-response': '33',
u'Text-MAX_NUM_FORMS': u'3', u'Text-TOTAL_FORMS': u'3',
u'Text-INITIAL_FORMS': u'3', u'Text-0-response': 'Haha',
u'Text-1-response': 'Hehe', u'Text-2-response': 'hehehe',
u'Date-MAX_NUM_FORMS': u'3', u'Date-TOTAL_FORMS': u'3',
u'Date-INITIAL_FORMS': u'3', u'Date-0-response': '2014-02-02',
u'Date-1-response': '2014-10-02', u'Date-2-response': '2014-03-02',
}
self.url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section1.id)
self.client = Client()
self.user = self.create_user(group=self.DATA_SUBMITTER, country="Uganda", region="AFRO")
self.country = self.user.user_profile.country
self.region = self.user.user_profile.country.regions.all()[0]
self.assign('can_submit_responses', self.user)
self.client.login(username=self.user.username, password='pass')
def test_post_grid_view_saves_drafts(self):
data = self.data
self.failIf(
MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response']), question=self.question1))
self.failIf(
MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-1-response']), question=self.question1))
self.failIf(
MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-2-response']), question=self.question1))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-0-response']), question=self.question3))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-1-response']), question=self.question3))
self.failIf(NumericalAnswer.objects.filter(response=int(data['Number-2-response']), question=self.question3))
self.failIf(TextAnswer.objects.filter(response=data['Text-0-response'], question=self.question2))
self.failIf(TextAnswer.objects.filter(response=data['Text-1-response'], question=self.question2))
self.failIf(TextAnswer.objects.filter(response=data['Text-2-response'], question=self.question2))
self.failIf(DateAnswer.objects.filter(response=data['Date-0-response'], question=self.question4))
self.failIf(DateAnswer.objects.filter(response=data['Date-1-response'], question=self.question4))
self.failIf(DateAnswer.objects.filter(response=data['Date-2-response'], question=self.question4))
response = self.client.post(self.url, data=data)
self.failUnless(
MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-0-response']), question=self.question1))
self.failUnless(
MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-1-response']), question=self.question1))
self.failUnless(
MultiChoiceAnswer.objects.filter(response__id=int(data['MultiChoice-2-response']), question=self.question1))
self.failUnless(
NumericalAnswer.objects.filter(response=int(data['Number-0-response']), question=self.question3))
self.failUnless(
NumericalAnswer.objects.filter(response=int(data['Number-1-response']), question=self.question3))
self.failUnless(
NumericalAnswer.objects.filter(response=int(data['Number-2-response']), question=self.question3))
self.failUnless(TextAnswer.objects.filter(response=data['Text-0-response'], question=self.question2))
self.failUnless(TextAnswer.objects.filter(response=data['Text-1-response'], question=self.question2))
self.failUnless(TextAnswer.objects.filter(response=data['Text-2-response'], question=self.question2))
self.failUnless(DateAnswer.objects.filter(response=data['Date-0-response'], question=self.question4))
self.failUnless(DateAnswer.objects.filter(response=data['Date-1-response'], question=self.question4))
self.failUnless(DateAnswer.objects.filter(response=data['Date-2-response'], question=self.question4))
self.assertEqual(200, response.status_code)
expected_message = 'Draft saved.'
self.assertIn(expected_message, response.content)
def test_post_grid_form_creates_groups_for_each_primary_question_option(self):
data = self.data
response = self.client.post(self.url, data=data)
self.assertEqual(200, response.status_code)
self.assertIn('Draft saved.', response.content)
self.assertEqual(3, AnswerGroup.objects.count())
def test_post_saves_answers_in_correct_answer_groups_rows(self):
data = self.data
self.client.post(self.url, data=data)
option_1_answer = MultiChoiceAnswer.objects.get(response=self.option1)
group1 = option_1_answer.answergroup.all()
self.assertEqual(1, group1.count())
group1_answers = group1[0].answer.all().select_subclasses()
self.assertEqual(4, group1_answers.count())
self.assertIn(NumericalAnswer.objects.get(response=int(data['Number-0-response']), question=self.question3),
group1_answers)
self.assertIn(TextAnswer.objects.get(response=data['Text-0-response'], question=self.question2), group1_answers)
self.assertIn(DateAnswer.objects.get(response=data['Date-0-response'], question=self.question4), group1_answers)
option_2_answer = MultiChoiceAnswer.objects.get(response=self.option2)
group2 = option_2_answer.answergroup.all()
        self.assertEqual(1, group2.count())
group2_answers = group2[0].answer.all().select_subclasses()
self.assertEqual(4, group2_answers.count())
self.assertIn(NumericalAnswer.objects.get(response=int(data['Number-1-response']), question=self.question3),
group2_answers)
self.assertIn(TextAnswer.objects.get(response=data['Text-1-response'], question=self.question2), group2_answers)
self.assertIn(DateAnswer.objects.get(response=data['Date-1-response'], question=self.question4), group2_answers)
option_3_answer = MultiChoiceAnswer.objects.get(response=self.option3)
group3 = option_3_answer.answergroup.all()
self.assertEqual(1, group3.count())
group3_answers = group3[0].answer.all().select_subclasses()
self.assertEqual(4, group3_answers.count())
self.assertIn(NumericalAnswer.objects.get(response=int(data['Number-2-response']), question=self.question3),
group3_answers)
self.assertIn(TextAnswer.objects.get(response=data['Text-2-response'], question=self.question2), group3_answers)
self.assertIn(DateAnswer.objects.get(response=data['Date-2-response'], question=self.question4), group3_answers)
def given_I_have_questions_and_corresponding_submitted_answers_in_a_section(self):
section = Section.objects.create(title="another section", order=2, questionnaire=self.questionnaire,
name="haha")
sub_section = SubSection.objects.create(title="subsection in another section", order=1, section=section)
question1 = Question.objects.create(text='q1', UID='C00011', answer_type='MultiChoice')
question2 = Question.objects.create(text='q2', UID='C00033', answer_type='Number')
question3 = Question.objects.create(text='q3', UID='C00034', answer_type='Number')
option1 = QuestionOption.objects.create(text='tusker lager', question=question1)
option2 = QuestionOption.objects.create(text='tusker lager1', question=question1)
option3 = QuestionOption.objects.create(text='tusker lager2', question=question1)
question_group = QuestionGroup.objects.create(subsection=sub_section, order=1)
question_group.question.add(question1, question3, question2)
QuestionGroupOrder.objects.create(question_group=question_group, question=question1, order=1)
QuestionGroupOrder.objects.create(question_group=question_group, question=question2, order=2)
QuestionGroupOrder.objects.create(question_group=question_group, question=question3, order=3)
data = {u'MultiChoice-MAX_NUM_FORMS': u'1', u'MultiChoice-TOTAL_FORMS': u'1',
u'MultiChoice-INITIAL_FORMS': u'1', u'MultiChoice-0-response': option1.id,
u'Number-INITIAL_FORMS': u'2', u'Number-TOTAL_FORMS': u'2', u'Number-MAX_NUM_FORMS': u'2',
u'Number-0-response': u'2', u'Number-1-response': u'33'}
initial = self.initial.copy()
initial['status'] = Answer.SUBMITTED_STATUS
old_primary = MultiChoiceAnswer.objects.create(response=option1, question=question1, **initial)
old_answer_1 = NumericalAnswer.objects.create(response=int(data['Number-0-response']), question=question2,
**initial)
old_answer_2 = NumericalAnswer.objects.create(response=int(data['Number-1-response']), question=question3,
**initial)
old_primary.answergroup.create(grouped_question=question_group)
old_answer_1.answergroup.create(grouped_question=question_group)
old_answer_2.answergroup.create(grouped_question=question_group)
data_modified = data.copy()
data_modified['MultiChoice-0-response'] = option2.id
data_modified['Number-1-response'] = '3'
old_answer = [old_primary, old_answer_1, old_answer_2]
questions = [question1, question2, question3]
return questions, question_group, old_answer, data_modified, section
def and_given_I_have_other_questions_and_corresponding_answers_in_a_different_section(self):
question1_answer = []
question2_answer = []
question3_answer = []
answer_group1 = []
for index, option in enumerate(self.question1.options.order_by('modified')):
question1_answer.append(MultiChoiceAnswer.objects.create(question=self.question1, country=self.country,
status=Answer.SUBMITTED_STATUS, response=option,
version=self.version,
questionnaire=self.questionnaire))
question2_answer.append(TextAnswer.objects.create(question=self.question2, country=self.country,
status=Answer.SUBMITTED_STATUS,
response="ayoyoyo %d" % index,
version=self.version, questionnaire=self.questionnaire))
question3_answer.append(NumericalAnswer.objects.create(question=self.question3, country=self.country,
status=Answer.SUBMITTED_STATUS, response=index,
version=self.version,
questionnaire=self.questionnaire))
answer_group1.append(
question1_answer[index].answergroup.create(grouped_question=self.question_group, row=index))
answer_group1[index].answer.add(question2_answer[index], question3_answer[index])
def test_post_drafts_duplicates_submitted_answers_and_answer_groups_of_other_sections_and_save_draft_of_section_when_editing_submitted_questionnaire(
self):
questions, question_group, old_answer, data_modified, section = self.given_I_have_questions_and_corresponding_submitted_answers_in_a_section()
self.and_given_I_have_other_questions_and_corresponding_answers_in_a_different_section()
initial = self.initial.copy()
initial['version'] = self.version + 1
response = self.client.post("/questionnaire/entry/%d/section/%d/" % (self.questionnaire.id, section.id),
data=data_modified)
self.assertEqual(200, response.status_code)
self.assertIn('Draft saved.', response.content)
primary = MultiChoiceAnswer.objects.get(response__id=int(data_modified['MultiChoice-0-response']),
question=questions[0], version=self.version + 1)
answer_1 = NumericalAnswer.objects.get(response=int(data_modified['Number-0-response']), question=questions[1],
version=self.version + 1)
answer_2 = NumericalAnswer.objects.get(response=int(data_modified['Number-1-response']), question=questions[2],
version=self.version + 1)
self.assertNotEqual(old_answer[0].id, primary.id)
self.assertNotEqual(old_answer[1].id, answer_1.id)
self.assertNotEqual(old_answer[2].id, answer_2.id)
self.assertEqual(primary.status, Answer.DRAFT_STATUS)
self.assertEqual(answer_1.status, Answer.DRAFT_STATUS)
self.assertEqual(answer_2.status, Answer.DRAFT_STATUS)
self.failUnless(primary.answergroup.filter(grouped_question=question_group))
self.failUnless(answer_1.answergroup.filter(grouped_question=question_group))
self.failUnless(answer_2.answergroup.filter(grouped_question=question_group))
for index, option in enumerate(self.question1.options.order_by('modified')):
question1_answer = MultiChoiceAnswer.objects.filter(question=self.question1, country=self.country,
status=Answer.DRAFT_STATUS, response=option,
version=self.version + 1,
questionnaire=self.questionnaire)
question2_answer = TextAnswer.objects.filter(question=self.question2, country=self.country,
status=Answer.DRAFT_STATUS, response="ayoyoyo %d" % index,
version=self.version + 1, questionnaire=self.questionnaire)
question3_answer = NumericalAnswer.objects.filter(question=self.question3, country=self.country,
status=Answer.DRAFT_STATUS, response=index,
version=self.version + 1,
questionnaire=self.questionnaire)
self.failUnless(question1_answer)
self.assertEqual(1, question1_answer.count())
self.failUnless(question2_answer)
self.assertEqual(1, question2_answer.count())
self.failUnless(question3_answer)
self.assertEqual(1, question3_answer.count())
answer_group1 = question1_answer[0].answergroup.filter(grouped_question=self.question_group, row=index)
self.failUnless(answer_group1)
answer_group_answers = answer_group1[0].answer.all().select_subclasses()
self.assertEqual(3, answer_group_answers.count())
self.assertIn(question2_answer[0], answer_group_answers)
self.assertIn(question3_answer[0], answer_group_answers)
class QuestionnaireEntrySubmitTest(BaseTest):
def setUp(self):
self.client = Client()
self.user = self.create_user(group=self.DATA_SUBMITTER, country="Uganda", region="AFRO")
self.country = self.user.user_profile.country
self.region = self.user.user_profile.country.regions.all()[0]
self.questionnaire = Questionnaire.objects.create(name="JRF 2013 Core English", status=Questionnaire.PUBLISHED,
description="From dropbox as given by Rouslan",
region=self.region)
self.section_1 = Section.objects.create(title="Reported Cases of Selected Vaccine Preventable Diseases (VPDs)",
order=1,
questionnaire=self.questionnaire, name="Reported Cases")
self.sub_section = SubSection.objects.create(title="Reported cases for the year 2013", order=1,
section=self.section_1)
self.question1 = Question.objects.create(text='Disease', UID='C00001', answer_type='MultiChoice')
self.question2 = Question.objects.create(text='B. Number of cases tested',
instructions="Enter the total number of cases for which specimens were collected, and tested in laboratory",
UID='C00003', answer_type='Number')
self.question3 = Question.objects.create(text='C. Number of cases positive',
instructions="Include only those cases found positive for the infectious agent.",
UID='C00004', answer_type='Number')
self.option1 = QuestionOption.objects.create(text='tusker lager', question=self.question1)
self.option2 = QuestionOption.objects.create(text='tusker lager1', question=self.question1)
self.option3 = QuestionOption.objects.create(text='tusker lager2', question=self.question1)
self.question_group = QuestionGroup.objects.create(subsection=self.sub_section, order=1)
self.question_group.question.add(self.question1, self.question3, self.question2)
QuestionGroupOrder.objects.create(question_group=self.question_group, question=self.question1, order=1)
QuestionGroupOrder.objects.create(question_group=self.question_group, question=self.question2, order=2)
QuestionGroupOrder.objects.create(question_group=self.question_group, question=self.question3, order=3)
self.url = '/submit/%d' % self.questionnaire.id
self.assign('can_submit_responses', self.user)
self.client.login(username=self.user.username, password='pass')
self.data = {u'MultiChoice-MAX_NUM_FORMS': u'1', u'MultiChoice-TOTAL_FORMS': u'1',
u'MultiChoice-INITIAL_FORMS': u'1', u'MultiChoice-0-response': self.option1.id,
u'Number-INITIAL_FORMS': u'2', u'Number-TOTAL_FORMS': u'2', u'Number-MAX_NUM_FORMS': u'2',
u'Number-0-response': u'2', u'Number-1-response': u'33'}
def test_login_required(self):
self.assert_login_required(self.url)
def test_permission_required(self):
self.assert_permission_required(self.url)
def test_POST_is_restricted_to_data_submitters_only(self):
global_admin = self.create_user(username="new user", group=self.GLOBAL_ADMIN, org="WHO")
self.region = Region.objects.create(name="AFRO")
self.assign('can_edit_questionnaire', global_admin)
client = Client()
client.login(username='ga', password='pass')
response = client.post(self.url, data=self.data)
self.assertRedirects(response, expected_url='/accounts/login/?next=%s' % quote(self.url),
status_code=302, target_status_code=200, msg_prefix='')
def test_submit_changes_all_answers_statuses_to_submitted(self):
other_section_1 = Section.objects.create(title="Reported Cases of Selected Vaccine Preventable Diseases (VPDs)",
order=2, questionnaire=self.questionnaire, name="Reported Cases")
other_sub_section = SubSection.objects.create(title="Reported cases for the year 2013", order=1,
section=other_section_1)
other_question1 = Question.objects.create(text='other question 1', UID='C00011', answer_type='Number')
other_question2 = Question.objects.create(text='other question 2', UID='C00012', answer_type='Number')
other_question_group = QuestionGroup.objects.create(subsection=other_sub_section, order=1)
other_question_group.question.add(other_question1, other_question2)
QuestionGroupOrder.objects.create(question=other_question1, order=1, question_group=other_question_group)
QuestionGroupOrder.objects.create(question=other_question2, order=2, question_group=other_question_group)
other_answer_1 = NumericalAnswer.objects.create(response=1, question=other_question1,
status=Answer.DRAFT_STATUS,
country=self.country, version=0,
questionnaire=self.questionnaire)
other_answer_2 = NumericalAnswer.objects.create(response=2, question=other_question2,
status=Answer.DRAFT_STATUS,
country=self.country, version=0,
questionnaire=self.questionnaire)
answer_group = AnswerGroup.objects.create(grouped_question=other_question_group)
answer_group.answer.add(other_answer_1, other_answer_2)
self.client.post(self.url)
other_answer_1 = NumericalAnswer.objects.get(response=1, question=other_question1, country=self.country,
version=0, questionnaire=self.questionnaire)
other_answer_2 = NumericalAnswer.objects.get(response=2, question=other_question2, country=self.country,
version=0, questionnaire=self.questionnaire)
self.assertEqual(Answer.SUBMITTED_STATUS, other_answer_1.status)
self.assertEqual(Answer.SUBMITTED_STATUS, other_answer_2.status)
answer_group = AnswerGroup.objects.filter(grouped_question=other_question_group)
self.assertEqual(1, answer_group.count())
def test_submit_on_success_redirect_to_referer_if_given_and_adds_preview_get_param(self):
referer_url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section_1.id)
meta = {'HTTP_REFERER': referer_url}
response = self.client.post(self.url, **meta)
self.assertRedirects(response, referer_url + "?preview=1")
def test_submit_on_success_redirect_to_referer_if_given(self):
referer_url = '/questionnaire/entry/%d/section/%d/?preview=1' % (self.questionnaire.id, self.section_1.id)
meta = {'HTTP_REFERER': referer_url}
response = self.client.post(self.url, **meta)
self.assertRedirects(response, referer_url)
    def test_submit_on_success_redirect_to_referer_does_not_highlight_errors_and_shows_preview(self):
referer_url = '/questionnaire/entry/%d/section/%d/?show=errors' % (self.questionnaire.id, self.section_1.id)
referer_url_no_error_yes_preview = '/questionnaire/entry/%d/section/%d/?preview=1' % (
self.questionnaire.id, self.section_1.id)
meta = {'HTTP_REFERER': referer_url}
response = self.client.post(self.url, **meta)
self.assertRedirects(response, referer_url_no_error_yes_preview)
def test_submit_success_message(self):
response = self.client.post(self.url)
success_message = 'Questionnaire Submitted.'
self.assertIn(success_message, response.cookies['messages'].value)
def test_submit_fails_and_shows_sections_with_error_message_and_error_fields_when_a_section_has_unanswered_required_questions(
self):
data = self.data.copy()
initial = {'country': self.country, 'status': 'Draft', 'version': 1, 'code': 'ABC123',
'questionnaire': self.questionnaire}
old_primary = MultiChoiceAnswer.objects.create(response=self.option1, question=self.question1, **initial)
old_answer_1 = NumericalAnswer.objects.create(response=int(data['Number-0-response']), question=self.question2,
**initial)
old_answer_2 = NumericalAnswer.objects.create(response=int(data['Number-1-response']), question=self.question3,
**initial)
answer_group = AnswerGroup.objects.create(grouped_question=self.question_group)
answer_group.answer.add(old_primary, old_answer_1, old_answer_2)
required_question = Question.objects.create(text='required', UID='C00330', answer_type='Number',
is_required=True)
self.question_group.question.add(required_question)
QuestionGroupOrder.objects.create(question_group=self.question_group, question=required_question, order=4)
response = self.client.post(self.url)
        section_with_errors_url = '/questionnaire/entry/%d/section/%d/?show=errors' % (
            self.questionnaire.id, self.section_1.id)
        self.assertRedirects(response, section_with_errors_url)
error_message = 'Questionnaire NOT submitted. See errors below.'
self.assertIn(error_message, response.cookies['messages'].value)
submitted_attributes = initial.copy()
submitted_attributes['status'] = 'Submitted'
primary = MultiChoiceAnswer.objects.filter(response=self.option1, question=self.question1,
**submitted_attributes)
answer_1 = NumericalAnswer.objects.filter(response=int(data['Number-0-response']), question=self.question2,
**submitted_attributes)
answer_2 = NumericalAnswer.objects.filter(response=int(data['Number-1-response']), question=self.question3,
**submitted_attributes)
self.failIf(primary)
self.failIf(answer_1)
self.failIf(answer_2)
class QuestionnaireCloneViewTest(BaseTest):
def setUp(self):
self.this_year = datetime.now().year
self.questionnaire = Questionnaire.objects.create(name="JRF 2013 Core English", status=Questionnaire.FINALIZED,
year=(self.this_year - 1))
self.section_1 = Section.objects.create(title="Reported Cases of Selected Vaccine Preventable Diseases (VPDs)",
order=1,
questionnaire=self.questionnaire, name="Reported Cases")
self.section_2 = Section.objects.create(title="Cured Cases of Measles", order=1,
questionnaire=self.questionnaire, name="Cured Cases")
self.sub_section1 = SubSection.objects.create(title="Reported cases for the year 2013", order=1,
section=self.section_1)
self.sub_section2 = SubSection.objects.create(title="Reported cases for the year", order=2,
section=self.section_1)
self.sub_section3 = SubSection.objects.create(title="Reported cures 2014", order=1, section=self.section_2)
self.sub_section4 = SubSection.objects.create(title="Reported cures", order=2, section=self.section_2)
self.primary_question = Question.objects.create(text='Disease', UID='C00003', answer_type='MultiChoice',
is_primary=True)
self.option = QuestionOption.objects.create(text="Measles", question=self.primary_question, UID="QO1")
self.option2 = QuestionOption.objects.create(text="TB", question=self.primary_question, UID="QO2")
self.question1 = Question.objects.create(text='B. Number of cases tested', UID='C00004', answer_type='Number')
self.question2 = Question.objects.create(text='C. Number of cases positive',
instructions="""
Include only those cases found positive for the infectious agent.
""",
UID='C00005', answer_type='Number')
self.parent10 = QuestionGroup.objects.create(subsection=self.sub_section1, order=1)
self.parent12 = QuestionGroup.objects.create(subsection=self.sub_section1, order=2)
self.question3 = Question.objects.create(text='B. Number of cases tested', UID=Question.next_uid(),
answer_type='Number')
self.question4 = Question.objects.create(text='C. Number of cases positive', UID=Question.next_uid(),
answer_type='Number')
QuestionGroupOrder.objects.create(order=1, question_group=self.parent10, question=self.primary_question)
QuestionGroupOrder.objects.create(order=2, question_group=self.parent10, question=self.question1)
QuestionGroupOrder.objects.create(order=3, question_group=self.parent10, question=self.question2)
QuestionGroupOrder.objects.create(order=4, question_group=self.parent12, question=self.question3)
QuestionGroupOrder.objects.create(order=5, question_group=self.parent12, question=self.question4)
self.parent10.question.add(self.question3, self.question4, self.question2, self.question1,
self.primary_question)
self.client = Client()
self.user = self.create_user(group=self.GLOBAL_ADMIN, org="WHO")
self.assign('can_view_users', self.user)
self.client.login(username=self.user.username, password='pass')
def test_post_clone_questionnaire(self):
form_data = {
'questionnaire': self.questionnaire.id,
'year': self.this_year,
'name': 'New name'
}
self.assertEqual(1, Questionnaire.objects.all().count())
self.assertEqual(2, Section.objects.all().count())
self.assertEqual(4, SubSection.objects.all().count())
self.assertEqual(2, QuestionGroup.objects.all().count())
self.assertEqual(5, Question.objects.all().count())
response = self.client.post('/questionnaire/entry/duplicate/', data=form_data)
self.assertEqual(2, Questionnaire.objects.all().count())
self.assertEqual(4, Section.objects.all().count())
self.assertEqual(8, SubSection.objects.all().count())
self.assertEqual(4, QuestionGroup.objects.all().count())
self.assertEqual(5, Question.objects.all().count())
questionnaire = Questionnaire.objects.all().exclude(id=self.questionnaire.id).latest('modified')
section = questionnaire.sections.all()[0]
url = '/questionnaire/entry/%d/section/%d/' % (questionnaire.id, section.id)
self.assertRedirects(response, url)
messages = "The questionnaire has been duplicated successfully, You can now go ahead and edit it"
self.assertIn(messages, response.cookies['messages'].value)
def test_post_clone_questionnaire_with_invalid_form(self):
form_data = {
'questionnaire': self.questionnaire.id,
'year': 2030
}
self.assertEqual(1, Questionnaire.objects.all().count())
self.assertEqual(2, Section.objects.all().count())
self.assertEqual(4, SubSection.objects.all().count())
self.assertEqual(2, QuestionGroup.objects.all().count())
self.assertEqual(5, Question.objects.all().count())
response = self.client.post('/questionnaire/entry/duplicate/', data=form_data)
self.assertEqual(1, Questionnaire.objects.all().count())
self.assertEqual(2, Section.objects.all().count())
self.assertEqual(4, SubSection.objects.all().count())
self.assertEqual(2, QuestionGroup.objects.all().count())
self.assertEqual(5, Question.objects.all().count())
url = '/manage/'
self.assertRedirects(response, url)
messages = "Questionnaire could not be duplicated see errors below"
self.assertIn(messages, response.cookies['messages'].value)
    def test_permission_required(self):
self.assert_permission_required("/manage/")
class DeleteAnswerGroupViewTest(BaseTest):
def setUp(self):
self.questionnaire = Questionnaire.objects.create(name="JRF 2013 Core English", status=Questionnaire.FINALIZED,
year=2013)
self.section_1 = Section.objects.create(title="Reported Cases of Selected Vaccine Preventable Diseases (VPDs)",
order=1,
questionnaire=self.questionnaire, name="Reported Cases")
self.sub_section1 = SubSection.objects.create(title="Reported cases for the year 2013", order=1,
section=self.section_1)
self.primary_question = Question.objects.create(text='Disease', UID='C00003', answer_type='Text',
is_primary=True)
self.question1 = Question.objects.create(text='B. Number of cases tested', UID='C00004', answer_type='Number')
self.question2 = Question.objects.create(text='C. Number of cases positive',
UID='C00005', answer_type='Number')
self.parent10 = QuestionGroup.objects.create(subsection=self.sub_section1, order=1, grid=True)
QuestionGroupOrder.objects.create(order=1, question_group=self.parent10, question=self.primary_question)
QuestionGroupOrder.objects.create(order=2, question_group=self.parent10, question=self.question1)
QuestionGroupOrder.objects.create(order=3, question_group=self.parent10, question=self.question2)
self.parent10.question.add(self.question2, self.question1, self.primary_question)
self.client = Client()
self.user = self.create_user(group=self.DATA_SUBMITTER, country="Uganda", region="AFRO")
self.country = self.user.user_profile.country
self.region = self.user.user_profile.country.regions.all()[0]
self.assign('can_submit_responses', self.user)
self.client.login(username=self.user.username, password='pass')
self.url = '/questionnaire/entry/%d/section/%d/delete/%d/' % (
self.questionnaire.id, self.section_1.id, self.parent10.id)
def test_post_deletes_answer_group_and_answers_corresponding_to_the_primary_answer(self):
answer_data = {'questionnaire': self.questionnaire, 'version': 1, 'country': self.country}
primary_answer = TextAnswer.objects.create(response="haha", question=self.primary_question, **answer_data)
question1_answer = NumericalAnswer.objects.create(response=1, question=self.primary_question, **answer_data)
question2_answer = NumericalAnswer.objects.create(response=2, question=self.primary_question, **answer_data)
answer_group = primary_answer.answergroup.create(grouped_question=self.parent10)
answer_group.answer.add(question1_answer, question2_answer)
data = {'primary_answer': primary_answer.id}
response = self.client.post(self.url, data=data)
self.assertEqual(200, response.status_code)
self.failIf(AnswerGroup.objects.filter(grouped_question=self.parent10, answer=primary_answer))
self.failIf(TextAnswer.objects.filter(response=primary_answer.response))
self.failIf(NumericalAnswer.objects.filter(response=question1_answer.response))
self.failIf(NumericalAnswer.objects.filter(response=question2_answer.response))
def test_non_grid_answers_cannot_be_deleted(self):
self.parent10.grid = False
self.parent10.save()
answer_data = {'questionnaire': self.questionnaire, 'version': 1, 'country': self.country}
primary_answer = TextAnswer.objects.create(response="haha", question=self.primary_question, **answer_data)
question1_answer = NumericalAnswer.objects.create(response=1, question=self.primary_question, **answer_data)
question2_answer = NumericalAnswer.objects.create(response=2, question=self.primary_question, **answer_data)
answer_group = primary_answer.answergroup.create(grouped_question=self.parent10)
answer_group.answer.add(question1_answer, question2_answer)
data = {'primary_answer': primary_answer.id}
response = self.client.post(self.url, data=data)
self.failUnless(AnswerGroup.objects.filter(grouped_question=self.parent10, answer=primary_answer))
self.failUnless(TextAnswer.objects.filter(response=primary_answer.response))
self.failUnless(NumericalAnswer.objects.filter(response=question1_answer.response))
self.failUnless(NumericalAnswer.objects.filter(response=question2_answer.response))
def test_only_own_answer_can_be_deleted(self):
kenya = Country.objects.create(name="Kenya")
answer_data = {'questionnaire': self.questionnaire, 'version': 1, 'country': kenya}
primary_answer = TextAnswer.objects.create(response="haha", question=self.primary_question, **answer_data)
question1_answer = NumericalAnswer.objects.create(response=1, question=self.primary_question, **answer_data)
question2_answer = NumericalAnswer.objects.create(response=2, question=self.primary_question, **answer_data)
answer_group = primary_answer.answergroup.create(grouped_question=self.parent10)
answer_group.answer.add(question1_answer, question2_answer)
data = {'primary_answer': primary_answer.id}
response = self.client.post(self.url, data=data)
self.failUnless(AnswerGroup.objects.filter(grouped_question=self.parent10, answer=primary_answer))
self.failUnless(TextAnswer.objects.filter(response=primary_answer.response))
self.failUnless(NumericalAnswer.objects.filter(response=question1_answer.response))
self.failUnless(NumericalAnswer.objects.filter(response=question2_answer.response))
def test_permission_required(self):
self.assert_permission_required(self.url)
class PreviewModeQuestionnaireEntryTest(BaseTest):
    def test_data_submitter_is_not_in_preview_mode_unless_requested_from_url(self):
self.client = Client()
self.user = self.create_user(group=self.DATA_SUBMITTER, country="Uganda", region="AFRO")
self.country = self.user.user_profile.country
self.region = self.user.user_profile.country.regions.all()[0]
self.assign('can_submit_responses', self.user)
self.client.login(username=self.user.username, password='pass')
self.questionnaire = Questionnaire.objects.create(name="q", status=Questionnaire.PUBLISHED, region=self.region)
self.section_1 = Section.objects.create(title="section1", order=1, questionnaire=self.questionnaire,
name="section1")
url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section_1.id)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertFalse(response.context['preview'])
response = self.client.get(url + '?preview=1')
self.assertEqual(200, response.status_code)
self.assertTrue(response.context['preview'])
    def test_global_admin_is_in_preview_mode_even_if_not_requested_from_url_when_questionnaire_is_published(self):
self.client = Client()
self.user = self.create_user(group=self.GLOBAL_ADMIN, org="WHO")
self.region = Region.objects.create(name="AFRO")
self.assign('can_edit_questionnaire', self.user)
self.client.login(username=self.user.username, password='pass')
self.questionnaire = Questionnaire.objects.create(name="q", status=Questionnaire.PUBLISHED, region=self.region)
self.section_1 = Section.objects.create(title="section1", order=1, questionnaire=self.questionnaire,
name="section1")
url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section_1.id)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertTrue(response.context['preview'])
response = self.client.get(url + '?preview=1')
self.assertEqual(200, response.status_code)
self.assertTrue(response.context['preview'])
    def test_global_admin_is_in_preview_mode_even_if_not_requested_from_url_when_questionnaire_is_finalized(self):
self.client = Client()
self.user = self.create_user(group=self.GLOBAL_ADMIN, org="WHO")
self.region = Region.objects.create(name="AFRO")
self.assign('can_edit_questionnaire', self.user)
self.client.login(username=self.user.username, password='pass')
self.questionnaire = Questionnaire.objects.create(name="q", status=Questionnaire.FINALIZED, region=self.region)
self.section_1 = Section.objects.create(title="section1", order=1, questionnaire=self.questionnaire,
name="section1")
url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section_1.id)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertTrue(response.context['preview'])
response = self.client.get(url + '?preview=1')
self.assertEqual(200, response.status_code)
self.assertTrue(response.context['preview'])
    def test_global_admin_is_in_preview_mode_even_if_not_requested_from_url_when_questionnaire_is_archived(self):
self.client = Client()
self.user = self.create_user(group=self.GLOBAL_ADMIN, org="WHO")
self.region = Region.objects.create(name="AFRO")
self.assign('can_edit_questionnaire', self.user)
self.client.login(username=self.user.username, password='pass')
self.questionnaire = Questionnaire.objects.create(name="q", status=Questionnaire.ARCHIVED, region=self.region)
self.section_1 = Section.objects.create(title="section1", order=1, questionnaire=self.questionnaire,
name="section1")
url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section_1.id)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertTrue(response.context['preview'])
response = self.client.get(url + '?preview=1')
self.assertEqual(200, response.status_code)
self.assertTrue(response.context['preview'])
    def test_global_admin_is_NOT_in_preview_mode_when_questionnaire_is_draft_unless_requested_from_url(self):
self.client = Client()
self.user = self.create_user(group=self.REGIONAL_ADMIN, org="WHO", region="AFRO")
self.region = self.user.user_profile.region
self.assign('can_edit_questionnaire', self.user)
self.client.login(username=self.user.username, password='pass')
self.questionnaire = Questionnaire.objects.create(name="q", status=Questionnaire.DRAFT, region=self.region)
self.section_1 = Section.objects.create(title="section1", order=1, questionnaire=self.questionnaire,
name="section1")
url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section_1.id)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertFalse(response.context['preview'])
response = self.client.get(url + '?preview=1')
self.assertEqual(200, response.status_code)
self.assertTrue(response.context['preview'])
class ArchiveQuestionnaireViewTest(BaseTest):
def setUp(self):
self.client = Client()
self.user = self.create_user(group=self.GLOBAL_ADMIN, org="WHO")
self.assign('can_view_users', self.user)
self.client.login(username=self.user.username, password='pass')
self.questionnaire = QuestionnaireFactory(status=Questionnaire.FINALIZED)
self.section = SectionFactory(questionnaire=self.questionnaire)
def test_post_archive_questionnaire_sets_questionnaire_and_its_children_status_to_archived(self):
afr = RegionFactory(name='AFR')
amr = RegionFactory(name='AMR')
core_child_questionnaire = QuestionnaireFactory(parent=self.questionnaire, region=None, status=Questionnaire.FINALIZED)
SectionFactory(questionnaire=core_child_questionnaire)
afro_child_questionnaire = QuestionnaireFactory(parent=self.questionnaire, region=afr)
amr_child_questionnaire = QuestionnaireFactory(parent=self.questionnaire, region=amr)
response = self.client.post('/questionnaire/%d/archive/' % self.questionnaire.id)
expected_url = '/manage/'
archived_questionnaire = Questionnaire.objects.get(id=self.questionnaire.id)
afro_archived_questionnaire = Questionnaire.objects.get(id=afro_child_questionnaire.id)
amr_archived_questionnaire = Questionnaire.objects.get(id=amr_child_questionnaire.id)
expected_message = "The questionnaire '%s' was archived successfully." % self.questionnaire.name
self.assertRedirects(response, expected_url, status_code=302)
self.assertEqual(afro_archived_questionnaire.status, Questionnaire.ARCHIVED)
self.assertEqual(amr_archived_questionnaire.status, Questionnaire.ARCHIVED)
self.assertEqual(archived_questionnaire.status, Questionnaire.ARCHIVED)
self.assertTrue(Questionnaire.objects.get(id=core_child_questionnaire.id).is_finalized())
self.assertIn(expected_message, response.cookies['messages'].value)
def test_post_archive_to_non_archivable_questionnaire(self):
questionnaire = QuestionnaireFactory(status=Questionnaire.PUBLISHED)
section = SectionFactory(questionnaire=questionnaire)
regional_questionnaire = QuestionnaireFactory(status=Questionnaire.PUBLISHED, parent=questionnaire)
section = SectionFactory(questionnaire=regional_questionnaire)
NumericalAnswerFactory(questionnaire=regional_questionnaire)
response = self.client.post('/questionnaire/%d/archive/' % questionnaire.id)
expected_url = '/manage/'
expected_message = "The questionnaire '%s' could not be archived, because it is %s." % (questionnaire.name, questionnaire.status)
reloaded_questionnaire = Questionnaire.objects.get(id=questionnaire.id)
reloaded_regional_questionnaire = Questionnaire.objects.get(id=regional_questionnaire.id)
self.assertRedirects(response, expected_url, status_code=302)
self.assertIn(expected_message, response.cookies['messages'].value)
self.assertTrue(reloaded_questionnaire.is_published())
self.assertTrue(reloaded_regional_questionnaire.is_published())
def test_permission_required(self):
self.assert_permission_required('/questionnaire/1/archive/')
class DeleteQuestionnaireViewTest(BaseTest):
def setUp(self):
self.client = Client()
self.user = self.create_user(group=self.GLOBAL_ADMIN, org="WHO")
self.assign('can_view_users', self.user)
self.assign('can_edit_questionnaire', self.user)
self.client.login(username=self.user.username, password='pass')
self.questionnaire = QuestionnaireFactory(status=Questionnaire.FINALIZED)
self.section = SectionFactory(questionnaire=self.questionnaire)
def test_delete_a_questionnaire_and_archive_the_questionnaires_children(self):
afr = RegionFactory(name='AFR')
amr = RegionFactory(name='AMR')
core_child_questionnaire = QuestionnaireFactory(parent=self.questionnaire, region=None, status=Questionnaire.FINALIZED)
SectionFactory(questionnaire=core_child_questionnaire)
afro_child_questionnaire = QuestionnaireFactory(parent=self.questionnaire, region=afr)
amr_child_questionnaire = QuestionnaireFactory(parent=self.questionnaire, region=amr)
response = self.client.post('/questionnaire/%d/delete/' % self.questionnaire.id)
expected_url = '/manage/'
afro_archived_questionnaire = Questionnaire.objects.get(id=afro_child_questionnaire.id)
amr_archived_questionnaire = Questionnaire.objects.get(id=amr_child_questionnaire.id)
expected_message = "The questionnaire '%s' was deleted successfully." % self.questionnaire.name
self.assertRedirects(response, expected_url, status_code=302)
self.failIf(Questionnaire.objects.filter(id=self.questionnaire.id))
self.assertEqual(amr_archived_questionnaire.status, Questionnaire.ARCHIVED)
self.assertEqual(afro_archived_questionnaire.status, Questionnaire.ARCHIVED)
self.assertTrue(Questionnaire.objects.get(id=core_child_questionnaire.id).is_finalized())
self.assertIn(expected_message, response.cookies['messages'].value)
def test_delete_a_questionnaire_redirects_with_error_message_if_questionnaire_is_not_deleteable(self):
afr = RegionFactory(name='AFR')
afro_child_questionnaire = QuestionnaireFactory(parent=self.questionnaire, region=afr)
NumericalAnswerFactory(questionnaire=afro_child_questionnaire)
response = self.client.post('/questionnaire/%d/delete/' % self.questionnaire.id)
expected_url = '/manage/'
expected_message = "The questionnaire \'%s\' could not be deleted. Because it has responses." % self.questionnaire.name
self.assertRedirects(response, expected_url, status_code=302)
self.failUnless(Questionnaire.objects.filter(id=self.questionnaire.id))
self.assertIn(expected_message, response.cookies['messages'].value)
|
|
# Copyright 2017 Dravetech AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from collections.abc import Callable
from typing import Optional, List, Dict, Union, Any
from napalm.base.base import NetworkDriver
import napalm.base.exceptions
import inspect
import json
import os
import re
from pydoc import locate
from napalm.base.test import models
def raise_exception(result): # type: ignore
exc = locate(result["exception"])
if exc:
raise exc(*result.get("args", []), **result.get("kwargs", {}))
else:
raise TypeError("Couldn't resolve exception {}", result["exception"])
def is_mocked_method(method: str) -> bool:
mocked_methods = ["traceroute", "ping"]
if method.startswith("get_") or method in mocked_methods:
return True
return False
def mocked_method(path: str, name: str, count: int) -> Callable:
parent_method = getattr(NetworkDriver, name)
parent_method_args = inspect.getfullargspec(parent_method)
modifier = 0 if "self" not in parent_method_args.args else 1
def _mocked_method(*args, **kwargs): # type: ignore
# Check len(args)
if len(args) + len(kwargs) + modifier > len(parent_method_args.args):
raise TypeError(
"{}: expected at most {} arguments, got {}".format(
name, len(parent_method_args.args), len(args) + modifier
)
)
# Check kwargs
unexpected = [x for x in kwargs if x not in parent_method_args.args]
if unexpected:
raise TypeError(
"{} got an unexpected keyword argument '{}'".format(name, unexpected[0])
)
return mocked_data(path, name, count)
return _mocked_method
def mocked_data(path: str, name: str, count: int) -> Union[Dict, List]:
filename = "{}.{}".format(os.path.join(path, name), count)
try:
with open(filename) as f:
result = json.loads(f.read())
except IOError:
raise NotImplementedError("You can provide mocked data in {}".format(filename))
if "exception" in result:
raise_exception(result)
assert False
else:
return result
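# A minimal sketch of what a mocked data file might contain (file name and values below
# are illustrative, not shipped with the library). For the first get_facts() call with
# path="mocked_data", the file read would be "mocked_data/get_facts.1", e.g.:
#
#     {"hostname": "router1", "os_version": "1.0"}
#
# To simulate a failure instead, the JSON can name an exception for raise_exception():
#
#     {"exception": "napalm.base.exceptions.ConnectionException",
#      "args": ["boom"], "kwargs": {}}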
class MockDevice(object):
def __init__(self, parent: NetworkDriver, profile: str) -> None:
self.parent = parent
self.profile = profile
def run_commands(self, commands: List[str]) -> str:
"""Mock for EOS"""
return_value = list(self.parent.cli(commands).values())[0]
assert isinstance(return_value, str)
return return_value
def show(self, command: str) -> str:
"""Mock for nxos"""
return self.run_commands([command])
class MockDriver(NetworkDriver):
def __init__(
self,
hostname: str,
username: str,
password: str,
timeout: int = 60,
optional_args: Optional[Dict] = None,
) -> None:
"""
Supported optional_args:
* path(str) - path to where the mocked files are located
* profile(list) - List of profiles to assign
"""
self.hostname = hostname
self.username = username
self.password = password
if not optional_args:
optional_args = {}
self.path = optional_args.get("path", "")
self.profile = optional_args.get("profile", [])
self.fail_on_open = optional_args.get("fail_on_open", False)
self.opened = False
self.calls: Dict[str, int] = {}
self.device = MockDevice(self, self.profile)
# None no action, True load_merge, False load_replace
self.merge: Optional[bool] = None
self.filename: Optional[str] = None
self.config: Optional[str] = None
self._pending_commits = False
def _count_calls(self, name: str) -> int:
current_count = self.calls.get(name, 0)
self.calls[name] = current_count + 1
return self.calls[name]
def _raise_if_closed(self) -> None:
if not self.opened:
raise napalm.base.exceptions.ConnectionClosedException("connection closed")
def open(self) -> None:
if self.fail_on_open:
raise napalm.base.exceptions.ConnectionException("You told me to do this")
self.opened = True
def close(self) -> None:
self.opened = False
def is_alive(self) -> models.AliveDict:
return {"is_alive": self.opened}
def cli(self, commands: List[str]) -> Dict[str, Union[str, Dict[str, Any]]]:
count = self._count_calls("cli")
result = {}
regexp = re.compile("[^a-zA-Z0-9]+")
for i, c in enumerate(commands):
sanitized = re.sub(regexp, "_", c)
name = "cli.{}.{}".format(count, sanitized)
filename = "{}.{}".format(os.path.join(self.path, name), i)
with open(filename, "r") as f:
result[c] = f.read()
return result # type: ignore
def load_merge_candidate(
self, filename: Optional[str] = None, config: Optional[str] = None
) -> None:
count = self._count_calls("load_merge_candidate")
self._raise_if_closed()
self.merge = True
self.filename = filename
self.config = config
mocked_data(self.path, "load_merge_candidate", count)
def load_replace_candidate(
self, filename: Optional[str] = None, config: Optional[str] = None
) -> None:
count = self._count_calls("load_replace_candidate")
self._raise_if_closed()
self.merge = False
self.filename = filename
self.config = config
mocked_data(self.path, "load_replace_candidate", count)
def compare_config(
self, filename: Optional[str] = None, config: Optional[str] = None
) -> str:
count = self._count_calls("compare_config")
self._raise_if_closed()
mocked = mocked_data(self.path, "compare_config", count)
assert isinstance(mocked, dict)
return mocked["diff"]
def commit_config(self, message: str = "", revert_in: Optional[int] = None) -> None:
count = self._count_calls("commit_config")
self._raise_if_closed()
if revert_in is not None:
if self.has_pending_commit():
raise napalm.base.exceptions.CommitError(
"Pending commit confirm already in process!"
)
else:
self._pending_commits = True
self.merge = None
self.filename = None
self.config = None
mocked_data(self.path, "commit_config", count)
def discard_config(self) -> None:
count = self._count_calls("discard_config")
self._raise_if_closed()
self.merge = None
self.filename = None
self.config = None
mocked_data(self.path, "discard_config", count)
def confirm_commit(self) -> None:
count = self._count_calls("confirm_commit")
self._raise_if_closed()
self.merge = None
self.filename = None
self.config = None
self._pending_commits = False
mocked_data(self.path, "confirm_commit", count)
def has_pending_commit(self) -> bool:
return self._pending_commits
def rollback(self) -> None:
self.config_session = None
self._pending_commits = False
def _rpc(self, get: str) -> str:
"""This one is only useful for junos."""
return_value = list(self.cli([get]).values())[0]
assert isinstance(return_value, str)
return return_value
def __getattribute__(self, name: str) -> Callable:
if is_mocked_method(name):
self._raise_if_closed()
count = self._count_calls(name)
return mocked_method(self.path, name, count)
else:
return object.__getattribute__(self, name)
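# A rough usage sketch (assumed file layout, not part of this module): every mocked getter
# reads "<path>/<method>.<call count>" as JSON, so with get_facts.1 present under
# ./mocked_data the flow would look like:
#
#     driver = MockDriver("hostname", "user", "password",
#                         optional_args={"path": "mocked_data"})
#     driver.open()
#     facts = driver.get_facts()  # loads mocked_data/get_facts.1
#     driver.close()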
|
|
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import ntpath
import posixpath
import sys
import collections
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(BUILD_TOOLS_DIR)))
MOCK_DIR = os.path.join(CHROME_SRC, 'third_party', 'pymock')
# For the mock library
sys.path.append(MOCK_DIR)
from mock import call, patch, Mock
sys.path.append(BUILD_TOOLS_DIR)
import build_artifacts
class BasePosixTestCase(unittest.TestCase):
def setUp(self):
self.addCleanup(patch.stopall)
patch('build_artifacts.PLATFORM', 'posix').start()
patch('build_artifacts.BUILD_ARCHIVE_DIR', '/archive_dir/').start()
patch('os.path.join', posixpath.join).start()
class PosixTestCase(BasePosixTestCase):
def setUp(self):
BasePosixTestCase.setUp(self)
def testGetToolchainNaClLib(self):
tests = [
(('glibc_x86', 'x86_32'), 'foo/x86_64-nacl/lib32'),
(('glibc_x86', 'x86_64'), 'foo/x86_64-nacl/lib'),
(('glibc_arm', 'arm'), 'foo/arm-nacl/lib'),
(('pnacl', None), 'foo/le32-nacl/lib'),
]
for test in tests:
self.assertEqual(
build_artifacts.GetToolchainNaClLib(test[0][0], 'foo', test[0][1]),
test[1])
def testGetGypBuiltLib(self):
tests = [
(('glibc_x86', 'x86_32'), 'foo/Release/gen/tc_glibc/lib32'),
(('glibc_x86', 'x86_64'), 'foo/Release/gen/tc_glibc/lib64'),
(('glibc_arm', 'arm'), 'foo/Release/gen/tc_glibc/libarm'),
(('pnacl', None), 'foo/Release/gen/tc_pnacl_newlib/lib')
]
for test in tests:
self.assertEqual(
build_artifacts.GetGypBuiltLib('foo', test[0][0], test[0][1]),
test[1])
def testGetGypToolchainLib(self):
tests = [
(('glibc_x86', 'x86_32'),
'foo/Release/gen/sdk/posix_x86/nacl_x86_glibc/x86_64-nacl/lib32'),
(('glibc_x86', 'x86_64'),
'foo/Release/gen/sdk/posix_x86/nacl_x86_glibc/x86_64-nacl/lib'),
(('glibc_arm', 'arm'),
'foo/Release/gen/sdk/posix_x86/nacl_arm_glibc/arm-nacl/lib'),
(('pnacl', None),
'foo/Release/gen/sdk/posix_x86/pnacl_newlib/le32-nacl/lib'),
]
for tc_info, expected in tests:
self.assertEqual(
build_artifacts.GetGypToolchainLib('foo', tc_info[0], tc_info[1]),
expected)
@patch('build_artifacts.all_archives', ['foo.tar.bz2', 'bar.tar.bz2'])
@patch('build_version.ChromeMajorVersion', Mock(return_value='40'))
@patch('build_version.ChromeRevision', Mock(return_value='302630'))
@patch('build_version.ChromeCommitPosition', Mock(return_value=
'1492c3d296476fe12cafecabba6ebabe-refs/heads/master@{#302630}'))
@patch('buildbot_common.Archive')
def testUploadArchives(self, archive_mock):
build_artifacts.UploadArchives()
cwd = '/archive_dir/'
bucket_path = 'native-client-sdk/archives/40-302630-1492c3d29'
archive_mock.assert_has_calls([
call('foo.tar.bz2', bucket_path, cwd=cwd, step_link=False),
call('foo.tar.bz2.sha1', bucket_path, cwd=cwd, step_link=False),
call('bar.tar.bz2', bucket_path, cwd=cwd, step_link=False),
call('bar.tar.bz2.sha1', bucket_path, cwd=cwd, step_link=False)
])
class GypNinjaPosixTestCase(BasePosixTestCase):
def setUp(self):
BasePosixTestCase.setUp(self)
patch('sys.executable', 'python').start()
patch('build_artifacts.SRC_DIR', 'src_dir').start()
patch('os.environ', {}).start()
self.run_mock = patch('buildbot_common.Run').start()
self.options_mock = patch('build_artifacts.options').start()
self.options_mock.mac_sdk = False
self.options_mock.no_arm_trusted = False
self.gyp_defines_base = []
def testSimple(self):
build_artifacts.GypNinjaBuild(
None, 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={'GYP_DEFINES': ' '.join(self.gyp_defines_base)}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
def testTargetArch(self):
build_artifacts.GypNinjaBuild(
'x64', 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={
'GYP_DEFINES': ' '.join(self.gyp_defines_base +
['target_arch=x64']),
}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
def testMultipleTargets(self):
build_artifacts.GypNinjaBuild(
None, 'gyp.py', 'foo.gyp', ['target1', 'target2'], 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={'GYP_DEFINES': ' '.join(self.gyp_defines_base)}),
call(['ninja', '-C', 'out_dir/Release', 'target1', 'target2'],
cwd='src_dir')
])
def testMacSdk(self):
build_artifacts.PLATFORM = 'mac'
self.options_mock.mac_sdk = '10.6'
build_artifacts.GypNinjaBuild(
None, 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={
'GYP_DEFINES': ' '.join(self.gyp_defines_base +
['mac_sdk=10.6']),
}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
def testArmLinux(self):
build_artifacts.PLATFORM = 'linux'
build_artifacts.GypNinjaBuild(
'arm', 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={
'GYP_CROSSCOMPILE': '1',
'GYP_DEFINES': ' '.join(self.gyp_defines_base +
['target_arch=arm']),
}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
def testNoArmTrusted(self):
build_artifacts.PLATFORM = 'linux'
self.options_mock.no_arm_trusted = True
build_artifacts.GypNinjaBuild(
'arm', 'gyp.py', 'foo.gyp', 'target', 'out_dir')
self.run_mock.assert_has_calls([
call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G',
'output_dir=out_dir'],
cwd='src_dir',
env={
'GYP_CROSSCOMPILE': '1',
'GYP_DEFINES': ' '.join(self.gyp_defines_base +
['target_arch=arm',
'disable_cross_trusted=1']),
}),
call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir')
])
class ArchivePosixTestCase(BasePosixTestCase):
def setUp(self):
BasePosixTestCase.setUp(self)
self.makedir_mock = patch('buildbot_common.MakeDir').start()
self.copyfile_mock = patch('buildbot_common.CopyFile').start()
self.copydir_mock = patch('buildbot_common.CopyDir').start()
self.isdir_mock = patch('os.path.isdir').start()
patch('os.path.exists', Mock(return_value=False)).start()
def dummy_isdir(path):
if path == '/archive_dir/posix_foo':
return True
return False
self.isdir_mock.side_effect = dummy_isdir
self.archive = build_artifacts.Archive('foo')
def testInit(self):
self.assertEqual(self.archive.name, 'posix_foo')
self.assertEqual(self.archive.archive_name, 'posix_foo.tar.bz2')
self.assertEqual(self.archive.archive_path,
'/archive_dir/posix_foo.tar.bz2')
self.assertEqual(self.archive.dirname, '/archive_dir/posix_foo')
self.makedir_mock.assert_called_once_with('/archive_dir/posix_foo')
@patch('glob.glob', Mock(side_effect=lambda x: [x]))
def testCopySimple(self):
self.archive.Copy('/copy_from', ['file1', 'file2'])
self.assertEqual(self.copydir_mock.call_count, 0)
self.copyfile_mock.assert_has_calls([
call('/copy_from/file1', '/archive_dir/posix_foo/file1'),
call('/copy_from/file2', '/archive_dir/posix_foo/file2')])
@patch('glob.glob')
def testCopyGlob(self, glob_mock):
glob_mock.return_value = ['/copy_from/foo', '/copy_from/bar']
self.archive.Copy('/copy_from', [('*', '')])
glob_mock.assert_called_once_with('/copy_from/*')
self.assertEqual(self.copydir_mock.call_count, 0)
self.copyfile_mock.assert_has_calls([
call('/copy_from/foo', '/archive_dir/posix_foo/'),
call('/copy_from/bar', '/archive_dir/posix_foo/')])
@patch('glob.glob', Mock(side_effect=lambda x: [x]))
def testCopyRename(self):
self.archive.Copy('/copy_from', [('file1', 'file1_renamed')])
self.assertEqual(self.copydir_mock.call_count, 0)
self.copyfile_mock.assert_called_once_with(
'/copy_from/file1', '/archive_dir/posix_foo/file1_renamed')
@patch('glob.glob', Mock(side_effect=lambda x: [x]))
def testCopyNewDir(self):
self.archive.Copy('/copy_from', [('file1', 'todir/')])
self.assertEqual(self.copydir_mock.call_count, 0)
self.copyfile_mock.assert_called_once_with(
'/copy_from/file1', '/archive_dir/posix_foo/todir/file1')
@patch('glob.glob', Mock(side_effect=lambda x: [x]))
def testCopyDir(self):
self.isdir_mock.side_effect = lambda _: True
self.archive.Copy('/copy_from', ['dirname'])
self.assertEqual(self.copyfile_mock.call_count, 0)
self.copydir_mock.assert_called_once_with(
'/copy_from/dirname', '/archive_dir/posix_foo/dirname')
class WinTestCase(unittest.TestCase):
def setUp(self):
patch('build_artifacts.PLATFORM', 'win').start()
patch('build_artifacts.BUILD_ARCHIVE_DIR', 'c:\\archive_dir\\').start()
patch('os.path.join', ntpath.join).start()
def tearDown(self):
patch.stopall()
@patch('os.path.exists', Mock(return_value=False))
@patch('buildbot_common.MakeDir')
def testArchiveInit(self, makedir_mock):
archive = build_artifacts.Archive('foo')
self.assertEqual(archive.name, 'win_foo')
self.assertEqual(archive.archive_name, 'win_foo.tar.bz2')
self.assertEqual(archive.archive_path, r'c:\archive_dir\win_foo.tar.bz2')
self.assertEqual(archive.dirname, r'c:\archive_dir\win_foo')
makedir_mock.assert_called_once_with(r'c:\archive_dir\win_foo')
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""
werkzeug._internal
~~~~~~~~~~~~~~~~~~
This module provides internally used helpers and constants.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import string
import inspect
from weakref import WeakKeyDictionary
from datetime import datetime, date
from itertools import chain
from werkzeug._compat import iter_bytes, text_type, BytesIO, int_to_byte, \
    range_type, to_native, integer_types
_logger = None
_empty_stream = BytesIO()
_signature_cache = WeakKeyDictionary()
_epoch_ord = date(1970, 1, 1).toordinal()
_cookie_params = set((b'expires', b'path', b'comment',
b'max-age', b'secure', b'httponly',
b'version'))
_legal_cookie_chars = (string.ascii_letters +
string.digits +
u"!#$%&'*+-.^_`|~:").encode('ascii')
_cookie_quoting_map = {
b',' : b'\\054',
b';' : b'\\073',
b'"' : b'\\"',
b'\\' : b'\\\\',
}
for _i in chain(range_type(32), range_type(127, 256)):
_cookie_quoting_map[int_to_byte(_i)] = ('\\%03o' % _i).encode('latin1')
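# e.g. b',' is quoted as the four bytes b'\\054' and b';' as b'\\073'; the loop above extends
# the map so control bytes (0-31) and high bytes (127-255) get the same three-digit octal escape.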
_octal_re = re.compile(b'\\\\[0-3][0-7][0-7]')
_quote_re = re.compile(b'[\\\\].')
_legal_cookie_chars_re = b'[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]'
_cookie_re = re.compile(b"""(?x)
(?P<key>[^=]+)
\s*=\s*
(?P<val>
"(?:[^\\\\"]|\\\\.)*" |
(?:.*?)
)
\s*;
""")
class _Missing(object):
def __repr__(self):
return 'no value'
def __reduce__(self):
return '_missing'
_missing = _Missing()
def _get_environ(obj):
env = getattr(obj, 'environ', obj)
assert isinstance(env, dict), \
'%r is not a WSGI environment (has to be a dict)' % type(obj).__name__
return env
def _log(type, message, *args, **kwargs):
"""Log into the internal werkzeug logger."""
global _logger
if _logger is None:
import logging
_logger = logging.getLogger('werkzeug')
# Only set up a default log handler if the
# end-user application didn't set anything up.
if not logging.root.handlers and _logger.level == logging.NOTSET:
_logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
_logger.addHandler(handler)
getattr(_logger, type)(message.rstrip(), *args, **kwargs)
def _parse_signature(func):
"""Return a signature object for the function."""
if hasattr(func, 'im_func'):
func = func.im_func
# if we have a cached validator for this function, return it
parse = _signature_cache.get(func)
if parse is not None:
return parse
# inspect the function signature and collect all the information
positional, vararg_var, kwarg_var, defaults = inspect.getargspec(func)
defaults = defaults or ()
arg_count = len(positional)
arguments = []
for idx, name in enumerate(positional):
if isinstance(name, list):
raise TypeError('cannot parse functions that unpack tuples '
'in the function signature')
try:
default = defaults[idx - arg_count]
except IndexError:
param = (name, False, None)
else:
param = (name, True, default)
arguments.append(param)
arguments = tuple(arguments)
def parse(args, kwargs):
new_args = []
missing = []
extra = {}
# consume as many arguments as positional as possible
for idx, (name, has_default, default) in enumerate(arguments):
try:
new_args.append(args[idx])
except IndexError:
try:
new_args.append(kwargs.pop(name))
except KeyError:
if has_default:
new_args.append(default)
else:
missing.append(name)
else:
if name in kwargs:
extra[name] = kwargs.pop(name)
# handle extra arguments
extra_positional = args[arg_count:]
if vararg_var is not None:
new_args.extend(extra_positional)
extra_positional = ()
        if kwargs and kwarg_var is None:
extra.update(kwargs)
kwargs = {}
return new_args, kwargs, missing, extra, extra_positional, \
arguments, vararg_var, kwarg_var
_signature_cache[func] = parse
return parse
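# Minimal usage sketch (illustrative only, not part of the public API): the
# returned ``parse`` callable matches call arguments against the signature,
# e.g.
#
#   parse = _parse_signature(lambda a, b=2: None)
#   new_args, kwargs, missing, extra, extra_pos, spec, va, kw = parse((1,), {'c': 3})
#   # new_args == [1, 2]; missing == []; extra == {'c': 3}; kwargs == {}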
def _date_to_unix(arg):
"""Converts a timetuple, integer or datetime object into the seconds from
epoch in utc.
"""
if isinstance(arg, datetime):
arg = arg.utctimetuple()
    elif isinstance(arg, integer_types + (float,)):
return int(arg)
year, month, day, hour, minute, second = arg[:6]
days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
hours = days * 24 + hour
minutes = hours * 60 + minute
seconds = minutes * 60 + second
return seconds
class _DictAccessorProperty(object):
"""Baseclass for `environ_property` and `header_property`."""
read_only = False
def __init__(self, name, default=None, load_func=None, dump_func=None,
read_only=None, doc=None):
self.name = name
self.default = default
self.load_func = load_func
self.dump_func = dump_func
if read_only is not None:
self.read_only = read_only
self.__doc__ = doc
def __get__(self, obj, type=None):
if obj is None:
return self
storage = self.lookup(obj)
if self.name not in storage:
return self.default
rv = storage[self.name]
if self.load_func is not None:
try:
rv = self.load_func(rv)
except (ValueError, TypeError):
rv = self.default
return rv
def __set__(self, obj, value):
if self.read_only:
raise AttributeError('read only property')
if self.dump_func is not None:
value = self.dump_func(value)
self.lookup(obj)[self.name] = value
def __delete__(self, obj):
if self.read_only:
raise AttributeError('read only property')
self.lookup(obj).pop(self.name, None)
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self.name
)
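# Minimal subclass sketch (illustrative): concrete subclasses such as
# Werkzeug's `environ_property` supply a `lookup` method that returns the
# backing mapping, e.g.
#
#   class environ_property(_DictAccessorProperty):
#       read_only = True
#       def lookup(self, obj):
#           return obj.environ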
def _cookie_quote(b):
buf = bytearray()
all_legal = True
_lookup = _cookie_quoting_map.get
_push = buf.extend
for char in iter_bytes(b):
if char not in _legal_cookie_chars:
all_legal = False
char = _lookup(char, char)
_push(char)
if all_legal:
return bytes(buf)
return bytes(b'"' + buf + b'"')
def _cookie_unquote(b):
if len(b) < 2:
return b
if b[:1] != b'"' or b[-1:] != b'"':
return b
b = b[1:-1]
i = 0
n = len(b)
rv = bytearray()
_push = rv.extend
while 0 <= i < n:
o_match = _octal_re.search(b, i)
q_match = _quote_re.search(b, i)
if not o_match and not q_match:
rv.extend(b[i:])
break
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j):
_push(b[i:k])
_push(b[k + 1])
i = k + 2
else:
_push(b[i:j])
rv.append(int(b[j + 1:j + 4], 8))
i = j + 4
return bytes(rv)
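# Illustrative round trip: bytes outside _legal_cookie_chars are octal-escaped
# and the value is wrapped in double quotes, e.g.
#
#   _cookie_quote(b'hello')         # == b'hello'
#   _cookie_quote(b'a;b')           # == b'"a\\073b"'
#   _cookie_unquote(b'"a\\073b"')   # == b'a;b'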
def _cookie_parse_impl(b):
"""Lowlevel cookie parsing facility that operates on bytes."""
i = 0
n = len(b)
while i < n:
match = _cookie_re.search(b + b';', i)
if not match:
break
key = match.group('key').strip()
value = match.group('val')
i = match.end(0)
# Ignore parameters. We have no interest in them.
if key.lower() not in _cookie_params:
yield _cookie_unquote(key), _cookie_unquote(value)
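# Illustrative example: known cookie attributes (path, expires, ...) are
# skipped and values are unquoted, e.g.
#
#   list(_cookie_parse_impl(b'a=b; path=/; c="d e"'))
#   # == [(b'a', b'b'), (b'c', b'd e')]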
def _encode_idna(domain):
# If we're given bytes, make sure they fit into ASCII
if not isinstance(domain, text_type):
domain.decode('ascii')
return domain
# Otherwise check if it's already ascii, then return
try:
return domain.encode('ascii')
except UnicodeError:
pass
# Otherwise encode each part separately
parts = domain.split('.')
for idx, part in enumerate(parts):
parts[idx] = part.encode('idna')
return b'.'.join(parts)
def _decode_idna(domain):
    # If the input is a string, try to encode it to ascii to
    # do the idna decoding. If that fails because of a
    # unicode error, then we already have a decoded idna domain
if isinstance(domain, text_type):
try:
domain = domain.encode('ascii')
except UnicodeError:
return domain
# Decode each part separately. If a part fails, try to
# decode it with ascii and silently ignore errors. This makes
# most sense because the idna codec does not have error handling
parts = domain.split(b'.')
for idx, part in enumerate(parts):
try:
parts[idx] = part.decode('idna')
except UnicodeError:
parts[idx] = part.decode('ascii', 'ignore')
return '.'.join(parts)
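# Illustrative round trip (relies on the stdlib 'idna' codec), e.g.
#
#   _encode_idna(u'bücher.example')          # == b'xn--bcher-kva.example'
#   _decode_idna(b'xn--bcher-kva.example')   # == u'bücher.example'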
def _make_cookie_domain(domain):
if domain is None:
return None
domain = _encode_idna(domain)
if b':' in domain:
domain = domain.split(b':', 1)[0]
if b'.' in domain:
return domain
raise ValueError(
        'Setting \'domain\' for a cookie on a server running locally (ex: '
        'localhost) is not supported by complying browsers. You should '
'have something like: \'127.0.0.1 localhost dev.localhost\' on '
'your hosts file and then point your server to run on '
'\'dev.localhost\' and also set \'domain\' for \'dev.localhost\''
)
def _easteregg(app=None):
"""Like the name says. But who knows how it works?"""
def bzzzzzzz(gyver):
import base64
import zlib
return zlib.decompress(base64.b64decode(gyver)).decode('ascii')
gyver = u'\n'.join([x + (77 - len(x)) * u' ' for x in bzzzzzzz(b'''
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t''').splitlines()])
def easteregged(environ, start_response):
def injecting_start_response(status, headers, exc_info=None):
headers.append(('X-Powered-By', 'Werkzeug'))
return start_response(status, headers, exc_info)
if app is not None and environ.get('QUERY_STRING') != 'macgybarchakku':
return app(environ, injecting_start_response)
injecting_start_response('200 OK', [('Content-Type', 'text/html')])
return [(u'''
<!DOCTYPE html>
<html>
<head>
<title>About Werkzeug</title>
<style type="text/css">
body { font: 15px Georgia, serif; text-align: center; }
a { color: #333; text-decoration: none; }
h1 { font-size: 30px; margin: 20px 0 10px 0; }
p { margin: 0 0 30px 0; }
pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>%s\n\n\n</pre>
</body>
</html>''' % gyver).encode('latin1')]
return easteregged
|
|
#!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for mb_validate.py."""
from __future__ import print_function
from __future__ import absolute_import
import sys
import ast
import os
import unittest
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..'))
from mb import mb
from mb import mb_unittest
from mb.lib import validation
TEST_UNREFERENCED_MIXIN_CONFIG = """\
{
'public_artifact_builders': {},
'configs': {
'rel_bot_1': ['rel'],
'rel_bot_2': ['rel'],
},
'builder_groups': {
'fake_builder_group_a': {
'fake_builder_a': 'rel_bot_1',
'fake_builder_b': 'rel_bot_2',
},
},
'mixins': {
'unreferenced_mixin': {
'gn_args': 'proprietary_codecs=true',
},
'rel': {
'gn_args': 'is_debug=false',
},
},
}
"""
TEST_UNKNOWNMIXIN_CONFIG = """\
{
'public_artifact_builders': {},
'configs': {
'rel_bot_1': ['rel'],
'rel_bot_2': ['rel', 'unknown_mixin'],
},
'builder_groups': {
'fake_builder_group_a': {
'fake_builder_a': 'rel_bot_1',
'fake_builder_b': 'rel_bot_2',
},
},
'mixins': {
'rel': {
'gn_args': 'is_debug=false',
},
},
}
"""
TEST_UNKNOWN_NESTED_MIXIN_CONFIG = """\
{
'public_artifact_builders': {},
'configs': {
'rel_bot_1': ['rel', 'nested_mixin'],
'rel_bot_2': ['rel'],
},
'builder_groups': {
'fake_builder_group_a': {
'fake_builder_a': 'rel_bot_1',
'fake_builder_b': 'rel_bot_2',
},
},
'mixins': {
'nested_mixin': {
'mixins': {
'unknown_mixin': {
'gn_args': 'proprietary_codecs=true',
},
},
},
'rel': {
'gn_args': 'is_debug=false',
},
},
}
"""
class UnitTest(unittest.TestCase):
def test_GetAllConfigs(self):
configs = ast.literal_eval(mb_unittest.TEST_CONFIG)
all_configs = validation.GetAllConfigs(configs['builder_groups'])
self.assertEqual(all_configs['rel_bot'], 'fake_builder_group')
self.assertEqual(all_configs['debug_goma'], 'fake_builder_group')
def test_CheckAllConfigsAndMixinsReferenced_ok(self):
configs = ast.literal_eval(mb_unittest.TEST_CONFIG)
errs = []
all_configs = validation.GetAllConfigs(configs['builder_groups'])
config_configs = configs['configs']
mixins = configs['mixins']
validation.CheckAllConfigsAndMixinsReferenced(errs, all_configs,
config_configs, mixins)
self.assertEqual(errs, [])
def test_CheckAllConfigsAndMixinsReferenced_unreferenced(self):
configs = ast.literal_eval(TEST_UNREFERENCED_MIXIN_CONFIG)
errs = []
all_configs = validation.GetAllConfigs(configs['builder_groups'])
config_configs = configs['configs']
mixins = configs['mixins']
validation.CheckAllConfigsAndMixinsReferenced(errs, all_configs,
config_configs, mixins)
self.assertIn('Unreferenced mixin "unreferenced_mixin".', errs)
def test_CheckAllConfigsAndMixinsReferenced_unknown(self):
configs = ast.literal_eval(TEST_UNKNOWNMIXIN_CONFIG)
errs = []
all_configs = validation.GetAllConfigs(configs['builder_groups'])
config_configs = configs['configs']
mixins = configs['mixins']
validation.CheckAllConfigsAndMixinsReferenced(errs, all_configs,
config_configs, mixins)
self.assertIn(
'Unknown mixin "unknown_mixin" '
'referenced by config "rel_bot_2".', errs)
def test_CheckAllConfigsAndMixinsReferenced_unknown_nested(self):
configs = ast.literal_eval(TEST_UNKNOWN_NESTED_MIXIN_CONFIG)
errs = []
all_configs = validation.GetAllConfigs(configs['builder_groups'])
config_configs = configs['configs']
mixins = configs['mixins']
validation.CheckAllConfigsAndMixinsReferenced(errs, all_configs,
config_configs, mixins)
self.assertIn(
'Unknown mixin "unknown_mixin" '
'referenced by mixin "nested_mixin".', errs)
def test_CheckAllConfigsAndMixinsReferenced_unused(self):
configs = ast.literal_eval(TEST_UNKNOWN_NESTED_MIXIN_CONFIG)
errs = []
all_configs = validation.GetAllConfigs(configs['builder_groups'])
config_configs = configs['configs']
mixins = configs['mixins']
validation.CheckAllConfigsAndMixinsReferenced(errs, all_configs,
config_configs, mixins)
self.assertIn(
'Unknown mixin "unknown_mixin" '
'referenced by mixin "nested_mixin".', errs)
def test_EnsureNoProprietaryMixins(self):
bad_configs = ast.literal_eval(mb_unittest.TEST_BAD_CONFIG)
errs = []
builder_groups = bad_configs['builder_groups']
mixins = bad_configs['mixins']
config_configs = bad_configs['configs']
validation.EnsureNoProprietaryMixins(errs, builder_groups, config_configs,
mixins)
self.assertIn(
'Public artifact builder "a" '
'can not contain the "chrome_with_codecs" mixin.', errs)
self.assertIn(
'Public artifact builder "b" '
'can not contain the "chrome_with_codecs" mixin.', errs)
self.assertEqual(len(errs), 2)
def test_CheckDuplicateConfigs_ok(self):
configs = ast.literal_eval(mb_unittest.TEST_CONFIG)
config_configs = configs['configs']
mixins = configs['mixins']
grouping = configs['builder_groups']
errs = []
validation.CheckDuplicateConfigs(errs, config_configs, mixins, grouping,
mb.FlattenConfig)
self.assertEqual(errs, [])
@unittest.skip('bla')
def test_CheckDuplicateConfigs_dups(self):
configs = ast.literal_eval(mb_unittest.TEST_DUP_CONFIG)
config_configs = configs['configs']
mixins = configs['mixins']
grouping = configs['builder_groups']
errs = []
validation.CheckDuplicateConfigs(errs, config_configs, mixins, grouping,
mb.FlattenConfig)
self.assertIn(
'Duplicate configs detected. When evaluated fully, the '
'following configs are all equivalent: \'some_config\', '
'\'some_other_config\'. Please consolidate these configs '
'into only one unique name per configuration value.', errs)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Deprecated/SConscript-build_dir.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that specifying a build_dir argument to SConscript still works.
"""
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re_dotall)
test.write('SConscript', """
SConscript('DummyScript', build_dir = 'build')
""")
test.write('DummyScript', """
""")
msg = """The build_dir keyword has been deprecated; use the variant_dir keyword instead."""
test.deprecated_warning('deprecated-build-dir', msg)
warning = '\nscons: warning: ' + TestSCons.re_escape(msg) \
+ '\n' + TestSCons.file_expr
all1 = test.workpath('test', 'build', 'var1', 'all')
all2 = test.workpath('test', 'build', 'var2', 'all')
all3 = test.workpath('test', 'build', 'var3', 'all')
all4 = test.workpath('test', 'build', 'var4', 'all')
all5 = test.workpath('build', 'var5', 'all')
all6 = test.workpath('build', 'var6', 'all')
all7 = test.workpath('build', 'var7', 'all')
all8 = test.workpath('build', 'var8', 'all')
all9 = test.workpath('test', 'build', 'var9', 'src', 'all')
test.subdir('test')
test.write(['test', 'SConstruct'], """
SetOption('warn', 'deprecated-build-dir')
src = Dir('src')
alt = Dir('alt')
var1 = Dir('build/var1')
var2 = Dir('build/var2')
var3 = Dir('build/var3')
var4 = Dir('build/var4')
var5 = Dir('../build/var5')
var6 = Dir('../build/var6')
var7 = Dir('../build/var7')
var8 = Dir('../build/var8')
var9 = Dir('../build/var9')
def cat(env, source, target):
target = str(target[0])
f = open(target, "wb")
for src in source:
f.write(open(str(src), "rb").read())
f.close()
env = Environment(BUILDERS={'Cat':Builder(action=cat)},
BUILD='build')
Export("env")
SConscript('src/SConscript', build_dir=var1)
SConscript('src/SConscript', build_dir='build/var2', src_dir=src)
SConscript('src/SConscript', build_dir='build/var3', duplicate=0)
#XXX We can't support var4 and var5 yet, because our VariantDir linkage
#XXX is to an entire source directory. We haven't yet generalized our
#XXX infrastructure to be able to take the SConscript file from one source
#XXX directory, but the rest of the files from a different one.
#XXX SConscript('src/SConscript', build_dir=var4, src_dir=alt, duplicate=0)
#XXX SConscript('src/SConscript', build_dir='../build/var5', src_dir='alt')
SConscript('src/SConscript', build_dir=var6)
SConscript('src/SConscript', build_dir=var7, src_dir=src, duplicate=0)
env.SConscript('src/SConscript', build_dir='../$BUILD/var8', duplicate=0)
# This tests the fact that if you specify a src_dir that is above
# the dir a SConscript is in, that we do the intuitive thing, i.e.,
# we set the path of the SConscript accordingly. The below is
# equivalent to saying:
#
# VariantDir('build/var9', '.')
# SConscript('build/var9/src/SConscript')
SConscript('src/SConscript', build_dir='build/var9', src_dir='.')
""")
test.subdir(['test', 'src'], ['test', 'alt'])
test.write(['test', 'src', 'SConscript'], """
Import("env")
env.Cat('aaa.out', 'aaa.in')
env.Cat('bbb.out', 'bbb.in')
env.Cat('ccc.out', 'ccc.in')
env.Cat('all', ['aaa.out', 'bbb.out', 'ccc.out'])
""")
test.write('test/src/aaa.in', "test/src/aaa.in\n")
test.write('test/src/bbb.in', "test/src/bbb.in\n")
test.write('test/src/ccc.in', "test/src/ccc.in\n")
test.write('test/alt/aaa.in', "test/alt/aaa.in\n")
test.write('test/alt/bbb.in', "test/alt/bbb.in\n")
test.write('test/alt/ccc.in', "test/alt/ccc.in\n")
test.run(chdir='test', arguments = '. ../build', stderr = 7*warning)
all_src = "test/src/aaa.in\ntest/src/bbb.in\ntest/src/ccc.in\n"
all_alt = "test/alt/aaa.in\ntest/alt/bbb.in\ntest/alt/ccc.in\n"
test.must_match(all1, all_src)
test.must_match(all2, all_src)
test.must_match(all3, all_src)
#XXX We can't support var4 and var5 yet, because our VariantDir linkage
#XXX is to an entire source directory. We haven't yet generalized our
#XXX infrastructure to be able to take the SConscript file from one source
#XXX directory, but the rest of the files from a different one.
#XXX test.must_match(all4, all_alt)
#XXX test.must_match(all5, all_alt)
test.must_match(all6, all_src)
test.must_match(all7, all_src)
test.must_match(all8, all_src)
test.must_match(all9, all_src)
import os
import stat
def equal_stats(x,y):
x = os.stat(x)
y = os.stat(y)
return (stat.S_IMODE(x[stat.ST_MODE]) == stat.S_IMODE(y[stat.ST_MODE]) and
x[stat.ST_MTIME] == y[stat.ST_MTIME])
# Make sure we did duplicate the source files in build/var1,
# and that their stats are the same:
for file in ['aaa.in', 'bbb.in', 'ccc.in']:
test.must_exist(test.workpath('test', 'build', 'var1', file))
test.fail_test(not equal_stats(test.workpath('test', 'build', 'var1', file),
test.workpath('test', 'src', file)))
# Make sure we did duplicate the source files in build/var2,
# and that their stats are the same:
for file in ['aaa.in', 'bbb.in', 'ccc.in']:
test.must_exist(test.workpath('test', 'build', 'var2', file))
test.fail_test(not equal_stats(test.workpath('test', 'build', 'var2', file),
test.workpath('test', 'src', file)))
# Make sure we didn't duplicate the source files in build/var3.
test.must_not_exist(test.workpath('test', 'build', 'var3', 'aaa.in'))
test.must_not_exist(test.workpath('test', 'build', 'var3', 'bbb.in'))
test.must_not_exist(test.workpath('test', 'build', 'var3', 'ccc.in'))
#XXX We can't support var4 and var5 yet, because our VariantDir linkage
#XXX is to an entire source directory. We haven't yet generalized our
#XXX infrastructure to be able to take the SConscript file from one source
#XXX directory, but the rest of the files from a different one.
#XXX Make sure we didn't duplicate the source files in build/var4.
#XXXtest.must_not_exist(test.workpath('test', 'build', 'var4', 'aaa.in'))
#XXXtest.must_not_exist(test.workpath('test', 'build', 'var4', 'bbb.in'))
#XXXtest.must_not_exist(test.workpath('test', 'build', 'var4', 'ccc.in'))
#XXX We can't support var4 and var5 yet, because our VariantDir linkage
#XXX is to an entire source directory. We haven't yet generalized our
#XXX infrastructure to be able to take the SConscript file from one source
#XXX directory, but the rest of the files from a different one.
#XXX Make sure we did duplicate the source files in build/var5,
#XXX and that their stats are the same:
#XXXfor file in ['aaa.in', 'bbb.in', 'ccc.in']:
#XXX test.must_exist(test.workpath('build', 'var5', file))
#XXX test.fail_test(not equal_stats(test.workpath('build', 'var5', file),
#XXX test.workpath('test', 'src', file)))
# Make sure we did duplicate the source files in build/var6,
# and that their stats are the same:
for file in ['aaa.in', 'bbb.in', 'ccc.in']:
test.must_exist(test.workpath('build', 'var6', file))
test.fail_test(not equal_stats(test.workpath('build', 'var6', file),
test.workpath('test', 'src', file)))
# Make sure we didn't duplicate the source files in build/var7.
test.must_not_exist(test.workpath('build', 'var7', 'aaa.in'))
test.must_not_exist(test.workpath('build', 'var7', 'bbb.in'))
test.must_not_exist(test.workpath('build', 'var7', 'ccc.in'))
# Make sure we didn't duplicate the source files in build/var8.
test.must_not_exist(test.workpath('build', 'var8', 'aaa.in'))
test.must_not_exist(test.workpath('build', 'var8', 'bbb.in'))
test.must_not_exist(test.workpath('build', 'var8', 'ccc.in'))
###################
test.subdir('test2')
test.write(['test2', 'SConstruct'], """\
SConscript('SConscript', build_dir='Build', src_dir='.', duplicate=0)
""")
test.write(['test2', 'SConscript'], """\
env = Environment()
foo_obj = env.Object('foo.c')
env.Program('foo', [foo_obj, 'bar.c'])
""")
test.write(['test2', 'bar.c'], r"""
#include <stdio.h>
#include <stdlib.h>
void
bar(void) {
printf("bar.c\n");
}
""")
test.write(['test2', 'foo.c'], r"""
#include <stdio.h>
#include <stdlib.h>
extern void
bar(void);
int
main(int argc, char *argv[]) {
bar();
printf("foo.c\n");
}
""")
test.run(chdir="test2", stderr = warning)
_obj = TestSCons._obj
test.must_not_exist(test.workpath('test2', 'foo' + _obj))
test.must_not_exist(test.workpath('test2', 'bar' + _obj))
test.must_exist(test.workpath('test2', 'Build', 'foo' + _obj))
test.must_exist(test.workpath('test2', 'Build', 'bar' + _obj))
###################
# Make sure that directories for subsidiary SConscript() calls
# in a build_dir get created if they don't already exist.
test.subdir('test3')
test.subdir(['test3', 'src'], ['test3', 'src', '_glscry'])
test.write(['test3', 'SConstruct'], """\
SConscript(dirs=['src'], build_dir='build', duplicate=0)
""")
test.write(['test3', 'src', 'SConscript'], """\
SConscript(dirs=['_glscry'])
""")
test.write(['test3', 'src', '_glscry', 'SConscript'], """\
""")
test.write(['test3', 'src', 'file.in'], "file.in\n")
test.write(['test3', 'src', '_glscry', 'file.in'], "file.in\n")
test.run(chdir='test3', stderr = warning)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import shlex
import sys
import time
from io import StringIO
from typing import Any, Dict, Optional
import paramiko
if sys.version_info >= (3, 8):
from functools import cached_property
else:
from cached_property import cached_property
from google.api_core.retry import exponential_sleep_generator
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.compute import ComputeEngineHook
from airflow.providers.google.cloud.hooks.os_login import OSLoginHook
from airflow.providers.ssh.hooks.ssh import SSHHook
class _GCloudAuthorizedSSHClient(paramiko.SSHClient):
"""SSH Client that maintains the context for gcloud authorization during the connection"""
def __init__(self, google_hook, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ssh_client = paramiko.SSHClient()
self.google_hook = google_hook
self.decorator = None
def connect(self, *args, **kwargs):
self.decorator = self.google_hook.provide_authorized_gcloud()
self.decorator.__enter__()
return super().connect(*args, **kwargs)
def close(self):
if self.decorator:
self.decorator.__exit__(None, None, None)
self.decorator = None
return super().close()
def __exit__(self, type_, value, traceback):
if self.decorator:
self.decorator.__exit__(type_, value, traceback)
self.decorator = None
return super().__exit__(type_, value, traceback)
class ComputeEngineSSHHook(SSHHook):
"""
    Hook to connect to a remote instance in Google Compute Engine.
:param instance_name: The name of the Compute Engine instance
:param zone: The zone of the Compute Engine instance
:param user: The name of the user on which the login attempt will be made
:param project_id: The project ID of the remote instance
:param gcp_conn_id: The connection id to use when fetching connection info
:param hostname: The hostname of the target instance. If it is not passed, it will be detected
automatically.
:param use_iap_tunnel: Whether to connect through IAP tunnel
:param use_internal_ip: Whether to connect using internal IP
:param use_oslogin: Whether to manage keys using OsLogin API. If false,
keys are managed using instance metadata
:param expire_time: The maximum amount of time in seconds before the private key expires
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
"""
conn_name_attr = 'gcp_conn_id'
default_conn_name = 'google_cloud_ssh_default'
conn_type = 'gcpssh'
hook_name = 'Google Cloud SSH'
@staticmethod
def get_ui_field_behaviour() -> Dict[str, Any]:
return {
"hidden_fields": ['host', 'schema', 'login', 'password', 'port', 'extra'],
"relabeling": {},
}
def __init__(
self,
gcp_conn_id: str = 'google_cloud_default',
instance_name: Optional[str] = None,
zone: Optional[str] = None,
user: Optional[str] = 'root',
project_id: Optional[str] = None,
hostname: Optional[str] = None,
use_internal_ip: bool = False,
use_iap_tunnel: bool = False,
use_oslogin: bool = True,
expire_time: int = 300,
delegate_to: Optional[str] = None,
) -> None:
# Ignore original constructor
# super().__init__()
self.instance_name = instance_name
self.zone = zone
self.user = user
self.project_id = project_id
self.hostname = hostname
self.use_internal_ip = use_internal_ip
self.use_iap_tunnel = use_iap_tunnel
self.use_oslogin = use_oslogin
self.expire_time = expire_time
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self._conn: Optional[Any] = None
@cached_property
def _oslogin_hook(self) -> OSLoginHook:
return OSLoginHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
@cached_property
def _compute_hook(self) -> ComputeEngineHook:
return ComputeEngineHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
def _load_connection_config(self):
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == 'false':
return False
elif value.lower() == 'true':
return True
return False
def intify(key, value, default):
if value is None:
return default
if isinstance(value, str) and value.strip() == '':
return default
try:
return int(value)
except ValueError:
raise AirflowException(
f"The {key} field should be a integer. "
f"Current value: \"{value}\" (type: {type(value)}). "
f"Please check the connection configuration."
)
conn = self.get_connection(self.gcp_conn_id)
if conn and conn.conn_type == "gcpssh":
self.instance_name = self._compute_hook._get_field("instance_name", self.instance_name)
self.zone = self._compute_hook._get_field("zone", self.zone)
self.user = conn.login if conn.login else self.user
# self.project_id is skipped intentionally
self.hostname = conn.host if conn.host else self.hostname
self.use_internal_ip = _boolify(self._compute_hook._get_field("use_internal_ip"))
self.use_iap_tunnel = _boolify(self._compute_hook._get_field("use_iap_tunnel"))
self.use_oslogin = _boolify(self._compute_hook._get_field("use_oslogin"))
self.expire_time = intify(
"expire_time",
self._compute_hook._get_field("expire_time"),
self.expire_time,
)
def get_conn(self) -> paramiko.SSHClient:
"""Return SSH connection."""
self._load_connection_config()
if not self.project_id:
self.project_id = self._compute_hook.project_id
missing_fields = [k for k in ["instance_name", "zone", "project_id"] if not getattr(self, k)]
if not self.instance_name or not self.zone or not self.project_id:
raise AirflowException(
f"Required parameters are missing: {missing_fields}. These parameters be passed either as "
"keyword parameter or as extra field in Airflow connection definition. Both are not set!"
)
self.log.info(
"Connecting to instance: instance_name=%s, user=%s, zone=%s, "
"use_internal_ip=%s, use_iap_tunnel=%s, use_os_login=%s",
self.instance_name,
self.user,
self.zone,
self.use_internal_ip,
self.use_iap_tunnel,
self.use_oslogin,
)
if not self.hostname:
hostname = self._compute_hook.get_instance_address(
zone=self.zone,
resource_id=self.instance_name,
project_id=self.project_id,
use_internal_ip=self.use_internal_ip or self.use_iap_tunnel,
)
else:
hostname = self.hostname
privkey, pubkey = self._generate_ssh_key(self.user)
if self.use_oslogin:
user = self._authorize_os_login(pubkey)
else:
user = self.user
self._authorize_compute_engine_instance_metadata(pubkey)
proxy_command = None
if self.use_iap_tunnel:
proxy_command_args = [
'gcloud',
'compute',
'start-iap-tunnel',
str(self.instance_name),
'22',
'--listen-on-stdin',
f'--project={self.project_id}',
f'--zone={self.zone}',
'--verbosity=warning',
]
proxy_command = " ".join(shlex.quote(arg) for arg in proxy_command_args)
sshclient = self._connect_to_instance(user, hostname, privkey, proxy_command)
return sshclient
def _connect_to_instance(self, user, hostname, pkey, proxy_command) -> paramiko.SSHClient:
self.log.info("Opening remote connection to host: username=%s, hostname=%s", user, hostname)
max_time_to_wait = 10
for time_to_wait in exponential_sleep_generator(initial=1, maximum=max_time_to_wait):
try:
client = _GCloudAuthorizedSSHClient(self._compute_hook)
# Default is RejectPolicy
                # No known-host checking since we are not storing the private key
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
hostname=hostname,
username=user,
pkey=pkey,
sock=paramiko.ProxyCommand(proxy_command) if proxy_command else None,
look_for_keys=False,
)
return client
except paramiko.SSHException:
# exponential_sleep_generator is an infinite generator, so we need to
# check the end condition.
if time_to_wait == max_time_to_wait:
raise
self.log.info("Failed to connect. Waiting %ds to retry", time_to_wait)
time.sleep(time_to_wait)
raise AirflowException("Caa not connect to instance")
def _authorize_compute_engine_instance_metadata(self, pubkey):
self.log.info("Appending SSH public key to instance metadata")
instance_info = self._compute_hook.get_instance_info(
zone=self.zone, resource_id=self.instance_name, project_id=self.project_id
)
keys = self.user + ":" + pubkey + "\n"
metadata = instance_info['metadata']
items = metadata.get("items", [])
for item in items:
if item.get("key") == "ssh-keys":
keys += item["value"]
item['value'] = keys
break
else:
new_dict = dict(key='ssh-keys', value=keys)
metadata['items'] = [new_dict]
self._compute_hook.set_instance_metadata(
zone=self.zone, resource_id=self.instance_name, metadata=metadata, project_id=self.project_id
)
def _authorize_os_login(self, pubkey):
username = self._oslogin_hook._get_credentials_email()
self.log.info("Importing SSH public key using OSLogin: user=%s", username)
expiration = int((time.time() + self.expire_time) * 1000000)
ssh_public_key = {"key": pubkey, "expiration_time_usec": expiration}
response = self._oslogin_hook.import_ssh_public_key(
user=username, ssh_public_key=ssh_public_key, project_id=self.project_id
)
profile = response.login_profile
account = profile.posix_accounts[0]
user = account.username
return user
def _generate_ssh_key(self, user):
try:
self.log.info("Generating ssh keys...")
pkey_file = StringIO()
pkey_obj = paramiko.RSAKey.generate(2048)
pkey_obj.write_private_key(pkey_file)
pubkey = f"{pkey_obj.get_name()} {pkey_obj.get_base64()} {user}"
return pkey_obj, pubkey
except (OSError, paramiko.SSHException) as err:
raise AirflowException(f"Error encountered creating ssh keys, {err}")
|
|
import socket
from os import mkdir
from os.path import join, exists
from sys import platform
from asyncio import sleep
from math import sqrt
from uuid import uuid4
from enum import Enum
from csv import DictReader
from cyrandom import choice, shuffle, uniform
from time import time
from pickle import dump as pickle_dump, load as pickle_load, HIGHEST_PROTOCOL
from geopy import Point
from geopy.distance import distance
from aiopogo import utilities as pgoapi_utils
from pogeo import get_distance
from . import bounds, sanitized as conf
# iPhones 5 + 5C (4S is really not playable)
IPHONES = {'iPhone5,1': 'N41AP',
'iPhone5,2': 'N42AP',
'iPhone5,3': 'N48AP',
'iPhone5,4': 'N49AP'}
class Units(Enum):
miles = 1
kilometers = 2
meters = 3
def best_factors(n):
return next(((i, n//i) for i in range(int(n**0.5), 0, -1) if n % i == 0))
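# Illustrative example: returns the factor pair closest to a square,
# e.g. best_factors(12) == (3, 4) and best_factors(9) == (3, 3).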
def percentage_split(seq, percentages):
percentages[-1] += 1.0 - sum(percentages)
prv = 0
size = len(seq)
cum_percentage = 0
for p in percentages:
cum_percentage += p
nxt = int(cum_percentage * size)
yield seq[prv:nxt]
prv = nxt
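# Illustrative example:
#   list(percentage_split(list(range(10)), [0.5, 0.5]))
#   # == [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]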
def get_start_coords(worker_no, grid=conf.GRID, bounds=bounds):
"""Returns center of square for given worker"""
per_column = int((grid[0] * grid[1]) / grid[0])
column = worker_no % per_column
row = int(worker_no / per_column)
part_lat = (bounds.south - bounds.north) / grid[0]
part_lon = (bounds.east - bounds.west) / grid[1]
start_lat = bounds.north + part_lat * row + part_lat / 2
start_lon = bounds.west + part_lon * column + part_lon / 2
return start_lat, start_lon
def float_range(start, end, step):
"""range for floats, also capable of iterating backwards"""
if start > end:
while end <= start:
yield start
start += -step
else:
while start <= end:
yield start
start += step
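# Illustrative example (iterates in either direction):
#   list(float_range(0, 1, 0.5))   # == [0, 0.5, 1.0]
#   list(float_range(1, 0, 0.5))   # == [1, 0.5, 0.0]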
def get_gains(dist=70):
"""Returns lat and lon gain
Gain is space between circles.
"""
start = Point(*bounds.center)
base = dist * sqrt(3)
height = base * sqrt(3) / 2
dis_a = distance(meters=base)
dis_h = distance(meters=height)
lon_gain = dis_a.destination(point=start, bearing=90).longitude
lat_gain = dis_h.destination(point=start, bearing=0).latitude
return abs(start.latitude - lat_gain), abs(start.longitude - lon_gain)
def round_coords(point, precision, _round=round):
return _round(point[0], precision), _round(point[1], precision)
def get_bootstrap_points(bounds):
coords = []
if bounds.multi:
for b in bounds.polygons:
coords.extend(get_bootstrap_points(b))
return coords
lat_gain, lon_gain = get_gains(conf.BOOTSTRAP_RADIUS)
west, east = bounds.west, bounds.east
bound = bool(bounds)
for map_row, lat in enumerate(
float_range(bounds.south, bounds.north, lat_gain)
):
row_start_lon = west
if map_row % 2 != 0:
row_start_lon -= 0.5 * lon_gain
for lon in float_range(row_start_lon, east, lon_gain):
point = lat, lon
if not bound or point in bounds:
coords.append(point)
shuffle(coords)
return coords
def get_device_info(account):
device_info = {'brand': 'Apple',
'device': 'iPhone',
'manufacturer': 'Apple'}
try:
if account['iOS'].startswith('1'):
device_info['product'] = 'iOS'
else:
device_info['product'] = 'iPhone OS'
device_info['hardware'] = account['model'] + '\x00'
device_info['model'] = IPHONES[account['model']] + '\x00'
except (KeyError, AttributeError):
account = generate_device_info(account)
return get_device_info(account)
device_info['version'] = account['iOS']
device_info['device_id'] = account['id']
return device_info
def generate_device_info(account):
ios8 = ('8.0', '8.0.1', '8.0.2', '8.1', '8.1.1', '8.1.2', '8.1.3', '8.2', '8.3', '8.4', '8.4.1')
ios9 = ('9.0', '9.0.1', '9.0.2', '9.1', '9.2', '9.2.1', '9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5')
# 10.0 was only for iPhone 7 and 7 Plus, and is rare
ios10 = ('10.0.1', '10.0.2', '10.0.3', '10.1', '10.1.1', '10.2', '10.2.1', '10.3', '10.3.1', '10.3.2', '10.3.3')
devices = tuple(IPHONES.keys())
account['model'] = choice(devices)
account['id'] = uuid4().hex
if account['model'] in ('iPhone9,1', 'iPhone9,2',
'iPhone9,3', 'iPhone9,4'):
account['iOS'] = choice(ios10)
elif account['model'] in ('iPhone8,1', 'iPhone8,2'):
account['iOS'] = choice(ios9 + ios10)
elif account['model'] == 'iPhone8,4':
# iPhone SE started on 9.3
account['iOS'] = choice(('9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5') + ios10)
else:
account['iOS'] = choice(ios8 + ios9 + ios10)
return account
def get_current_hour(now=None, _time=time):
now = now or _time()
return round(now - (now % 3600))
def time_until_time(seconds, seen=None, _time=time):
current_seconds = seen or _time() % 3600
if current_seconds > seconds:
return seconds + 3600 - current_seconds
elif current_seconds + 3600 < seconds:
return seconds - 3600 - current_seconds
else:
return seconds - current_seconds
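# Illustrative example: ``seconds`` and ``seen`` are offsets within the hour,
# e.g. time_until_time(1800, seen=600) == 1200, and
# time_until_time(600, seen=1800) == 2400 (the next hour's occurrence).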
def get_address():
if conf.MANAGER_ADDRESS:
return conf.MANAGER_ADDRESS
if platform == 'win32':
return r'\\.\pipe\monocle'
if hasattr(socket, 'AF_UNIX'):
return join(conf.DIRECTORY, 'monocle.sock')
return ('127.0.0.1', 5001)
def load_pickle(name, raise_exception=False):
location = join(conf.DIRECTORY, 'pickles', '{}.pickle'.format(name))
try:
with open(location, 'rb') as f:
return pickle_load(f)
except (FileNotFoundError, EOFError):
if raise_exception:
raise FileNotFoundError
else:
return None
def dump_pickle(name, var):
folder = join(conf.DIRECTORY, 'pickles')
try:
mkdir(folder)
except FileExistsError:
pass
except Exception as e:
raise OSError("Failed to create 'pickles' folder, please create it manually") from e
location = join(folder, '{}.pickle'.format(name))
with open(location, 'wb') as f:
pickle_dump(var, f, HIGHEST_PROTOCOL)
def randomize_point(point, amount=0.0003, randomize=uniform):
    '''Randomize a point by up to ~47 meters by default.'''
lat, lon = point
return (
randomize(lat - amount, lat + amount),
randomize(lon - amount, lon + amount)
)
def calc_pokemon_level(cp_multiplier):
if cp_multiplier < 0.734:
pokemon_level = (58.35178527 * cp_multiplier * cp_multiplier - 2.838007664 * cp_multiplier + 0.8539209906)
else:
pokemon_level = 171.0112688 * cp_multiplier - 95.20425243
pokemon_level = int((round(pokemon_level) * 2) / 2)
return pokemon_level
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of the `Sequential` model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.saving.saved_model import model_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import keras_export
SINGLE_LAYER_OUTPUT_ERROR_MSG = ('All layers in a Sequential model should have '
'a single output tensor. For multi-output '
'layers, use the functional API.')
@keras_export('keras.Sequential', 'keras.models.Sequential')
class Sequential(training.Model):
"""`Sequential` groups a linear stack of layers into a `tf.keras.Model`.
`Sequential` provides training and inference features on this model.
Examples:
>>> # Optionally, the first layer can receive an `input_shape` argument:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
>>> # Afterwards, we do automatic shape inference:
>>> model.add(tf.keras.layers.Dense(4))
>>> # This is identical to the following:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_dim=16))
>>> # And to the following:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, batch_input_shape=(None, 16)))
>>> # Note that you can also omit the `input_shape` argument.
>>> # In that case the model doesn't have any weights until the first call
>>> # to a training/evaluation method (since it isn't yet built):
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8))
>>> model.add(tf.keras.layers.Dense(4))
>>> # model.weights not created yet
>>> # Whereas if you specify the input shape, the model gets built
>>> # continuously as you are adding layers:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
>>> model.add(tf.keras.layers.Dense(4))
>>> len(model.weights)
4
>>> # When using the delayed-build pattern (no input shape specified), you can
>>> # choose to manually build your model by calling
>>> # `build(batch_input_shape)`:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8))
>>> model.add(tf.keras.layers.Dense(4))
>>> model.build((None, 16))
>>> len(model.weights)
4
```python
# Note that when using the delayed-build pattern (no input shape specified),
# the model gets built the first time you call `fit` (or other training and
# evaluation methods).
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8))
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
# This builds the model for the first time:
model.fit(x, y, batch_size=32, epochs=10)
```
"""
@trackable.no_automatic_dependency_tracking
def __init__(self, layers=None, name=None):
"""Creates a `Sequential` model instance.
Args:
layers: Optional list of layers to add to the model.
name: Optional name for the model.
"""
super(Sequential, self).__init__(name=name, autocast=False)
self.supports_masking = True
self._compute_output_and_mask_jointly = True
self._auto_track_sub_layers = False
self._layer_call_argspecs = {}
# Add to the model any layers passed to the constructor.
if layers:
if not isinstance(layers, (list, tuple)):
layers = [layers]
tf_utils.assert_no_legacy_layers(layers)
for layer in layers:
self.add(layer)
@property
def layers(self):
# Historically, `sequential.layers` only returns layers that were added
# via `add`, and omits the auto-generated `InputLayer` that comes at the
# bottom of the stack.
# `Trackable` manages the `_layers` attributes and does filtering
# over it.
layers = super(Sequential, self).layers
if layers and isinstance(layers[0], input_layer.InputLayer):
return layers[1:]
return layers[:]
@property
@trackable_layer_utils.cache_recursive_attribute('dynamic')
def dynamic(self):
return any(layer.dynamic for layer in self.layers)
@trackable.no_automatic_dependency_tracking
def add(self, layer):
"""Adds a layer instance on top of the layer stack.
Arguments:
layer: layer instance.
Raises:
TypeError: If `layer` is not a layer instance.
ValueError: In case the `layer` argument does not
know its input shape.
ValueError: In case the `layer` argument has
multiple output tensors, or is already connected
somewhere else (forbidden in `Sequential` models).
"""
# If we are passed a Keras tensor created by keras.Input(), we can extract
# the input layer from its keras history and use that without any loss of
# generality.
if hasattr(layer, '_keras_history'):
origin_layer = layer._keras_history[0]
if isinstance(origin_layer, input_layer.InputLayer):
layer = origin_layer
if not isinstance(layer, base_layer.Layer):
raise TypeError('The added layer must be '
'an instance of class Layer. '
'Found: ' + str(layer))
tf_utils.assert_no_legacy_layers([layer])
# This allows the added layer to broadcast mutations to the current
# layer, which is necessary to ensure cache correctness.
layer._attribute_sentinel.add_parent(self._attribute_sentinel)
self.built = False
set_inputs = False
if not self._layers:
if isinstance(layer, input_layer.InputLayer):
# Corner case where the user passes an InputLayer layer via `add`.
assert len(nest.flatten(layer._inbound_nodes[-1].output_tensors)) == 1
set_inputs = True
else:
batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)
if batch_shape:
# Instantiate an input layer.
x = input_layer.Input(
batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')
# This will build the current layer
# and create the node connecting the current layer
# to the input layer we just created.
layer(x)
set_inputs = True
if set_inputs:
# If an input layer (placeholder) is available.
if len(nest.flatten(layer._inbound_nodes[-1].output_tensors)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = [
nest.flatten(layer._inbound_nodes[-1].output_tensors)[0]
]
self.inputs = layer_utils.get_source_inputs(self.outputs[0])
elif self.outputs:
# If the model is being built continuously on top of an input layer:
# refresh its output.
output_tensor = layer(self.outputs[0])
if len(nest.flatten(output_tensor)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = [output_tensor]
if self.outputs:
# True if set_inputs or self._is_graph_network or if adding a layer
# to an already built deferred seq model.
self.built = True
if set_inputs or self._is_graph_network:
self._init_graph_network(self.inputs, self.outputs, name=self.name)
else:
self._layers.append(layer)
self._handle_deferred_layer_dependencies([layer])
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
# Different Model types add to `._layers` in different ways, so for safety
# we do a cache invalidation to make sure the changes are reflected.
self._attribute_sentinel.invalidate_all()
@trackable.no_automatic_dependency_tracking
def pop(self):
"""Removes the last layer in the model.
Raises:
TypeError: if there are no layers in the model.
"""
if not self.layers:
raise TypeError('There are no layers in the model.')
layer = self._layers.pop()
self._layer_call_argspecs.pop(layer)
self._attribute_sentinel.invalidate_all()
if not self.layers:
self.outputs = None
self.inputs = None
self.built = False
elif self._is_graph_network:
self.layers[-1]._outbound_nodes = []
self.outputs = [self.layers[-1].output]
self._init_graph_network(self.inputs, self.outputs, name=self.name)
self.built = True
@generic_utils.default
def build(self, input_shape=None):
if self._is_graph_network:
self._init_graph_network(self.inputs, self.outputs, name=self.name)
else:
if input_shape is None:
raise ValueError('You must provide an `input_shape` argument.')
input_shape = tuple(input_shape)
self._build_input_shape = input_shape
super(Sequential, self).build(input_shape)
self.built = True
def call(self, inputs, training=None, mask=None): # pylint: disable=redefined-outer-name
if self._build_input_shape is None:
input_shapes = nest.map_structure(_get_shape_tuple, inputs)
self._build_input_shape = input_shapes
if self._is_graph_network:
if not self.built:
self._init_graph_network(self.inputs, self.outputs, name=self.name)
return super(Sequential, self).call(inputs, training=training, mask=mask)
outputs = inputs # handle the corner case where self.layers is empty
for layer in self.layers:
# During each iteration, `inputs` are the inputs to `layer`, and `outputs`
# are the outputs of `layer` applied to `inputs`. At the end of each
# iteration `inputs` is set to `outputs` to prepare for the next layer.
kwargs = {}
argspec = self._layer_call_argspecs[layer].args
if 'mask' in argspec:
kwargs['mask'] = mask
if 'training' in argspec:
kwargs['training'] = training
outputs = layer(inputs, **kwargs)
if len(nest.flatten(outputs)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
# `outputs` will be the inputs to the next layer.
inputs = outputs
mask = outputs._keras_mask
return outputs
def compute_output_shape(self, input_shape):
shape = input_shape
for layer in self.layers:
shape = layer.compute_output_shape(shape)
return shape
def compute_mask(self, inputs, mask):
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
outputs = self.call(inputs, mask=mask)
return outputs._keras_mask
@deprecated('2021-01-01', 'Please use `model.predict()` instead.')
def predict_proba(self, x, batch_size=32, verbose=0):
"""Generates class probability predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A Numpy array of probability predictions.
"""
preds = self.predict(x, batch_size, verbose)
if preds.min() < 0. or preds.max() > 1.:
logging.warning('Network returning invalid probability values. '
'The last layer might not normalize predictions '
'into probabilities '
'(like softmax or sigmoid would).')
return preds
@deprecated('2021-01-01',
'Please use instead:'
'* `np.argmax(model.predict(x), axis=-1)`, '
' if your model does multi-class classification '
' (e.g. if it uses a `softmax` last-layer activation).'
'* `(model.predict(x) > 0.5).astype("int32")`, '
' if your model does binary classification '
' (e.g. if it uses a `sigmoid` last-layer activation).')
def predict_classes(self, x, batch_size=32, verbose=0):
"""Generate class predictions for the input samples.
The input samples are processed batch by batch.
Arguments:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A numpy array of class predictions.
"""
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
if proba.shape[-1] > 1:
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
def get_config(self):
layer_configs = []
for layer in self.layers:
layer_configs.append(generic_utils.serialize_keras_object(layer))
# When constructed using an `InputLayer` the first non-input layer may not
# have the shape information to reconstruct `Sequential` as a graph network.
if (self._is_graph_network and layer_configs and
'batch_input_shape' not in layer_configs[0]['config'] and
isinstance(self._layers[0], input_layer.InputLayer)):
batch_input_shape = self._layers[0]._batch_input_shape
layer_configs[0]['config']['batch_input_shape'] = batch_input_shape
config = {
'name': self.name,
'layers': copy.deepcopy(layer_configs)
}
if self._build_input_shape is not None:
config['build_input_shape'] = self._build_input_shape
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if 'name' in config:
name = config['name']
build_input_shape = config.get('build_input_shape')
layer_configs = config['layers']
else:
name = None
build_input_shape = None
layer_configs = config
model = cls(name=name)
for layer_config in layer_configs:
layer = layer_module.deserialize(layer_config,
custom_objects=custom_objects)
model.add(layer)
if (not model.inputs and build_input_shape and
isinstance(build_input_shape, (tuple, list))):
model.build(build_input_shape)
return model
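  # Illustrative note: `get_config()`/`from_config()` round-trip the
  # architecture only; weights are not serialized here and would need to be
  # copied separately, e.g. `clone.set_weights(model.get_weights())` after
  # `clone = Sequential.from_config(model.get_config())`.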
@property
def input_spec(self):
if self.layers and hasattr(self.layers[0], 'input_spec'):
return self.layers[0].input_spec
return None
@property
def _trackable_saved_model_saver(self):
return model_serialization.SequentialSavedModelSaver(self)
def _get_shape_tuple(t):
if hasattr(t, 'shape'):
shape = t.shape
if shape.rank is not None:
return tuple(shape.as_list())
return None
return None
|
|
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal as assert_close
from numpy.testing import (assert_array_equal, assert_raises,
assert_almost_equal)
import skimage
from skimage import data
from skimage import exposure
from skimage.exposure.exposure import intensity_range
from skimage.color import rgb2gray
from skimage.util.dtype import dtype_range
from skimage._shared._warnings import expected_warnings
# Test integer histograms
# =======================
def test_negative_overflow():
im = np.array([-1, 127], dtype=np.int8)
frequencies, bin_centers = exposure.histogram(im)
assert_array_equal(bin_centers, np.arange(-1, 128))
assert frequencies[0] == 1
assert frequencies[-1] == 1
assert_array_equal(frequencies[1:-1], 0)
def test_all_negative_image():
im = np.array([-128, -1], dtype=np.int8)
frequencies, bin_centers = exposure.histogram(im)
assert_array_equal(bin_centers, np.arange(-128, 0))
assert frequencies[0] == 1
assert frequencies[-1] == 1
assert_array_equal(frequencies[1:-1], 0)
# Test histogram equalization
# ===========================
np.random.seed(0)
test_img_int = data.camera()
# squeeze image intensities to lower image contrast
test_img = skimage.img_as_float(test_img_int)
test_img = exposure.rescale_intensity(test_img / 5. + 100)
def test_equalize_uint8_approx():
"""Check integer bins used for uint8 images."""
img_eq0 = exposure.equalize_hist(test_img_int)
img_eq1 = exposure.equalize_hist(test_img_int, nbins=3)
np.testing.assert_allclose(img_eq0, img_eq1)
def test_equalize_ubyte():
with expected_warnings(['precision loss']):
img = skimage.img_as_ubyte(test_img)
img_eq = exposure.equalize_hist(img)
cdf, bin_edges = exposure.cumulative_distribution(img_eq)
check_cdf_slope(cdf)
def test_equalize_float():
img = skimage.img_as_float(test_img)
img_eq = exposure.equalize_hist(img)
cdf, bin_edges = exposure.cumulative_distribution(img_eq)
check_cdf_slope(cdf)
def test_equalize_masked():
img = skimage.img_as_float(test_img)
mask = np.zeros(test_img.shape)
mask[50:150, 50:250] = 1
img_mask_eq = exposure.equalize_hist(img, mask=mask)
img_eq = exposure.equalize_hist(img)
cdf, bin_edges = exposure.cumulative_distribution(img_mask_eq)
check_cdf_slope(cdf)
assert not (img_eq == img_mask_eq).all()
def check_cdf_slope(cdf):
"""Slope of cdf which should equal 1 for an equalized histogram."""
norm_intensity = np.linspace(0, 1, len(cdf))
slope, intercept = np.polyfit(norm_intensity, cdf, 1)
assert 0.9 < slope < 1.1
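# Illustrative check (not a test): for a perfectly equalized image the cumulative
# distribution is the identity line, so the fit above recovers slope ~1, e.g.
#   cdf = np.linspace(0, 1, 256)
#   np.polyfit(np.linspace(0, 1, 256), cdf, 1)   # -> [~1.0, ~0.0]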
# Test intensity range
# ====================
def test_intensity_range_uint8():
image = np.array([0, 1], dtype=np.uint8)
input_and_expected = [('image', [0, 1]),
('dtype', [0, 255]),
((10, 20), [10, 20])]
for range_values, expected_values in input_and_expected:
out = intensity_range(image, range_values=range_values)
yield assert_array_equal, out, expected_values
def test_intensity_range_float():
image = np.array([0.1, 0.2], dtype=np.float64)
input_and_expected = [('image', [0.1, 0.2]),
('dtype', [-1, 1]),
((0.3, 0.4), [0.3, 0.4])]
for range_values, expected_values in input_and_expected:
out = intensity_range(image, range_values=range_values)
yield assert_array_equal, out, expected_values
def test_intensity_range_clipped_float():
image = np.array([0.1, 0.2], dtype=np.float64)
out = intensity_range(image, range_values='dtype', clip_negative=True)
assert_array_equal(out, (0, 1))
# Test rescale intensity
# ======================
uint10_max = 2**10 - 1
uint12_max = 2**12 - 1
uint14_max = 2**14 - 1
uint16_max = 2**16 - 1
def test_rescale_stretch():
image = np.array([51, 102, 153], dtype=np.uint8)
out = exposure.rescale_intensity(image)
assert out.dtype == np.uint8
assert_close(out, [0, 127, 255])
def test_rescale_shrink():
image = np.array([51., 102., 153.])
out = exposure.rescale_intensity(image)
assert_close(out, [0, 0.5, 1])
def test_rescale_in_range():
image = np.array([51., 102., 153.])
out = exposure.rescale_intensity(image, in_range=(0, 255))
assert_close(out, [0.2, 0.4, 0.6])
def test_rescale_in_range_clip():
image = np.array([51., 102., 153.])
out = exposure.rescale_intensity(image, in_range=(0, 102))
assert_close(out, [0.5, 1, 1])
def test_rescale_out_range():
image = np.array([-10, 0, 10], dtype=np.int8)
out = exposure.rescale_intensity(image, out_range=(0, 127))
assert out.dtype == np.int8
assert_close(out, [0, 63, 127])
def test_rescale_named_in_range():
image = np.array([0, uint10_max, uint10_max + 100], dtype=np.uint16)
out = exposure.rescale_intensity(image, in_range='uint10')
assert_close(out, [0, uint16_max, uint16_max])
def test_rescale_named_out_range():
image = np.array([0, uint16_max], dtype=np.uint16)
out = exposure.rescale_intensity(image, out_range='uint10')
assert_close(out, [0, uint10_max])
def test_rescale_uint12_limits():
image = np.array([0, uint16_max], dtype=np.uint16)
out = exposure.rescale_intensity(image, out_range='uint12')
assert_close(out, [0, uint12_max])
def test_rescale_uint14_limits():
image = np.array([0, uint16_max], dtype=np.uint16)
out = exposure.rescale_intensity(image, out_range='uint14')
assert_close(out, [0, uint14_max])
# Test adaptive histogram equalization
# ====================================
def test_adapthist_scalar():
"""Test a scalar uint8 image
"""
img = skimage.img_as_ubyte(data.moon())
adapted = exposure.equalize_adapthist(img, clip_limit=0.02)
assert adapted.min() == 0.0
assert adapted.max() == 1.0
assert img.shape == adapted.shape
full_scale = skimage.exposure.rescale_intensity(skimage.img_as_float(img))
assert_almost_equal = np.testing.assert_almost_equal
assert_almost_equal(peak_snr(full_scale, adapted), 101.2295, 3)
assert_almost_equal(norm_brightness_err(full_scale, adapted),
0.041, 3)
return img, adapted
def test_adapthist_grayscale():
"""Test a grayscale float image
"""
img = skimage.img_as_float(data.astronaut())
img = rgb2gray(img)
img = np.dstack((img, img, img))
with expected_warnings(['precision loss|non-contiguous input']):
adapted = exposure.equalize_adapthist(img, 10, 9, clip_limit=0.01,
nbins=128)
assert_almost_equal = np.testing.assert_almost_equal
assert img.shape == adapted.shape
assert_almost_equal(peak_snr(img, adapted), 97.6876, 3)
assert_almost_equal(norm_brightness_err(img, adapted), 0.0591, 3)
    return img, adapted
def test_adapthist_color():
"""Test an RGB color uint16 image
"""
img = skimage.img_as_uint(data.astronaut())
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
hist, bin_centers = exposure.histogram(img)
assert len(w) > 0
with expected_warnings(['precision loss']):
adapted = exposure.equalize_adapthist(img, clip_limit=0.01)
assert_almost_equal = np.testing.assert_almost_equal
assert adapted.min() == 0
assert adapted.max() == 1.0
assert img.shape == adapted.shape
full_scale = skimage.exposure.rescale_intensity(img)
assert_almost_equal(peak_snr(full_scale, adapted), 109.6, 1)
assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.02, 2)
    return img, adapted
def test_adapthist_alpha():
"""Test an RGBA color image
"""
img = skimage.img_as_float(data.astronaut())
alpha = np.ones((img.shape[0], img.shape[1]), dtype=float)
img = np.dstack((img, alpha))
with expected_warnings(['precision loss']):
adapted = exposure.equalize_adapthist(img)
assert adapted.shape != img.shape
img = img[:, :, :3]
full_scale = skimage.exposure.rescale_intensity(img)
assert img.shape == adapted.shape
assert_almost_equal = np.testing.assert_almost_equal
assert_almost_equal(peak_snr(full_scale, adapted), 109.60, 2)
assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.0235, 3)
def peak_snr(img1, img2):
"""Peak signal to noise ratio of two images
Parameters
----------
img1 : array-like
img2 : array-like
Returns
-------
peak_snr : float
Peak signal to noise ratio
"""
if img1.ndim == 3:
img1, img2 = rgb2gray(img1.copy()), rgb2gray(img2.copy())
img1 = skimage.img_as_float(img1)
img2 = skimage.img_as_float(img2)
mse = 1. / img1.size * np.square(img1 - img2).sum()
_, max_ = dtype_range[img1.dtype.type]
return 20 * np.log(max_ / mse)
def norm_brightness_err(img1, img2):
"""Normalized Absolute Mean Brightness Error between two images
Parameters
----------
img1 : array-like
img2 : array-like
Returns
-------
norm_brightness_error : float
Normalized absolute mean brightness error
"""
if img1.ndim == 3:
img1, img2 = rgb2gray(img1), rgb2gray(img2)
ambe = np.abs(img1.mean() - img2.mean())
nbe = ambe / dtype_range[img1.dtype.type][1]
return nbe
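# Worked example (made-up numbers, for illustration only): two uint8 images with
# mean brightness 120 and 125 give ambe = |120 - 125| = 5 and
# nbe = 5 / 255 ~= 0.0196, since dtype_range[np.uint8][1] == 255.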
# Test Gamma Correction
# =====================
def test_adjust_gamma_one():
"""Same image should be returned for gamma equal to one"""
image = np.random.uniform(0, 255, (8, 8))
result = exposure.adjust_gamma(image, 1)
assert_array_equal(result, image)
def test_adjust_gamma_zero():
"""White image should be returned for gamma equal to zero"""
image = np.random.uniform(0, 255, (8, 8))
result = exposure.adjust_gamma(image, 0)
dtype = image.dtype.type
assert_array_equal(result, dtype_range[dtype][1])
def test_adjust_gamma_less_one():
"""Verifying the output with expected results for gamma
correction with gamma equal to half"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 0, 31, 45, 55, 63, 71, 78, 84],
[ 90, 95, 100, 105, 110, 115, 119, 123],
[127, 131, 135, 139, 142, 146, 149, 153],
[156, 159, 162, 165, 168, 171, 174, 177],
[180, 183, 186, 188, 191, 194, 196, 199],
[201, 204, 206, 209, 211, 214, 216, 218],
[221, 223, 225, 228, 230, 232, 234, 236],
[238, 241, 243, 245, 247, 249, 251, 253]], dtype=np.uint8)
result = exposure.adjust_gamma(image, 0.5)
assert_array_equal(result, expected)
def test_adjust_gamma_greater_one():
"""Verifying the output with expected results for gamma
correction with gamma equal to two"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 0, 0, 0, 0, 1, 1, 2, 3],
[ 4, 5, 6, 7, 9, 10, 12, 14],
[ 16, 18, 20, 22, 25, 27, 30, 33],
[ 36, 39, 42, 45, 49, 52, 56, 60],
[ 64, 68, 72, 76, 81, 85, 90, 95],
[100, 105, 110, 116, 121, 127, 132, 138],
[144, 150, 156, 163, 169, 176, 182, 189],
[196, 203, 211, 218, 225, 233, 241, 249]], dtype=np.uint8)
result = exposure.adjust_gamma(image, 2)
assert_array_equal(result, expected)
def test_adjust_gamma_negative():
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
assert_raises(ValueError, exposure.adjust_gamma, image, -1)
# Test Logarithmic Correction
# ===========================
def test_adjust_log():
"""Verifying the output with expected results for logarithmic
    correction with the multiplier constant equal to unity"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 0, 5, 11, 16, 22, 27, 33, 38],
[ 43, 48, 53, 58, 63, 68, 73, 77],
[ 82, 86, 91, 95, 100, 104, 109, 113],
[117, 121, 125, 129, 133, 137, 141, 145],
[149, 153, 157, 160, 164, 168, 172, 175],
[179, 182, 186, 189, 193, 196, 199, 203],
[206, 209, 213, 216, 219, 222, 225, 228],
[231, 234, 238, 241, 244, 246, 249, 252]], dtype=np.uint8)
result = exposure.adjust_log(image, 1)
assert_array_equal(result, expected)
def test_adjust_inv_log():
"""Verifying the output with expected results for inverse logarithmic
    correction with the multiplier constant equal to unity"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 0, 2, 5, 8, 11, 14, 17, 20],
[ 23, 26, 29, 32, 35, 38, 41, 45],
[ 48, 51, 55, 58, 61, 65, 68, 72],
[ 76, 79, 83, 87, 90, 94, 98, 102],
[106, 110, 114, 118, 122, 126, 130, 134],
[138, 143, 147, 151, 156, 160, 165, 170],
[174, 179, 184, 188, 193, 198, 203, 208],
[213, 218, 224, 229, 234, 239, 245, 250]], dtype=np.uint8)
result = exposure.adjust_log(image, 1, True)
assert_array_equal(result, expected)
# Test Sigmoid Correction
# =======================
def test_adjust_sigmoid_cutoff_one():
"""Verifying the output with expected results for sigmoid correction
with cutoff equal to one and gain of 5"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 1, 1, 1, 2, 2, 2, 2, 2],
[ 3, 3, 3, 4, 4, 4, 5, 5],
[ 5, 6, 6, 7, 7, 8, 9, 10],
[ 10, 11, 12, 13, 14, 15, 16, 18],
[ 19, 20, 22, 24, 25, 27, 29, 32],
[ 34, 36, 39, 41, 44, 47, 50, 54],
[ 57, 61, 64, 68, 72, 76, 80, 85],
[ 89, 94, 99, 104, 108, 113, 118, 123]], dtype=np.uint8)
result = exposure.adjust_sigmoid(image, 1, 5)
assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_zero():
"""Verifying the output with expected results for sigmoid correction
with cutoff equal to zero and gain of 10"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[127, 137, 147, 156, 166, 175, 183, 191],
[198, 205, 211, 216, 221, 225, 229, 232],
[235, 238, 240, 242, 244, 245, 247, 248],
[249, 250, 250, 251, 251, 252, 252, 253],
[253, 253, 253, 253, 254, 254, 254, 254],
[254, 254, 254, 254, 254, 254, 254, 254],
[254, 254, 254, 254, 254, 254, 254, 254],
[254, 254, 254, 254, 254, 254, 254, 254]], dtype=np.uint8)
result = exposure.adjust_sigmoid(image, 0, 10)
assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_half():
"""Verifying the output with expected results for sigmoid correction
with cutoff equal to half and gain of 10"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[ 1, 1, 2, 2, 3, 3, 4, 5],
[ 5, 6, 7, 9, 10, 12, 14, 16],
[ 19, 22, 25, 29, 34, 39, 44, 50],
[ 57, 64, 72, 80, 89, 99, 108, 118],
[128, 138, 148, 158, 167, 176, 184, 192],
[199, 205, 211, 217, 221, 226, 229, 233],
[236, 238, 240, 242, 244, 246, 247, 248],
[249, 250, 250, 251, 251, 252, 252, 253]], dtype=np.uint8)
result = exposure.adjust_sigmoid(image, 0.5, 10)
assert_array_equal(result, expected)
def test_adjust_inv_sigmoid_cutoff_half():
"""Verifying the output with expected results for inverse sigmoid
correction with cutoff equal to half and gain of 10"""
image = np.arange(0, 255, 4, np.uint8).reshape(8,8)
expected = np.array([[253, 253, 252, 252, 251, 251, 250, 249],
[249, 248, 247, 245, 244, 242, 240, 238],
[235, 232, 229, 225, 220, 215, 210, 204],
[197, 190, 182, 174, 165, 155, 146, 136],
[126, 116, 106, 96, 87, 78, 70, 62],
[ 55, 49, 43, 37, 33, 28, 25, 21],
[ 18, 16, 14, 12, 10, 8, 7, 6],
[ 5, 4, 4, 3, 3, 2, 2, 1]], dtype=np.uint8)
result = exposure.adjust_sigmoid(image, 0.5, 10, True)
assert_array_equal(result, expected)
def test_negative():
image = np.arange(-10, 245, 4).reshape(8, 8).astype(np.double)
assert_raises(ValueError, exposure.adjust_gamma, image)
def test_is_low_contrast():
image = np.linspace(0, 0.04, 100)
assert exposure.is_low_contrast(image)
image[-1] = 1
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
image = (image * 255).astype(np.uint8)
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
image = (image.astype(np.uint16)) * 2**8
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
The IPython engine application
Authors:
* Brian Granger
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import json
import os
import sys
import time
import zmq
from zmq.eventloop import ioloop
from IPython.core.profiledir import ProfileDir
from IPython.parallel.apps.baseapp import (
BaseParallelApplication,
base_aliases,
base_flags,
catch_config_error,
)
from IPython.kernel.zmq.log import EnginePUBHandler
from IPython.kernel.zmq.ipkernel import Kernel
from IPython.kernel.zmq.kernelapp import IPKernelApp
from IPython.kernel.zmq.session import (
Session, session_aliases, session_flags
)
from IPython.kernel.zmq.zmqshell import ZMQInteractiveShell
from IPython.config.configurable import Configurable
from IPython.parallel.engine.engine import EngineFactory
from IPython.parallel.util import disambiguate_ip_address
from IPython.utils.importstring import import_item
from IPython.utils.py3compat import cast_bytes
from IPython.utils.traitlets import Bool, Unicode, Dict, List, Float, Instance
#-----------------------------------------------------------------------------
# Module level variables
#-----------------------------------------------------------------------------
_description = """Start an IPython engine for parallel computing.
IPython engines run in parallel and perform computations on behalf of a client
and controller. A controller needs to be started before the engines. The
engine can be configured using command line options or using a cluster
directory. Cluster directories contain config, log and security files and are
usually located in your ipython directory and named as "profile_name".
See the `profile` and `profile-dir` options for details.
"""
_examples = """
ipengine --ip=192.168.0.1 --port=1000 # connect to hub at ip and port
ipengine --log-to-file --log-level=DEBUG # log to a file with DEBUG verbosity
"""
#-----------------------------------------------------------------------------
# MPI configuration
#-----------------------------------------------------------------------------
mpi4py_init = """from mpi4py import MPI as mpi
mpi.size = mpi.COMM_WORLD.Get_size()
mpi.rank = mpi.COMM_WORLD.Get_rank()
"""
pytrilinos_init = """from PyTrilinos import Epetra
class SimpleStruct:
pass
mpi = SimpleStruct()
mpi.rank = 0
mpi.size = 0
"""
class MPI(Configurable):
"""Configurable for MPI initialization"""
use = Unicode('', config=True,
help='How to enable MPI (mpi4py, pytrilinos, or empty string to disable).'
)
def _use_changed(self, name, old, new):
# load default init script if it's not set
if not self.init_script:
self.init_script = self.default_inits.get(new, '')
init_script = Unicode('', config=True,
help="Initialization code for MPI")
default_inits = Dict({'mpi4py' : mpi4py_init, 'pytrilinos':pytrilinos_init},
config=True)
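# Illustrative config sketch (hypothetical invocation): passing `--mpi=mpi4py`
# (the `mpi` alias below maps to `MPI.use`) triggers `_use_changed`, which fills
# `init_script` from `default_inits`, i.e. with the `mpi4py_init` snippet above.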
#-----------------------------------------------------------------------------
# Main application
#-----------------------------------------------------------------------------
aliases = dict(
file = 'IPEngineApp.url_file',
c = 'IPEngineApp.startup_command',
s = 'IPEngineApp.startup_script',
url = 'EngineFactory.url',
ssh = 'EngineFactory.sshserver',
sshkey = 'EngineFactory.sshkey',
ip = 'EngineFactory.ip',
transport = 'EngineFactory.transport',
port = 'EngineFactory.regport',
location = 'EngineFactory.location',
timeout = 'EngineFactory.timeout',
mpi = 'MPI.use',
)
aliases.update(base_aliases)
aliases.update(session_aliases)
flags = {}
flags.update(base_flags)
flags.update(session_flags)
class IPEngineApp(BaseParallelApplication):
name = 'ipengine'
description = _description
examples = _examples
classes = List([ZMQInteractiveShell, ProfileDir, Session, EngineFactory, Kernel, MPI])
startup_script = Unicode(u'', config=True,
help='specify a script to be run at startup')
startup_command = Unicode('', config=True,
help='specify a command to be run at startup')
url_file = Unicode(u'', config=True,
help="""The full location of the file containing the connection information for
the controller. If this is not given, the file must be in the
security directory of the cluster directory. This location is
resolved using the `profile` or `profile_dir` options.""",
)
wait_for_url_file = Float(5, config=True,
help="""The maximum number of seconds to wait for url_file to exist.
This is useful for batch-systems and shared-filesystems where the
controller and engine are started at the same time and it
may take a moment for the controller to write the connector files.""")
url_file_name = Unicode(u'ipcontroller-engine.json', config=True)
def _cluster_id_changed(self, name, old, new):
if new:
base = 'ipcontroller-%s' % new
else:
base = 'ipcontroller'
self.url_file_name = "%s-engine.json" % base
log_url = Unicode('', config=True,
help="""The URL for the iploggerapp instance, for forwarding
logging to a central location.""")
# an IPKernelApp instance, used to setup listening for shell frontends
kernel_app = Instance(IPKernelApp)
aliases = Dict(aliases)
flags = Dict(flags)
@property
def kernel(self):
"""allow access to the Kernel object, so I look like IPKernelApp"""
return self.engine.kernel
def find_url_file(self):
"""Set the url file.
        Here we don't try to check whether the file exists or is valid, as that
        is handled by the connection logic.
"""
config = self.config
# Find the actual controller key file
if not self.url_file:
self.url_file = os.path.join(
self.profile_dir.security_dir,
self.url_file_name
)
def load_connector_file(self):
"""load config from a JSON connector file,
at a *lower* priority than command-line/config files.
"""
self.log.info("Loading url_file %r", self.url_file)
config = self.config
with open(self.url_file) as f:
d = json.loads(f.read())
# allow hand-override of location for disambiguation
# and ssh-server
try:
config.EngineFactory.location
except AttributeError:
config.EngineFactory.location = d['location']
try:
config.EngineFactory.sshserver
except AttributeError:
config.EngineFactory.sshserver = d.get('ssh')
location = config.EngineFactory.location
proto, ip = d['interface'].split('://')
ip = disambiguate_ip_address(ip, location)
d['interface'] = '%s://%s' % (proto, ip)
# DO NOT allow override of basic URLs, serialization, or key
# JSON file takes top priority there
config.Session.key = cast_bytes(d['key'])
config.Session.signature_scheme = d['signature_scheme']
config.EngineFactory.url = d['interface'] + ':%i' % d['registration']
config.Session.packer = d['pack']
config.Session.unpacker = d['unpack']
self.log.debug("Config changed:")
self.log.debug("%r", config)
self.connection_info = d
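    # Illustrative sketch of the connector file consumed above (values are made
    # up; only the keys read by this method are shown):
    #   {"location": "10.0.0.1", "ssh": "", "interface": "tcp://10.0.0.1",
    #    "registration": 55672, "key": "<uuid>", "signature_scheme": "hmac-sha256",
    #    "pack": "json", "unpack": "json"}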
def bind_kernel(self, **kwargs):
"""Promote engine to listening kernel, accessible to frontends."""
if self.kernel_app is not None:
return
self.log.info("Opening ports for direct connections as an IPython kernel")
kernel = self.kernel
kwargs.setdefault('config', self.config)
kwargs.setdefault('log', self.log)
kwargs.setdefault('profile_dir', self.profile_dir)
kwargs.setdefault('session', self.engine.session)
app = self.kernel_app = IPKernelApp(**kwargs)
# allow IPKernelApp.instance():
IPKernelApp._instance = app
app.init_connection_file()
# relevant contents of init_sockets:
app.shell_port = app._bind_socket(kernel.shell_streams[0], app.shell_port)
app.log.debug("shell ROUTER Channel on port: %i", app.shell_port)
app.iopub_port = app._bind_socket(kernel.iopub_socket, app.iopub_port)
app.log.debug("iopub PUB Channel on port: %i", app.iopub_port)
kernel.stdin_socket = self.engine.context.socket(zmq.ROUTER)
app.stdin_port = app._bind_socket(kernel.stdin_socket, app.stdin_port)
app.log.debug("stdin ROUTER Channel on port: %i", app.stdin_port)
# start the heartbeat, and log connection info:
app.init_heartbeat()
app.log_connection_info()
app.write_connection_file()
def init_engine(self):
# This is the working dir by now.
sys.path.insert(0, '')
config = self.config
# print config
self.find_url_file()
# was the url manually specified?
keys = set(self.config.EngineFactory.keys())
keys = keys.union(set(self.config.RegistrationFactory.keys()))
if keys.intersection(set(['ip', 'url', 'port'])):
# Connection info was specified, don't wait for the file
url_specified = True
self.wait_for_url_file = 0
else:
url_specified = False
if self.wait_for_url_file and not os.path.exists(self.url_file):
self.log.warn("url_file %r not found", self.url_file)
self.log.warn("Waiting up to %.1f seconds for it to arrive.", self.wait_for_url_file)
tic = time.time()
while not os.path.exists(self.url_file) and (time.time()-tic < self.wait_for_url_file):
# wait for url_file to exist, or until time limit
time.sleep(0.1)
if os.path.exists(self.url_file):
self.load_connector_file()
elif not url_specified:
self.log.fatal("Fatal: url file never arrived: %s", self.url_file)
self.exit(1)
try:
exec_lines = config.IPKernelApp.exec_lines
except AttributeError:
try:
exec_lines = config.InteractiveShellApp.exec_lines
except AttributeError:
exec_lines = config.IPKernelApp.exec_lines = []
try:
exec_files = config.IPKernelApp.exec_files
except AttributeError:
try:
exec_files = config.InteractiveShellApp.exec_files
except AttributeError:
exec_files = config.IPKernelApp.exec_files = []
if self.startup_script:
exec_files.append(self.startup_script)
if self.startup_command:
exec_lines.append(self.startup_command)
# Create the underlying shell class and Engine
# shell_class = import_item(self.master_config.Global.shell_class)
# print self.config
try:
self.engine = EngineFactory(config=config, log=self.log,
connection_info=self.connection_info,
)
except:
self.log.error("Couldn't start the Engine", exc_info=True)
self.exit(1)
def forward_logging(self):
if self.log_url:
self.log.info("Forwarding logging to %s", self.log_url)
context = self.engine.context
lsock = context.socket(zmq.PUB)
lsock.connect(self.log_url)
handler = EnginePUBHandler(self.engine, lsock)
handler.setLevel(self.log_level)
self.log.addHandler(handler)
def init_mpi(self):
global mpi
self.mpi = MPI(parent=self)
mpi_import_statement = self.mpi.init_script
if mpi_import_statement:
try:
self.log.info("Initializing MPI:")
self.log.info(mpi_import_statement)
exec mpi_import_statement in globals()
except:
mpi = None
else:
mpi = None
@catch_config_error
def initialize(self, argv=None):
super(IPEngineApp, self).initialize(argv)
self.init_mpi()
self.init_engine()
self.forward_logging()
def start(self):
self.engine.start()
try:
self.engine.loop.start()
except KeyboardInterrupt:
self.log.critical("Engine Interrupted, shutting down...\n")
launch_new_instance = IPEngineApp.launch_instance
if __name__ == '__main__':
launch_new_instance()
|
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import eventlet
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as constants
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.notifier import api as notifier_api
from neutron.openstack.common import periodic_task
from neutron.openstack.common.rpc import proxy
from neutron.openstack.common import service
from neutron import service as neutron_service
LOG = logging.getLogger(__name__)
class MeteringPluginRpc(proxy.RpcProxy):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, host):
super(MeteringPluginRpc,
self).__init__(topic=topics.METERING_AGENT,
default_version=self.BASE_RPC_API_VERSION)
def _get_sync_data_metering(self, context):
try:
return self.call(context,
self.make_msg('get_sync_data_metering',
host=self.host),
topic=topics.METERING_PLUGIN)
except Exception:
LOG.exception(_("Failed synchronizing routers"))
class MeteringAgent(MeteringPluginRpc, manager.Manager):
Opts = [
cfg.StrOpt('driver',
default='neutron.services.metering.drivers.noop.'
'noop_driver.NoopMeteringDriver',
help=_("Metering driver")),
cfg.IntOpt('measure_interval', default=30,
help=_("Interval between two metering measures")),
cfg.IntOpt('report_interval', default=300,
help=_("Interval between two metering reports")),
]
def __init__(self, host, conf=None):
self.conf = conf or cfg.CONF
self._load_drivers()
self.root_helper = config.get_root_helper(self.conf)
self.context = context.get_admin_context_without_session()
self.metering_info = {}
self.metering_loop = loopingcall.FixedIntervalLoopingCall(
self._metering_loop
)
measure_interval = self.conf.measure_interval
self.last_report = 0
self.metering_loop.start(interval=measure_interval)
self.host = host
self.label_tenant_id = {}
self.routers = {}
self.metering_infos = {}
super(MeteringAgent, self).__init__(host=self.conf.host)
def _load_drivers(self):
"""Loads plugin-driver from configuration."""
LOG.info(_("Loading Metering driver %s"), self.conf.driver)
if not self.conf.driver:
raise SystemExit(_('A metering driver must be specified'))
self.metering_driver = importutils.import_object(
self.conf.driver, self, self.conf)
def _metering_notification(self):
for label_id, info in self.metering_infos.items():
data = {'label_id': label_id,
'tenant_id': self.label_tenant_id.get(label_id),
'pkts': info['pkts'],
'bytes': info['bytes'],
'time': info['time'],
'first_update': info['first_update'],
'last_update': info['last_update'],
'host': self.host}
LOG.debug(_("Send metering report: %s"), data)
notifier_api.notify(self.context,
notifier_api.publisher_id('metering'),
'l3.meter',
notifier_api.CONF.default_notification_level,
data)
info['pkts'] = 0
info['bytes'] = 0
info['time'] = 0
def _purge_metering_info(self):
ts = int(time.time())
report_interval = self.conf.report_interval
        # Drop accumulators that have not been updated within one report interval.
        for label_id, info in self.metering_infos.items():
            if ts - info['last_update'] > report_interval:
                del self.metering_infos[label_id]
def _add_metering_info(self, label_id, pkts, bytes):
ts = int(time.time())
info = self.metering_infos.get(label_id, {'bytes': 0,
'pkts': 0,
'time': 0,
'first_update': ts,
'last_update': ts})
info['bytes'] += bytes
info['pkts'] += pkts
info['time'] += ts - info['last_update']
info['last_update'] = ts
self.metering_infos[label_id] = info
return info
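    # Worked example (made-up numbers): two calls for the same label 10 seconds
    # apart, each with pkts=5 and bytes=700, leave the accumulator at
    #   {'pkts': 10, 'bytes': 1400, 'time': 10,
    #    'first_update': t0, 'last_update': t0 + 10}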
def _add_metering_infos(self):
self.label_tenant_id = {}
for router in self.routers.values():
tenant_id = router['tenant_id']
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
self.label_tenant_id[label_id] = tenant_id
tenant_id = self.label_tenant_id.get
accs = self._get_traffic_counters(self.context, self.routers.values())
if not accs:
return
for label_id, acc in accs.items():
self._add_metering_info(label_id, acc['pkts'], acc['bytes'])
def _metering_loop(self):
self._add_metering_infos()
ts = int(time.time())
delta = ts - self.last_report
report_interval = self.conf.report_interval
if delta > report_interval:
self._metering_notification()
self._purge_metering_info()
self.last_report = ts
@utils.synchronized('metering-agent')
def _invoke_driver(self, context, meterings, func_name):
try:
return getattr(self.metering_driver, func_name)(context, meterings)
except AttributeError:
LOG.exception(_("Driver %(driver)s does not implement %(func)s"),
{'driver': self.conf.driver,
'func': func_name})
except RuntimeError:
LOG.exception(_("Driver %(driver)s:%(func)s runtime error"),
{'driver': self.conf.driver,
'func': func_name})
@periodic_task.periodic_task(run_immediately=True)
def _sync_routers_task(self, context):
routers = self._get_sync_data_metering(self.context)
if not routers:
return
self._update_routers(context, routers)
def router_deleted(self, context, router_id):
self._add_metering_infos()
if router_id in self.routers:
del self.routers[router_id]
return self._invoke_driver(context, router_id,
'remove_router')
def routers_updated(self, context, routers=None):
if not routers:
routers = self._get_sync_data_metering(self.context)
if not routers:
return
self._update_routers(context, routers)
def _update_routers(self, context, routers):
for router in routers:
self.routers[router['id']] = router
return self._invoke_driver(context, routers,
'update_routers')
def _get_traffic_counters(self, context, routers):
LOG.debug(_("Get router traffic counters"))
return self._invoke_driver(context, routers, 'get_traffic_counters')
def update_metering_label_rules(self, context, routers):
LOG.debug(_("Update metering rules from agent"))
return self._invoke_driver(context, routers,
'update_metering_label_rules')
def add_metering_label(self, context, routers):
LOG.debug(_("Creating a metering label from agent"))
return self._invoke_driver(context, routers,
'add_metering_label')
def remove_metering_label(self, context, routers):
self._add_metering_infos()
LOG.debug(_("Delete a metering label from agent"))
return self._invoke_driver(context, routers,
'remove_metering_label')
class MeteringAgentWithStateReport(MeteringAgent):
def __init__(self, host, conf=None):
super(MeteringAgentWithStateReport, self).__init__(host=host,
conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-metering-agent',
'host': host,
'topic': topics.METERING_AGENT,
'configurations': {
'metering_driver': self.conf.driver,
'measure_interval':
self.conf.measure_interval,
'report_interval': self.conf.report_interval
},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_METERING}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.state_rpc.report_state(self.context, self.agent_state,
self.use_call)
self.agent_state.pop('start_flag', None)
self.use_call = False
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
def agent_updated(self, context, payload):
LOG.info(_("agent_updated by server side %s!"), payload)
def main():
eventlet.monkey_patch()
conf = cfg.CONF
conf.register_opts(MeteringAgent.Opts)
config.register_agent_state_opts_helper(conf)
config.register_root_helper(conf)
conf(project='neutron')
config.setup_logging(conf)
server = neutron_service.Service.create(
binary='neutron-metering-agent',
topic=topics.METERING_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='neutron.services.metering.agents.'
'metering_agent.MeteringAgentWithStateReport')
service.launch(server).wait()
|
|
import sys
import random
import signal
#Timer handler, helper function
class TimedOutExc(Exception):
pass
def handler(signum, frame):
#print 'Signal handler called with signal', signum
raise TimedOutExc()
class Manual_player:
def __init__(self):
pass
def move(self, temp_board, temp_block, old_move, flag):
print 'Enter your move: <format:row column> (you\'re playing with', flag + ")"
mvp = raw_input()
mvp = mvp.split()
return (int(mvp[0]), int(mvp[1]))
class Player1:
def __init__(self):
pass
def move(self,temp_board,temp_block,old_move,flag):
# while(1):
# pass
for_corner = [0,2,3,5,6,8]
#List of permitted blocks, based on old move.
blocks_allowed = []
if old_move[0] in for_corner and old_move[1] in for_corner:
## we will have 3 representative blocks, to choose from
if old_move[0] % 3 == 0 and old_move[1] % 3 == 0:
## top left 3 blocks are allowed
blocks_allowed = [0, 1, 3]
elif old_move[0] % 3 == 0 and old_move[1] in [2, 5, 8]:
## top right 3 blocks are allowed
blocks_allowed = [1,2,5]
elif old_move[0] in [2,5, 8] and old_move[1] % 3 == 0:
## bottom left 3 blocks are allowed
blocks_allowed = [3,6,7]
elif old_move[0] in [2,5,8] and old_move[1] in [2,5,8]:
### bottom right 3 blocks are allowed
blocks_allowed = [5,7,8]
else:
print "SOMETHING REALLY WEIRD HAPPENED!"
sys.exit(1)
else:
#### we will have only 1 block to choose from (or maybe NONE of them, which calls for a free move)
if old_move[0] % 3 == 0 and old_move[1] in [1,4,7]:
## upper-center block
blocks_allowed = [1]
elif old_move[0] in [1,4,7] and old_move[1] % 3 == 0:
## middle-left block
blocks_allowed = [3]
elif old_move[0] in [2,5,8] and old_move[1] in [1,4,7]:
## lower-center block
blocks_allowed = [7]
elif old_move[0] in [1,4,7] and old_move[1] in [2,5,8]:
## middle-right block
blocks_allowed = [5]
elif old_move[0] in [1,4,7] and old_move[1] in [1,4,7]:
blocks_allowed = [4]
# We get all the empty cells in allowed blocks. If they're all full, we get all the empty cells in the entire board.
cells = get_empty_out_of(temp_board, blocks_allowed)
return cells[random.randrange(len(cells))]
class Player2:
def __init__(self):
pass
def move(self,temp_board,temp_block,old_move,flag):
for_corner = [0,2,3,5,6,8]
#List of permitted blocks, based on old move.
blocks_allowed = []
if old_move[0] in for_corner and old_move[1] in for_corner:
## we will have 3 representative blocks, to choose from
if old_move[0] % 3 == 0 and old_move[1] % 3 == 0:
## top left 3 blocks are allowed
blocks_allowed = [0, 1, 3]
elif old_move[0] % 3 == 0 and old_move[1] in [2, 5, 8]:
## top right 3 blocks are allowed
blocks_allowed = [1,2,5]
elif old_move[0] in [2,5, 8] and old_move[1] % 3 == 0:
## bottom left 3 blocks are allowed
blocks_allowed = [3,6,7]
elif old_move[0] in [2,5,8] and old_move[1] in [2,5,8]:
### bottom right 3 blocks are allowed
blocks_allowed = [5,7,8]
else:
print "SOMETHING REALLY WEIRD HAPPENED!"
sys.exit(1)
else:
#### we will have only 1 block to choose from (or maybe NONE of them, which calls for a free move)
if old_move[0] % 3 == 0 and old_move[1] in [1,4,7]:
## upper-center block
blocks_allowed = [1]
elif old_move[0] in [1,4,7] and old_move[1] % 3 == 0:
## middle-left block
blocks_allowed = [3]
elif old_move[0] in [2,5,8] and old_move[1] in [1,4,7]:
## lower-center block
blocks_allowed = [7]
elif old_move[0] in [1,4,7] and old_move[1] in [2,5,8]:
## middle-right block
blocks_allowed = [5]
elif old_move[0] in [1,4,7] and old_move[1] in [1,4,7]:
blocks_allowed = [4]
# We get all the empty cells in allowed blocks. If they're all full, we get all the empty cells in the entire board.
cells = get_empty_out_of(temp_board,blocks_allowed)
return cells[random.randrange(len(cells))]
#Initializes the game
def get_init_board_and_blockstatus():
board = []
for i in range(9):
row = ['-']*9
board.append(row)
block_stat = ['-']*9
return board, block_stat
# Checks if player has messed with the board; despite the name, this returns True when the board is unchanged (i.e. verification passes). Don't mess with the board that is passed to your move function.
def verification_fails_board(board_game, temp_board_state):
return board_game == temp_board_state
# Checks if player has messed with the block array; returns True when it is unchanged. Don't mess with the block array that is passed to your move function.
def verification_fails_block(block_stat, temp_block_stat):
return block_stat == temp_block_stat
#Gets empty cells from the list of possible blocks. Hence gets valid moves.
def get_empty_out_of(gameb, blal):
cells = [] # it will be list of tuples
#Iterate over possible blocks and get empty cells
for idb in blal:
id1 = idb/3
id2 = idb%3
for i in range(id1*3,id1*3+3):
for j in range(id2*3,id2*3+3):
if gameb[i][j] == '-':
cells.append((i,j))
# If all the possible blocks are full, you can move anywhere
if cells == []:
for i in range(9):
for j in range(9):
if gameb[i][j] == '-':
cells.append((i,j))
return cells
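# Illustrative usage (hypothetical board): on a completely empty 9x9 board,
#   get_empty_out_of(board, [4])
# returns the nine cells of the centre block, i.e. (3,3) through (5,5).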
# Note that even if someone has won a block, it is not abandoned. But then, there's no point winning it again!
# Returns True if move is valid
def check_valid_move(game_board, current_move, old_move):
# first we need to check whether current_move is tuple of not
# old_move is guaranteed to be correct
if type(current_move) is not tuple:
return False
if len(current_move) != 2:
return False
a = current_move[0]
b = current_move[1]
if type(a) is not int or type(b) is not int:
return False
if a < 0 or a > 8 or b < 0 or b > 8:
return False
#Special case at start of game, any move is okay!
if old_move[0] == -1 and old_move[1] == -1:
return True
for_corner = [0,2,3,5,6,8]
#List of permitted blocks, based on old move.
blocks_allowed = []
if old_move[0] in for_corner and old_move[1] in for_corner:
## we will have 3 representative blocks, to choose from
if old_move[0] % 3 == 0 and old_move[1] % 3 == 0:
## top left 3 blocks are allowed
blocks_allowed = [0, 1, 3]
elif old_move[0] % 3 == 0 and old_move[1] in [2, 5, 8]:
## top right 3 blocks are allowed
blocks_allowed = [1,2,5]
elif old_move[0] in [2,5, 8] and old_move[1] % 3 == 0:
## bottom left 3 blocks are allowed
blocks_allowed = [3,6,7]
elif old_move[0] in [2,5,8] and old_move[1] in [2,5,8]:
### bottom right 3 blocks are allowed
blocks_allowed = [5,7,8]
else:
print "SOMETHING REALLY WEIRD HAPPENED!"
sys.exit(1)
else:
#### we will have only 1 block to choose from (or maybe NONE of them, which calls for a free move)
if old_move[0] % 3 == 0 and old_move[1] in [1,4,7]:
## upper-center block
blocks_allowed = [1]
elif old_move[0] in [1,4,7] and old_move[1] % 3 == 0:
## middle-left block
blocks_allowed = [3]
elif old_move[0] in [2,5,8] and old_move[1] in [1,4,7]:
## lower-center block
blocks_allowed = [7]
elif old_move[0] in [1,4,7] and old_move[1] in [2,5,8]:
## middle-right block
blocks_allowed = [5]
elif old_move[0] in [1,4,7] and old_move[1] in [1,4,7]:
blocks_allowed = [4]
# We get all the empty cells in allowed blocks. If they're all full, we get all the empty cells in the entire board.
cells = get_empty_out_of(game_board, blocks_allowed)
#Checks if you made a valid move.
if current_move in cells:
return True
else:
return False
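# Illustrative behaviour (hypothetical positions): with old_move == (-1, -1) any
# in-range tuple such as (4, 4) is valid; after a move at (0, 0) only empty cells
# inside blocks 0, 1 and 3 (the top-left region) are accepted.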
def update_lists(game_board, block_stat, move_ret, fl):
#move_ret has the move to be made, so we modify the game_board, and then check if we need to modify block_stat
game_board[move_ret[0]][move_ret[1]] = fl
#print "@@@@@@@@@@@@@@@@@"
#print block_stat
block_no = (move_ret[0]/3)*3 + move_ret[1]/3
id1 = block_no/3
id2 = block_no%3
mg = 0
mflg = 0
if block_stat[block_no] == '-':
### now for diagonals
## D1
# ^
# ^
# ^
if game_board[id1*3][id2*3] == game_board[id1*3+1][id2*3+1] and game_board[id1*3+1][id2*3+1] == game_board[id1*3+2][id2*3+2] and game_board[id1*3+1][id2*3+1] != '-':
mflg=1
mg=1
#print "SEG: D1 found"
## D2
# ^
# ^
# ^
############ MODIFICATION HERE, in second condition -> gb[id1*3][id2*3+2]
# if game_board[id1*3+2][id2*3] == game_board[id1*3+1][id2*3+1] and game_board[id1*3+1][id2*3+1] == game_board[id1*3+2][id2*3] and game_board[id1*3+1][id2*3+1] != '-':
if game_board[id1*3+2][id2*3] == game_board[id1*3+1][id2*3+1] and game_board[id1*3+1][id2*3+1] == game_board[id1*3][id2*3 + 2] and game_board[id1*3+1][id2*3+1] != '-':
mflg=1
mg=1
#print "SEG: D2 found"
### col-wise
if mflg != 1:
for i in range(id2*3,id2*3+3):
#### MODIFICATION HERE, [i] was missing previously
# if game_board[id1*3]==game_board[id1*3+1] and game_board[id1*3+1] == game_board[id1*3+2] and game_board[id1*3] != '-':
if game_board[id1*3][i]==game_board[id1*3+1][i] and game_board[id1*3+1][i] == game_board[id1*3+2][i] and game_board[id1*3][i] != '-':
mflg = 1
#print "SEG: Col found"
break
### row-wise
if mflg != 1:
for i in range(id1*3,id1*3+3):
### MODIFICATION HERE, [i] was missing previously
#if game_board[id2*3]==game_board[id2*3+1] and game_board[id2*3+1] == game_board[id2*3+2] and game_board[id2*3] != '-':
if game_board[i][id2*3]==game_board[i][id2*3+1] and game_board[i][id2*3+1] == game_board[i][id2*3+2] and game_board[i][id2*3] != '-':
mflg = 1
#print "SEG: Row found"
break
if mflg == 1:
block_stat[block_no] = fl
#print
#print block_stat
#print "@@@@@@@@@@@@@@@@@@@@@@@"
return mg
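# Worked example (illustrative): a move at cell (4, 7) gives
#   block_no = (4/3)*3 + 7/3 = 1*3 + 2 = 5   (Python 2 integer division)
# i.e. the middle-right block, covering rows 3..5 and columns 6..8.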
#Check win
def terminal_state_reached(game_board, block_stat,point1,point2):
### we are now concerned only with block_stat
bs = block_stat
## Row win
if (bs[0] == bs[1] and bs[1] == bs[2] and bs[1]!='-') or (bs[3]!='-' and bs[3] == bs[4] and bs[4] == bs[5]) or (bs[6]!='-' and bs[6] == bs[7] and bs[7] == bs[8]):
return True, 'W'
## Col win
elif (bs[0] == bs[3] and bs[3] == bs[6] and bs[0]!='-') or (bs[1] == bs[4] and bs[4] == bs[7] and bs[4]!='-') or (bs[2] == bs[5] and bs[5] == bs[8] and bs[5]!='-'):
return True, 'W'
## Diag win
elif (bs[0] == bs[4] and bs[4] == bs[7] and bs[0]!='-') or (bs[2] == bs[4] and bs[4] == bs[6] and bs[2]!='-'):
return True, 'W'
else:
smfl = 0
for i in range(9):
for j in range(9):
if game_board[i][j] == '-':
smfl = 1
break
if smfl == 1:
return False, 'Continue'
else:
##### check of number of DIAGONALs
if point1>point2:
return True, 'P1'
elif point2>point1:
return True, 'P2'
else:
return True, 'D'
def decide_winner_and_get_message(player,status, message):
if status == 'P1':
return ('P1', 'MORE DIAGONALS')
elif status == 'P2':
return ('P2', 'MORE DIAGONALS')
elif player == 'P1' and status == 'L':
return ('P2',message)
elif player == 'P1' and status == 'W':
return ('P1',message)
elif player == 'P2' and status == 'L':
return ('P1',message)
elif player == 'P2' and status == 'W':
return ('P2',message)
else:
return ('NONE','DRAW')
return
def print_lists(gb, bs):
print '=========== Game Board ==========='
for i in range(9):
if i > 0 and i % 3 == 0:
print
for j in range(9):
if j > 0 and j % 3 == 0:
print " " + gb[i][j],
else:
print gb[i][j],
print
print "=================================="
print "=========== Block Status ========="
for i in range(0, 9, 3):
print bs[i] + " " + bs[i+1] + " " + bs[i+2]
print "=================================="
print
def simulate(obj1,obj2):
# game board is a 9x9 list, block_stat is a 1D list of 9 elements
game_board, block_stat = get_init_board_and_blockstatus()
#########
# deciding player1 / player2 after a coin toss
pl1 = obj1
pl2 = obj2
### basically, player with flag 'x' will start the game
pl1_fl = 'x'
pl2_fl = 'o'
old_move = (-1, -1) ### for the first move
WINNER = ''
MESSAGE = ''
TIMEALLOWED = 60
p1_pts=0
p2_pts=0
#### printing
print_lists(game_board, block_stat)
while(1):
######################################
########### firstly pl1 will move
######################################
## just for checking that the player1 does not modify the contents of the 2 lists
temp_board_state = game_board[:]
temp_block_stat = block_stat[:]
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEALLOWED)
# Player1 to complete in TIMEALLOWED secs.
try:
ret_move_pl1 = pl1.move(temp_board_state, temp_block_stat, old_move, pl1_fl)
except TimedOutExc as e:
WINNER, MESSAGE = decide_winner_and_get_message('P1', 'L', 'TIMED OUT')
break
### MODIFICATION!!
signal.alarm(0)
#### check if both lists are the same!!
if not (verification_fails_board(game_board, temp_board_state) and verification_fails_block(block_stat, temp_block_stat)):
##player1 loses - he modified something
WINNER, MESSAGE = decide_winner_and_get_message('P1', 'L', 'MODIFIED CONTENTS OF LISTS')
break
### now check if the returned move is valid
if not check_valid_move(game_board, ret_move_pl1, old_move):
## player1 loses - he made the wrong move.
WINNER, MESSAGE = decide_winner_and_get_message('P1', 'L', 'MADE AN INVALID MOVE')
break
print "Player 1 made the move:", ret_move_pl1, 'with', pl1_fl
######## So if the move is valid, we update the 'game_board' and 'block_stat' lists with move of pl1
p1_pts += update_lists(game_board, block_stat, ret_move_pl1, pl1_fl)
### now check if the last move resulted in a terminal state
gamestatus, mesg = terminal_state_reached(game_board, block_stat,p1_pts,p2_pts)
if gamestatus == True:
print_lists(game_board, block_stat)
WINNER, MESSAGE = decide_winner_and_get_message('P1', mesg, 'COMPLETE')
break
old_move = ret_move_pl1
print_lists(game_board, block_stat)
############################################
### Now player2 plays
###########################################
## just for checking that the player2 does not modify the contents of the 2 lists
temp_board_state = game_board[:]
temp_block_stat = block_stat[:]
signal.signal(signal.SIGALRM, handler)
signal.alarm(TIMEALLOWED)
# Player2 to complete in TIMEALLOWED secs.
try:
ret_move_pl2 = pl2.move(temp_board_state, temp_block_stat, old_move, pl2_fl)
except TimedOutExc as e:
WINNER, MESSAGE = decide_winner_and_get_message('P2', 'L', 'TIMED OUT')
break
signal.alarm(0)
#### check if both lists are the same!!
if not (verification_fails_board(game_board, temp_board_state) and verification_fails_block(block_stat, temp_block_stat)):
##player2 loses - he modified something
WINNER, MESSAGE = decide_winner_and_get_message('P2', 'L', 'MODIFIED CONTENTS OF LISTS')
break
### now check if the returned move is valid
if not check_valid_move(game_board, ret_move_pl2, old_move):
## player2 loses - he made the wrong move...
WINNER, MESSAGE = decide_winner_and_get_message('P2', 'L', 'MADE AN INVALID MOVE')
break
print "Player 2 made the move:", ret_move_pl2, 'with', pl2_fl
######## So if the move is valid, we update the 'game_board' and 'block_stat' lists with the move of P2
p2_pts += update_lists(game_board, block_stat, ret_move_pl2, pl2_fl)
### now check if the last move resulted in a terminal state
gamestatus, mesg = terminal_state_reached(game_board, block_stat,p1_pts,p2_pts)
if gamestatus == True:
print_lists(game_board, block_stat)
WINNER, MESSAGE = decide_winner_and_get_message('P2', mesg, 'COMPLETE' )
break
### otherwise CONTINUE
old_move = ret_move_pl2
print_lists(game_board, block_stat)
print p1_pts
print p2_pts
print WINNER
print MESSAGE
# return WINNER, MESSAGE, p1_pt2, p2_pt2
if __name__ == '__main__':
## get game playing objects
if len(sys.argv) != 2:
print 'Usage: python simulator.py <option>'
print '<option> can be 1 => Random player vs. Random player'
print ' 2 => Human vs. Random Player'
print ' 3 => Human vs. Human'
sys.exit(1)
obj1 = ''
obj2 = ''
option = sys.argv[1]
if option == '1':
obj1 = Player1()
obj2 = Player2()
elif option == '2':
obj1 = Player1()
obj2 = Manual_player()
elif option == '3':
obj1 = Manual_player()
obj2 = Manual_player()
#########
# deciding player1 / player2 after a coin toss
num = random.uniform(0,1)
interchange = 0
if num > 0.5:
interchange = 1
simulate(obj2, obj1)
else:
simulate(obj1, obj2)
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import crypt
import filecmp
import os
import re
import time
import xml.etree.ElementTree as ET
import custom_params
import hawq_constants
import utils
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.core.resources.accounts import Group, User
from resource_management.core.resources.system import Execute, Directory, File
from resource_management.core.shell import call
from resource_management.core.system import System
from resource_management.libraries.functions.default import default
def setup_user():
"""
Creates HAWQ user home directory and sets up the correct ownership.
"""
__create_hawq_user()
__create_hawq_user_secured()
__set_home_dir_ownership()
def __create_hawq_user():
"""
Creates HAWQ user with password and default group.
"""
import params
Group(hawq_constants.hawq_group, ignore_failures=True)
User(hawq_constants.hawq_user,
gid=hawq_constants.hawq_group,
password=crypt.crypt(params.hawq_password, "$1$salt$"),
groups=[hawq_constants.hawq_group, params.user_group],
ignore_failures=True)
def __create_hawq_user_secured():
"""
Creates HAWQ secured headless user belonging to hadoop group.
"""
import params
Group(hawq_constants.hawq_group_secured, ignore_failures=True)
User(hawq_constants.hawq_user_secured,
gid=hawq_constants.hawq_group_secured,
groups=[hawq_constants.hawq_group_secured, params.user_group],
ignore_failures=True)
def create_master_dir(dir_path):
"""
Creates the master directory (hawq_master_dir or hawq_segment_dir) for HAWQ
"""
utils.create_dir_as_hawq_user(dir_path)
Execute("chmod 700 {0}".format(dir_path), user=hawq_constants.root_user, timeout=hawq_constants.default_exec_timeout)
def create_temp_dirs(dir_paths):
"""
Creates the temp directories (hawq_master_temp_dir or hawq_segment_temp_dir) for HAWQ
"""
for path in dir_paths.split(','):
if path != "":
utils.create_dir_as_hawq_user(path)
def __set_home_dir_ownership():
"""
Updates the HAWQ user home directory to be owned by gpadmin:gpadmin.
"""
command = "chown -R {0}:{1} {2}".format(hawq_constants.hawq_user, hawq_constants.hawq_group, hawq_constants.hawq_home_dir)
Execute(command, timeout=hawq_constants.default_exec_timeout)
def setup_common_configurations():
"""
Sets up the config files common to master, standby and segment nodes.
"""
import params
params.XmlConfig("hdfs-client.xml",
configurations=params.hdfs_client,
configuration_attributes=params.config_attrs['hdfs-client'])
params.XmlConfig("yarn-client.xml",
configurations=params.yarn_client,
configuration_attributes=params.config_attrs['yarn-client'])
params.XmlConfig("hawq-site.xml",
configurations=params.hawq_site,
configuration_attributes=params.config_attrs['hawq-site'])
__set_osparams()
def __set_osparams():
"""
Updates parameters in sysctl.conf and limits.conf required by HAWQ.
"""
# Create a temp scratchpad directory
utils.create_dir_as_hawq_user(hawq_constants.hawq_tmp_dir)
    # SUSE doesn't support loading values from files in /etc/sysctl.d,
    # so we have to edit the sysctl file directly
if System.get_instance().os_family == "suse":
# Update /etc/sysctl.conf
__update_sysctl_file_suse()
else:
# Update /etc/sysctl.d/hawq.conf
__update_sysctl_file()
__update_limits_file()
def __update_limits_file():
"""
Updates /etc/security/limits.d/hawq.conf file with the HAWQ parameters.
"""
import params
# Ensure limits directory exists
Directory(hawq_constants.limits_conf_dir, create_parents = True, owner=hawq_constants.root_user, group=hawq_constants.root_user)
# Generate limits for hawq user
limits_file_content = "#### HAWQ Limits Parameters ###########\n"
for key, value in params.hawq_limits.iteritems():
if not __valid_input(value):
raise Exception("Value {0} for parameter {1} contains non-numeric characters which are not allowed (except whitespace), please fix the value and retry".format(value, key))
"""
Content of the file to be written should be of the format
gpadmin soft nofile 290000
gpadmin hard nofile 290000
key used in the configuration is of the format soft_nofile, thus strip '_' and replace with 'space'
"""
limits_file_content += "{0} {1} {2}\n".format(hawq_constants.hawq_user, re.sub("_", " ", key), value.strip())
File('{0}/{1}.conf'.format(hawq_constants.limits_conf_dir, hawq_constants.hawq_user), content=limits_file_content,
owner=hawq_constants.hawq_user, group=hawq_constants.hawq_group)
def __valid_input(value):
"""
    Validate that the input value is numeric (whitespace allowed); return a truthy match if found, else None
"""
return re.search("^ *[0-9][0-9 ]*$", value)
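# Illustrative behaviour of the regex above (hypothetical inputs):
#   __valid_input("290000")    -> truthy match (digits only)
#   __valid_input(" 64 128 ")  -> truthy match (digits and spaces)
#   __valid_input("64k")       -> None (non-numeric character)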
def __convert_sysctl_dict_to_text():
"""
Convert sysctl configuration dict to text with each property value pair separated on new line
"""
import params
sysctl_file_content = "### HAWQ System Parameters ###########\n"
for key, value in params.hawq_sysctl.iteritems():
if not __valid_input(value):
raise Exception("Value {0} for parameter {1} contains non-numeric characters which are not allowed (except whitespace), please fix the value and retry".format(value, key))
sysctl_file_content += "{0} = {1}\n".format(key, value)
return sysctl_file_content
def __update_sysctl_file():
"""
Updates /etc/sysctl.d/hawq_sysctl.conf file with the HAWQ parameters on CentOS/RHEL.
"""
# Ensure sys ctl sub-directory exists
Directory(hawq_constants.sysctl_conf_dir, create_parents = True, owner=hawq_constants.root_user, group=hawq_constants.root_user)
# Generate temporary file with kernel parameters needed by hawq
File(hawq_constants.hawq_sysctl_tmp_file, content=__convert_sysctl_dict_to_text(), owner=hawq_constants.hawq_user,
group=hawq_constants.hawq_group)
is_changed = True
if os.path.exists(hawq_constants.hawq_sysctl_tmp_file) and os.path.exists(hawq_constants.hawq_sysctl_file):
is_changed = not filecmp.cmp(hawq_constants.hawq_sysctl_file, hawq_constants.hawq_sysctl_tmp_file)
if is_changed:
# Generate file with kernel parameters needed by hawq, only if something
# has been changed by user
Execute("cp -p {0} {1}".format(hawq_constants.hawq_sysctl_tmp_file, hawq_constants.hawq_sysctl_file))
# Reload kernel sysctl parameters from hawq file.
Execute("sysctl -e -p {0}".format(hawq_constants.hawq_sysctl_file), timeout=hawq_constants.default_exec_timeout)
# Wipe out temp file
File(hawq_constants.hawq_sysctl_tmp_file, action='delete')
def __update_sysctl_file_suse():
"""
Updates /etc/sysctl.conf file with the HAWQ parameters on SUSE.
"""
# Backup file
backup_file_name = hawq_constants.sysctl_backup_file.format(str(int(time.time())))
try:
# Generate file with kernel parameters needed by hawq to temp file
File(hawq_constants.hawq_sysctl_tmp_file, content=__convert_sysctl_dict_to_text(), owner=hawq_constants.hawq_user,
group=hawq_constants.hawq_group)
sysctl_file_dict = utils.read_file_to_dict(hawq_constants.sysctl_suse_file)
sysctl_file_dict_original = sysctl_file_dict.copy()
hawq_sysctl_dict = utils.read_file_to_dict(hawq_constants.hawq_sysctl_tmp_file)
# Merge common system file with hawq specific file
sysctl_file_dict.update(hawq_sysctl_dict)
if sysctl_file_dict_original != sysctl_file_dict:
# Backup file
Execute("cp {0} {1}".format(hawq_constants.sysctl_suse_file, backup_file_name), timeout=hawq_constants.default_exec_timeout)
# Write merged properties to file
utils.write_dict_to_file(sysctl_file_dict, hawq_constants.sysctl_suse_file)
# Reload kernel sysctl parameters from /etc/sysctl.conf
Execute("sysctl -e -p", timeout=hawq_constants.default_exec_timeout)
except Exception as e:
Logger.error("Error occurred while updating sysctl.conf file, reverting the contents" + str(e))
Execute("cp {0} {1}".format(hawq_constants.sysctl_suse_file, hawq_constants.hawq_sysctl_tmp_file))
Execute("mv {0} {1}".format(backup_file_name, hawq_constants.sysctl_suse_file), timeout=hawq_constants.default_exec_timeout)
Logger.error("Please execute `sysctl -e -p` on the command line manually to reload the contents of file {0}".format(
hawq_constants.hawq_sysctl_tmp_file))
raise Fail("Failed to update sysctl.conf file ")
def get_local_hawq_site_property_value(property_name):
"""
Fetches the value of the property specified, from the local hawq-site.xml.
"""
hawq_site_path = None
try:
hawq_site_path = os.path.join(hawq_constants.hawq_config_dir, "hawq-site.xml")
hawq_site_root = ET.parse(hawq_site_path).getroot()
for property in hawq_site_root.findall("property"):
for item in property:
if item.tag == 'name':
current_property_name = item.text.strip() if item.text else item.text
elif item.tag == 'value':
current_property_value = item.text.strip() if item.text else item.text
if property_name == current_property_name:
return current_property_value
raise  # property not found; trigger the except clause below
except Exception:
raise Fail("Unable to read property {0} from local {1}".format(property_name, hawq_site_path))
def validate_configuration():
"""
Validates if YARN is present in the configuration when the user specifies YARN as HAWQ's resource manager.
"""
import params
# At this point, hawq should be included.
if 'hawq-site' not in params.config['configurations']:
raise Fail("Configurations does not contain hawq-site. Please include HAWQ")
# If HAWQ is set to use YARN and YARN is not configured, error.
rm_type = params.config["configurations"]["hawq-site"].get("hawq_global_rm_type")
if rm_type == "yarn" and "yarn-site" not in params.config["configurations"]:
raise Fail("HAWQ is set to use YARN but YARN is not deployed. " +
"hawq_global_rm_type property in hawq-site is set to 'yarn' but YARN is not configured. " +
"Please deploy YARN before starting HAWQ or change the value of hawq_global_rm_type property to 'none'")
def start_component(component_name, port, data_dir):
"""
If the component's data directory has already been initialized (postmaster.opts is present), start the component;
otherwise initialize it. Initialization also starts the component.
"""
import params
__check_dfs_truncate_enforced()
if component_name == hawq_constants.MASTER:
# Check the owner for hawq_data directory
data_dir_owner = hawq_constants.hawq_user_secured if params.security_enabled else hawq_constants.hawq_user
# Change owner recursively (if needed)
if __get_hdfs_dir_owner() != data_dir_owner:
params.HdfsResource(params.hawq_hdfs_data_dir,
type="directory",
action="create_on_execute",
owner=data_dir_owner,
group=hawq_constants.hawq_group,
recursive_chown=True,
mode=0755)
params.HdfsResource(None, action="execute")
options_str = "{0} -a -v".format(component_name)
if os.path.exists(os.path.join(data_dir, hawq_constants.postmaster_opts_filename)):
return utils.exec_hawq_operation(hawq_constants.START, options_str,
not_if=utils.generate_hawq_process_status_cmd(component_name, port))
# Initialize HAWQ
if component_name == hawq_constants.MASTER:
utils.exec_hawq_operation(hawq_constants.INIT, options_str + " --ignore-bad-hosts")
utils.exec_psql_cmd('create database {0};'.format(hawq_constants.hawq_user),
params.hawqmaster_host, params.hawq_master_address_port, ignore_error=True)
else:
utils.exec_hawq_operation(hawq_constants.INIT, options_str)
def stop_component(component_name, mode):
"""
Stops the component
Unlike start_component, the port is obtained from the local hawq-site.xml, as Ambari may potentially hold a newer value set through the UI.
"""
port_property_name = hawq_constants.COMPONENT_ATTRIBUTES_MAP[component_name]['port_property']
port_number = get_local_hawq_site_property_value(port_property_name)
utils.exec_hawq_operation(hawq_constants.STOP,
"{0} -M {1} -a -v".format(component_name, mode),
only_if=utils.generate_hawq_process_status_cmd(component_name, port_number))
def __get_hdfs_dir_owner():
import params
# Check the owner for hawq_data directory
kinit_cmd = "{0} -kt {1} {2};".format(params.kinit_path_local, params.hdfs_user_keytab, params.hdfs_principal_name) if params.security_enabled else ""
cmd = kinit_cmd + "hdfs dfs -ls {0} | sed '1d;s/ */ /g' | cut -d\\ -f3".format(params.hawq_hdfs_data_dir)
returncode, stdout = call(cmd, user=params.hdfs_superuser, timeout=300)
if returncode:
raise Fail("Unable to determine the ownership for HDFS dir {0}".format(params.hawq_hdfs_data_dir))
return stdout.strip()
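# Illustrative sketch (not part of the original script): what the sed/cut pipeline above extracts
# from `hdfs dfs -ls` output. The sample listing line is an assumption; it only illustrates that
# the third whitespace-separated column holds the owner.
def __example_parse_hdfs_owner():
    sample_line = "drwxr-xr-x   - gpadmin hadoop          0 2017-06-14 10:01 /hawq_data"
    return sample_line.split()[2]   # -> 'gpadmin'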
def __check_dfs_truncate_enforced():
"""
If enforce_hdfs_truncate is set to True:
raise an ERROR so that the HAWQ component start fails
Else:
log a WARNING and continue with the start
"""
DFS_ALLOW_TRUNCATE_WARNING_MSG = "It is recommended to set dfs.allow.truncate as true in hdfs-site.xml configuration file, currently it is set to false. Please review HAWQ installation guide for more information."
# Check if dfs.allow.truncate exists in hdfs-site.xml and throw appropriate exception if not set to True
dfs_allow_truncate = default("/configurations/hdfs-site/dfs.allow.truncate", None)
if dfs_allow_truncate is None or str(dfs_allow_truncate).lower() != 'true':
if custom_params.enforce_hdfs_truncate:
raise Fail("**ERROR**: {0}".format(DFS_ALLOW_TRUNCATE_WARNING_MSG))
else:
Logger.error("**WARNING**: {0}".format(DFS_ALLOW_TRUNCATE_WARNING_MSG))
|
|
"""
@author: Fyzel@users.noreply.github.com
@copyright: 2017 Englesh.org. All rights reserved.
@license: https://github.com/Fyzel/weather-data-flaskapi/blob/master/LICENSE
@contact: Fyzel@users.noreply.github.com
@deffield updated: 2017-06-14
"""
from flask_restplus import fields
from api.restplus import api
humidity = api.model(
'Humidity',
{
'id': fields.Integer(
readOnly=True,
description='The unique identifier of the record'),
'value': fields.Float(
required=True,
readOnly=True,
description="The reading's value"),
'value_units': fields.String(
required=True,
readOnly=True,
max=16,
description='The unit for the value (e.g. percent)'),
'value_error_range': fields.Float(
required=True,
readOnly=True,
description="The error range for the reading's value"),
'latitude': fields.Float(
required=True,
readOnly=True,
description='The latitude of the reading'),
'latitude_public': fields.Float(
required=True,
readOnly=True,
description='The public latitude of the reading'),
'longitude': fields.Float(
required=True,
readOnly=True,
description='The longitude of the reading'),
'longitude_public': fields.Float(
required=True,
readOnly=True,
description='The public longitude of the reading'),
'city': fields.String(
required=True,
readOnly=True,
description="The record's city."),
'province': fields.String(
required=True,
readOnly=True,
description="The record's province."),
'country': fields.String(
required=True,
readOnly=True,
description="The record's country."),
'elevation': fields.Float(
required=True,
readOnly=True,
description="The record's elevation."),
'elevation_units': fields.String(
required=True,
readOnly=True,
max=16,
description='The unit for the elevation (e.g. meters, feet)'),
'timestamp': fields.DateTime(
required=True,
readOnly=True,
description='The date and time the reading was recorded'),
})
public_humidity = api.model(
'Humidity',
{
'id': fields.Integer(
readOnly=True,
description='The unique identifier of the record'),
'value': fields.Float(
required=True,
readOnly=True,
description="The reading's value"),
'value_units': fields.String(
required=True,
readOnly=True,
max=16,
description='The unit for the value (e.g. percent)'),
'value_error_range': fields.Float(
required=True,
readOnly=True,
description="The error range for the reading's value"),
'latitude_public': fields.Float(
required=True,
readOnly=True,
description='The public latitude of the reading'),
'longitude_public': fields.Float(
required=True,
readOnly=True,
description='The public longitude of the reading'),
'city': fields.String(
required=True,
readOnly=True,
description="The record's city."),
'province': fields.String(
required=True,
readOnly=True,
description="The record's province."),
'country': fields.String(
required=True,
readOnly=True,
description="The record's country."),
'timestamp': fields.DateTime(
required=True,
readOnly=True,
description='The date and time the reading was recorded'),
})
pressure = api.model(
'Pressure',
{
'id': fields.Integer(
readOnly=True,
description='The unique identifier of the record'),
'value': fields.Float(
required=True,
readOnly=True,
description="The reading's value"),
'value_units': fields.String(
required=True,
readOnly=True,
max=16,
description='The unit for the value (e.g. percent)'),
'value_error_range': fields.Float(
required=True,
readOnly=True,
description="The error range for the reading's value"),
'latitude': fields.Float(
required=True,
readOnly=True,
description='The latitude of the reading'),
'latitude_public': fields.Float(
required=True,
readOnly=True,
description='The public latitude of the reading'),
'longitude': fields.Float(
required=True,
readOnly=True,
description='The longitude of the reading'),
'longitude_public': fields.Float(
required=True,
readOnly=True,
description='The public longitude of the reading'),
'city': fields.String(
required=True,
readOnly=True,
description="The record's city."),
'province': fields.String(
required=True,
readOnly=True,
description="The record's province."),
'country': fields.String(
required=True,
readOnly=True,
description="The record's country."),
'elevation': fields.Float(
required=True,
readOnly=True,
description="The record's elevation."),
'elevation_units': fields.String(
required=True,
readOnly=True,
max=16,
description='The unit for the elevation (e.g. meters, feet)'),
'timestamp': fields.DateTime(
required=True,
readOnly=True,
description='The date and time the reading was recorded'),
})
public_pressure = api.model(
'Pressure',
{
'id': fields.Integer(
readOnly=True,
description='The unique identifier of the record'),
'value': fields.Float(
required=True,
readOnly=True,
description="The reading's value"),
'value_units': fields.String(
required=True,
readOnly=True,
max=16,
description='The unit for the value (e.g. percent)'),
'value_error_range': fields.Float(
required=True,
readOnly=True,
description="The error range for the reading's value"),
'latitude_public': fields.Float(
required=True,
readOnly=True,
description='The public latitude of the reading'),
'longitude_public': fields.Float(
required=True,
readOnly=True,
description='The public longitude of the reading'),
'city': fields.String(
required=True,
readOnly=True,
description="The record's city."),
'province': fields.String(
required=True,
readOnly=True,
description="The record's province."),
'country': fields.String(
required=True,
readOnly=True,
description="The record's country."),
'timestamp': fields.DateTime(
required=True,
readOnly=True,
description='The date and time the reading was recorded'),
})
temperature = api.model(
'Temperature',
{
'id': fields.Integer(
readOnly=True,
description='The unique identifier of the record'),
'value': fields.Float(
required=True,
readOnly=True,
description="The reading's value"),
'value_units': fields.String(
required=True,
readOnly=True,
max=16,
description='The unit for the value (e.g. percent)'),
'value_error_range': fields.Float(
required=True,
readOnly=True,
description="The error range for the reading's value"),
'latitude': fields.Float(
required=True,
readOnly=True,
description='The latitude of the reading'),
'latitude_public': fields.Float(
required=True,
readOnly=True,
description='The public latitude of the reading'),
'longitude': fields.Float(
required=True,
readOnly=True,
description='The longitude of the reading'),
'longitude_public': fields.Float(
required=True,
readOnly=True,
description='The public longitude of the reading'),
'city': fields.String(
required=True,
readOnly=True,
description="The record's city."),
'province': fields.String(
required=True,
readOnly=True,
description="The record's province."),
'country': fields.String(
required=True,
readOnly=True,
description="The record's country."),
'elevation': fields.Float(
required=True,
readOnly=True,
description="The record's elevation."),
'elevation_units': fields.String(
required=True,
readOnly=True,
max=16,
description='The unit for the elevation (e.g. meters, feet)'),
'timestamp': fields.DateTime(
required=True,
readOnly=True,
description='The date and time the reading was recorded'),
})
public_temperature = api.model(
'Temperature',
{
'id': fields.Integer(
readOnly=True,
description='The unique identifier of the record'),
'value': fields.Float(
required=True,
readOnly=True,
description="The reading's value"),
'value_units': fields.String(
required=True,
readOnly=True,
max=16,
description='The unit for the value (e.g. percent)'),
'value_error_range': fields.Float(
required=True,
readOnly=True,
description="The error range for the reading's value"),
'latitude_public': fields.Float(
required=True,
readOnly=True,
description='The public latitude of the reading'),
'longitude_public': fields.Float(
required=True,
readOnly=True,
description='The public longitude of the reading'),
'city': fields.String(
required=True,
readOnly=True,
description="The record's city."),
'province': fields.String(
required=True,
readOnly=True,
description="The record's province."),
'country': fields.String(
required=True,
readOnly=True,
description="The record's country."),
'timestamp': fields.DateTime(
required=True,
readOnly=True,
description='The date and time the reading was recorded'),
})
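# Illustrative sketch (not part of the original module): how one of the models above is typically
# used to shape a response payload. The resource class below is deliberately left unrouted, and
# the returned dict is a hypothetical record used only for demonstration.
from flask_restplus import Resource


class _ExamplePublicHumidityResource(Resource):
    @api.marshal_with(public_humidity)
    def get(self, record_id):
        # A hypothetical lookup; real endpoints in this project fetch records from the database layer.
        return {'id': record_id, 'value': 45.2, 'value_units': 'percent', 'city': 'Edmonton'}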
|
|
# encoding: utf-8
"""
Axis-related chart objects.
"""
from __future__ import absolute_import, print_function, unicode_literals
from ..enum.chart import XL_TICK_LABEL_POSITION, XL_TICK_MARK
from ..oxml.ns import qn
from ..text.text import Font
from ..util import lazyproperty
class _BaseAxis(object):
"""
Base class for chart axis objects. All axis objects share these
properties.
"""
def __init__(self, xAx_elm):
super(_BaseAxis, self).__init__()
self._element = xAx_elm
@property
def has_major_gridlines(self):
"""
Read/write boolean value specifying whether this axis has gridlines
at its major tick mark locations. Assigning |True| to this property
causes major gridlines to be displayed. Assigning |False| causes them
to be removed.
"""
if self._element.majorGridlines is None:
return False
return True
@has_major_gridlines.setter
def has_major_gridlines(self, value):
if bool(value) is True:
self._element.get_or_add_majorGridlines()
else:
self._element._remove_majorGridlines()
@property
def has_minor_gridlines(self):
"""
Read/write boolean value specifying whether this axis has gridlines
at its minor tick mark locations. Assigning |True| to this property
causes minor gridlines to be displayed. Assigning |False| causes them
to be removed.
"""
if self._element.minorGridlines is None:
return False
return True
@has_minor_gridlines.setter
def has_minor_gridlines(self, value):
if bool(value) is True:
self._element.get_or_add_minorGridlines()
else:
self._element._remove_minorGridlines()
@property
def major_tick_mark(self):
"""
Read/write :ref:`XlTickMark` value specifying the type of major tick
mark to display on this axis.
"""
majorTickMark = self._element.majorTickMark
if majorTickMark is None:
return XL_TICK_MARK.CROSS
return majorTickMark.val
@major_tick_mark.setter
def major_tick_mark(self, value):
self._element._remove_majorTickMark()
if value is XL_TICK_MARK.CROSS:
return
self._element._add_majorTickMark(val=value)
@property
def maximum_scale(self):
"""
Read/write float value specifying the upper limit of the value range
for this axis, the number at the top or right of the vertical or
horizontal value scale, respectively. The value |None| indicates the
upper limit should be determined automatically based on the range of
data point values associated with the axis.
"""
return self._element.scaling.maximum
@maximum_scale.setter
def maximum_scale(self, value):
scaling = self._element.scaling
scaling.maximum = value
@property
def minimum_scale(self):
"""
Read/write float value specifying lower limit of value range, the
number at the bottom or left of the value scale. |None| if no minimum
scale has been set. The value |None| indicates the lower limit should
be determined automatically based on the range of data point values
associated with the axis.
"""
return self._element.scaling.minimum
@minimum_scale.setter
def minimum_scale(self, value):
scaling = self._element.scaling
scaling.minimum = value
@property
def minor_tick_mark(self):
"""
Read/write :ref:`XlTickMark` value specifying the type of minor tick
mark for this axis.
"""
minorTickMark = self._element.minorTickMark
if minorTickMark is None:
return XL_TICK_MARK.CROSS
return minorTickMark.val
@minor_tick_mark.setter
def minor_tick_mark(self, value):
self._element._remove_minorTickMark()
if value is XL_TICK_MARK.CROSS:
return
self._element._add_minorTickMark(val=value)
@lazyproperty
def tick_labels(self):
"""
The |TickLabels| instance providing access to axis tick label
formatting properties. Tick labels are the numbers appearing on
a value axis or the category names appearing on a category axis.
"""
return TickLabels(self._element)
@property
def tick_label_position(self):
"""
Read/write :ref:`XlTickLabelPosition` value specifying where the tick
labels for this axis should appear.
"""
tickLblPos = self._element.tickLblPos
if tickLblPos is None:
return XL_TICK_LABEL_POSITION.NEXT_TO_AXIS
if tickLblPos.val is None:
return XL_TICK_LABEL_POSITION.NEXT_TO_AXIS
return tickLblPos.val
@tick_label_position.setter
def tick_label_position(self, value):
tickLblPos = self._element.get_or_add_tickLblPos()
tickLblPos.val = value
@property
def visible(self):
"""
Read/write. |True| if axis is visible, |False| otherwise.
"""
delete = self._element.delete
if delete is None:
return False
return False if delete.val else True
@visible.setter
def visible(self, value):
if value not in (True, False):
raise ValueError(
"assigned value must be True or False, got: %s" % value
)
delete = self._element.get_or_add_delete()
delete.val = not value
class CategoryAxis(_BaseAxis):
"""
A category axis of a chart.
"""
class TickLabels(object):
"""
A service class providing access to formatting of axis tick mark labels.
"""
def __init__(self, xAx_elm):
super(TickLabels, self).__init__()
self._element = xAx_elm
@lazyproperty
def font(self):
"""
The |Font| object that provides access to the text properties for
these tick labels, such as bold, italic, etc.
"""
defRPr = self._element.defRPr
font = Font(defRPr)
return font
@property
def number_format(self):
"""
Read/write string (e.g. "$#,##0.00") specifying the format for the
numbers on this axis. The syntax for these strings is the same as it
appears in the PowerPoint or Excel UI. Returns 'General' if no number
format has been set. Note that this format string has no effect on
rendered tick labels when :meth:`number_format_is_linked` is |True|.
Assigning a format string to this property automatically sets
:meth:`number_format_is_linked` to |False|.
"""
numFmt = self._element.numFmt
if numFmt is None:
return 'General'
return numFmt.formatCode
@number_format.setter
def number_format(self, value):
numFmt = self._element.get_or_add_numFmt()
numFmt.formatCode = value
self.number_format_is_linked = False
@property
def number_format_is_linked(self):
"""
Read/write boolean specifying whether number formatting should be
taken from the source spreadsheet rather than the value of
:meth:`number_format`.
"""
numFmt = self._element.numFmt
if numFmt is None:
return False
sourceLinked = numFmt.sourceLinked
if sourceLinked is None:
return True
return numFmt.sourceLinked
@number_format_is_linked.setter
def number_format_is_linked(self, value):
numFmt = self._element.get_or_add_numFmt()
numFmt.sourceLinked = value
@property
def offset(self):
"""
Read/write int value in range 0-1000 specifying the spacing between
the tick mark labels and the axis as a percentage of the default
value. 100 if no label offset setting is present.
"""
lblOffset = self._element.lblOffset
if lblOffset is None:
return 100
return lblOffset.val
@offset.setter
def offset(self, value):
if self._element.tag != qn('c:catAx'):
raise ValueError('only a category axis has an offset')
self._element._remove_lblOffset()
if value == 100:
return
lblOffset = self._element._add_lblOffset()
lblOffset.val = value
class ValueAxis(_BaseAxis):
"""
A value axis of a chart.
"""
@property
def major_unit(self):
"""
The float number of units between major tick marks on this value
axis. |None| corresponds to the 'Auto' setting in the UI, and
specifies the value should be calculated by PowerPoint based on the
underlying chart data.
"""
majorUnit = self._element.majorUnit
if majorUnit is None:
return None
return majorUnit.val
@major_unit.setter
def major_unit(self, value):
self._element._remove_majorUnit()
if value is None:
return
self._element._add_majorUnit(val=value)
@property
def minor_unit(self):
"""
The float number of units between minor tick marks on this value
axis. |None| corresponds to the 'Auto' setting in the UI, and
specifies the value should be calculated by PowerPoint based on the
underlying chart data.
"""
minorUnit = self._element.minorUnit
if minorUnit is None:
return None
return minorUnit.val
@minor_unit.setter
def minor_unit(self, value):
self._element._remove_minorUnit()
if value is None:
return
self._element._add_minorUnit(val=value)
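# Illustrative sketch (not part of this module): typical use of the axis API defined above. The
# file name and the assumption that the first slide contains a chart are placeholders only.
def _example_axis_usage():
    from pptx import Presentation
    from pptx.enum.chart import XL_TICK_MARK, XL_TICK_LABEL_POSITION

    prs = Presentation('chart.pptx')  # hypothetical presentation containing a chart
    chart = next(shape.chart for shape in prs.slides[0].shapes if shape.has_chart)
    value_axis = chart.value_axis
    value_axis.minimum_scale = 0.0    # fix the scale instead of letting PowerPoint choose
    value_axis.maximum_scale = 100.0
    value_axis.has_major_gridlines = True
    value_axis.major_tick_mark = XL_TICK_MARK.OUTSIDE
    value_axis.tick_label_position = XL_TICK_LABEL_POSITION.LOW
    value_axis.tick_labels.number_format = '0.0'
    return chart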
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse
from django.http import HttpResponse # noqa
from django.template import defaultfilters as filters
from django.utils import html
from django.utils.http import urlencode
from django.utils import safestring
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard import policy
DELETABLE_STATES = ("available", "error", "error_extending")
class VolumePolicyTargetMixin(policy.PolicyTargetMixin):
policy_target_attrs = (("project_id", 'os-vol-tenant-attr:tenant_id'),)
class LaunchVolume(tables.LinkAction):
name = "launch_volume"
verbose_name = _("Launch as Instance")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "compute:create"),)
def get_link_url(self, datum):
base_url = reverse(self.url)
vol_id = "%s:vol" % self.table.get_object_id(datum)
params = urlencode({"source_type": "volume_id",
"source_id": vol_id})
return "?".join([base_url, params])
def allowed(self, request, volume=None):
if getattr(volume, 'bootable', '') == 'true':
return volume.status == "available"
return False
class DeleteVolume(VolumePolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Volume",
u"Delete Volumes",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Volume",
u"Scheduled deletion of Volumes",
count
)
policy_rules = (("volume", "volume:delete"),)
def delete(self, request, obj_id):
cinder.volume_delete(request, obj_id)
def allowed(self, request, volume=None):
if volume:
return (volume.status in DELETABLE_STATES and
not getattr(volume, 'has_snapshot', False))
return True
class CreateVolume(tables.LinkAction):
name = "create"
verbose_name = _("Create Volume")
url = "horizon:project:volumes:volumes:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("volume", "volume:create"),)
ajax = True
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(CreateVolume, self).__init__(attrs, **kwargs)
def allowed(self, request, volume=None):
limits = api.cinder.tenant_absolute_limits(request)
gb_available = (limits.get('maxTotalVolumeGigabytes', float("inf"))
- limits.get('totalGigabytesUsed', 0))
volumes_available = (limits.get('maxTotalVolumes', float("inf"))
- limits.get('totalVolumesUsed', 0))
if gb_available <= 0 or volumes_available <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Create Volume")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
return True
def single(self, table, request, object_id=None):
self.allowed(request, None)
return HttpResponse(self.render())
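# Illustrative sketch (not part of the original module): the quota headroom computation used in
# CreateVolume.allowed above, shown with a plain dict. The sample limit values are assumptions.
def _example_volume_quota_headroom():
    limits = {'maxTotalVolumeGigabytes': 1000, 'totalGigabytesUsed': 980,
              'maxTotalVolumes': 10, 'totalVolumesUsed': 10}
    gb_available = (limits.get('maxTotalVolumeGigabytes', float("inf"))
                    - limits.get('totalGigabytesUsed', 0))
    volumes_available = (limits.get('maxTotalVolumes', float("inf"))
                         - limits.get('totalVolumesUsed', 0))
    # The Create Volume action is disabled when either headroom is exhausted.
    return gb_available <= 0 or volumes_available <= 0   # -> True for these sample values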
class ExtendVolume(VolumePolicyTargetMixin, tables.LinkAction):
name = "extend"
verbose_name = _("Extend Volume")
url = "horizon:project:volumes:volumes:extend"
classes = ("ajax-modal", "btn-extend")
policy_rules = (("volume", "volume:extend"),)
def allowed(self, request, volume=None):
return volume.status == "available"
class EditAttachments(tables.LinkAction):
name = "attachments"
verbose_name = _("Manage Attachments")
url = "horizon:project:volumes:volumes:attach"
classes = ("ajax-modal",)
icon = "pencil"
def allowed(self, request, volume=None):
if volume:
project_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
attach_allowed = \
policy.check((("compute", "compute:attach_volume"),),
request,
{"project_id": project_id})
detach_allowed = \
policy.check((("compute", "compute:detach_volume"),),
request,
{"project_id": project_id})
if attach_allowed or detach_allowed:
return volume.status in ("available", "in-use")
return False
class CreateSnapshot(VolumePolicyTargetMixin, tables.LinkAction):
name = "snapshots"
verbose_name = _("Create Snapshot")
url = "horizon:project:volumes:volumes:create_snapshot"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("volume", "volume:create_snapshot"),)
def allowed(self, request, volume=None):
try:
limits = api.cinder.tenant_absolute_limits(request)
except Exception:
exceptions.handle(request, _('Unable to retrieve tenant limits.'))
limits = {}
snapshots_available = (limits.get('maxTotalSnapshots', float("inf"))
- limits.get('totalSnapshotsUsed', 0))
if snapshots_available <= 0 and "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
return volume.status in ("available", "in-use")
class CreateBackup(VolumePolicyTargetMixin, tables.LinkAction):
name = "backups"
verbose_name = _("Create Backup")
url = "horizon:project:volumes:volumes:create_backup"
classes = ("ajax-modal",)
policy_rules = (("volume", "backup:create"),)
def allowed(self, request, volume=None):
return (cinder.volume_backup_supported(request) and
volume.status == "available")
class UploadToImage(VolumePolicyTargetMixin, tables.LinkAction):
name = "upload_to_image"
verbose_name = _("Upload to Image")
url = "horizon:project:volumes:volumes:upload_to_image"
classes = ("ajax-modal",)
icon = "cloud-upload"
policy_rules = (("volume", "volume:upload_to_image"),)
def allowed(self, request, volume=None):
has_image_service_perm = \
request.user.has_perm('openstack.services.image')
return (volume.status in ("available", "in-use") and
has_image_service_perm)
class EditVolume(VolumePolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Volume")
url = "horizon:project:volumes:volumes:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("volume", "volume:update"),)
def allowed(self, request, volume=None):
return volume.status in ("available", "in-use")
class RetypeVolume(VolumePolicyTargetMixin, tables.LinkAction):
name = "retype"
verbose_name = _("Change Volume Type")
url = "horizon:project:volumes:volumes:retype"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("volume", "volume:retype"),)
def allowed(self, request, volume=None):
return volume.status in ("available", "in-use")
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, volume_id):
volume = cinder.volume_get(request, volume_id)
return volume
def get_size(volume):
return _("%sGB") % volume.size
def get_attachment_name(request, attachment):
server_id = attachment.get("server_id", None)
if "instance" in attachment and attachment['instance']:
name = attachment["instance"].name
else:
try:
server = api.nova.server_get(request, server_id)
name = server.name
except Exception:
name = None
exceptions.handle(request, _("Unable to retrieve "
"attachment information."))
try:
url = reverse("horizon:project:instances:detail", args=(server_id,))
instance = '<a href="%s">%s</a>' % (url, html.escape(name))
except NoReverseMatch:
instance = name
return instance
class AttachmentColumn(tables.Column):
"""Customized column class.
It does complex processing on the attachments
for a volume instance.
"""
def get_raw_data(self, volume):
request = self.table.request
link = _('Attached to %(instance)s on %(dev)s')
attachments = []
# Filter out "empty" attachments which the client returns...
for attachment in [att for att in volume.attachments if att]:
# When a volume is attached it may return the server_id
# without the server name...
instance = get_attachment_name(request, attachment)
vals = {"instance": instance,
"dev": html.escape(attachment.get("device", ""))}
attachments.append(link % vals)
return safestring.mark_safe(", ".join(attachments))
def get_volume_type(volume):
return volume.volume_type if volume.volume_type != "None" else None
def get_encrypted_value(volume):
if not hasattr(volume, 'encrypted') or volume.encrypted is None:
return _("-")
elif volume.encrypted is False:
return _("No")
else:
return _("Yes")
class VolumesTableBase(tables.DataTable):
STATUS_CHOICES = (
("in-use", True),
("available", True),
("creating", None),
("error", False),
("error_extending", False),
)
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:volumes:volumes:detail")
description = tables.Column("description",
verbose_name=_("Description"),
truncate=40)
size = tables.Column(get_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
status = tables.Column("status",
filters=(filters.title,),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES)
def get_object_display(self, obj):
return obj.name
class VolumesFilterAction(tables.FilterAction):
def filter(self, table, volumes, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [volume for volume in volumes
if q in volume.name.lower()]
class VolumesTable(VolumesTableBase):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:volumes:volumes:detail")
volume_type = tables.Column(get_volume_type,
verbose_name=_("Type"))
attachments = AttachmentColumn("attachments",
verbose_name=_("Attached To"))
availability_zone = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
bootable = tables.Column('is_bootable',
verbose_name=_("Bootable"),
filters=(filters.yesno, filters.capfirst))
encryption = tables.Column(get_encrypted_value,
verbose_name=_("Encrypted"))
class Meta:
name = "volumes"
verbose_name = _("Volumes")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (CreateVolume, DeleteVolume, VolumesFilterAction)
row_actions = (EditVolume, ExtendVolume, LaunchVolume, EditAttachments,
CreateSnapshot, CreateBackup, RetypeVolume,
UploadToImage, DeleteVolume)
class DetachVolume(tables.BatchAction):
name = "detach"
classes = ('btn-danger', 'btn-detach')
policy_rules = (("compute", "compute:detach_volume"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Detach Volume",
u"Detach Volumes",
count
)
# This action is asynchronous.
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Detaching Volume",
u"Detaching Volumes",
count
)
def action(self, request, obj_id):
attachment = self.table.get_object_by_id(obj_id)
api.nova.instance_volume_detach(request,
attachment.get('server_id', None),
obj_id)
def get_success_url(self, request):
return reverse('horizon:project:volumes:index')
class AttachedInstanceColumn(tables.Column):
"""Customized column class that does complex processing on the attachments
for a volume instance.
"""
def get_raw_data(self, attachment):
request = self.table.request
return safestring.mark_safe(get_attachment_name(request, attachment))
class AttachmentsTable(tables.DataTable):
instance = AttachedInstanceColumn(get_attachment_name,
verbose_name=_("Instance"))
device = tables.Column("device",
verbose_name=_("Device"))
def get_object_id(self, obj):
return obj['id']
def get_object_display(self, attachment):
instance_name = get_attachment_name(self.request, attachment)
vals = {"volume_name": attachment['volume_name'],
"instance_name": html.strip_tags(instance_name)}
return _("Volume %(volume_name)s on instance %(instance_name)s") % vals
def get_object_by_id(self, obj_id):
for obj in self.data:
if self.get_object_id(obj) == obj_id:
return obj
raise ValueError('No match found for the id "%s".' % obj_id)
class Meta:
name = "attachments"
verbose_name = _("Attachments")
table_actions = (DetachVolume,)
row_actions = (DetachVolume,)
|
|
import unittest
import os
from textwrap import dedent
import numpy as np
from bltest import attr
from baiji import s3
from bltest import skip_if_unavailable, skip_on_import_error
from bltest.extra_asserts import ExtraAssertionsMixin
from unittest import mock
from lace.mesh import Mesh
from lace.serialization import obj
from lace.cache import sc
from lace.cache import vc
from .testing.scratch_dir import ScratchDirMixin
@attr('missing_assets')
class TestOBJBase(ExtraAssertionsMixin, unittest.TestCase):
def setUp(self):
import tempfile
self.tmp_dir = tempfile.mkdtemp('bodylabs-test')
self.truth = {
'box_v': np.array([[0.5, -0.5, 0.5, -0.5, 0.5, -0.5, 0.5, -0.5], [0.5, 0.5, -0.5, -0.5, 0.5, 0.5, -0.5, -0.5], [0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5, -0.5]]).T,
'box_f': np.array([[0, 1, 2], [3, 2, 1], [0, 2, 4], [6, 4, 2], [0, 4, 1], [5, 1, 4], [7, 5, 6], [4, 6, 5], [7, 6, 3], [2, 3, 6], [7, 3, 5], [1, 5, 3]]),
'box_segm': {'a':np.array(range(6), dtype=np.uint32), 'b':np.array([6, 10, 11], dtype=np.uint32), 'c':np.array([7, 8, 9], dtype=np.uint32)},
'box_segm_overlapping': {'a':np.array(range(6), dtype=np.uint32), 'b':np.array([6, 10, 11], dtype=np.uint32), 'c':np.array([7, 8, 9], dtype=np.uint32), 'd':np.array([1, 2, 8], dtype=np.uint32)},
'landm': {'pospospos' : 0, 'negnegneg' : 7},
'landm_xyz': {'pospospos' : np.array([0.5, 0.5, 0.5]), 'negnegneg' : np.array([-0.5, -0.5, -0.5])},
}
self.test_obj_url = vc.uri('/unittest/serialization/obj/test_box_simple.obj')
self.test_obj_path = vc('/unittest/serialization/obj/test_box_simple.obj')
self.test_obj_with_vertex_colors_url = vc.uri('/unittest/serialization/obj/test_box_simple_with_vertex_colors.obj')
self.test_obj_with_landmarks_url = vc.uri('/unittest/serialization/obj/test_box.obj')
self.test_obj_with_landmarks_path = vc('/unittest/serialization/obj/test_box.obj')
self.test_pp_path = vc('/unittest/serialization/obj/test_box.pp')
self.test_obj_with_overlapping_groups_path = vc('/unittest/serialization/obj/test_box_with_overlapping_groups.obj')
self.obj_with_texure = "s3://bodylabs-korper-assets/is/ps/shared/data/body/korper_testdata/textured_mean_scape_female.obj"
self.obj_with_texure_mtl = "s3://bodylabs-korper-assets/is/ps/shared/data/body/korper_testdata/textured_mean_scape_female.mtl"
self.obj_with_texure_tex = "s3://bodylabs-korper-assets/is/ps/shared/data/body/korper_testdata/textured_mean_scape_female.png"
def tearDown(self):
import shutil
shutil.rmtree(self.tmp_dir, ignore_errors=True)
class TestOBJBasicLoading(TestOBJBase):
def test_loads_from_local_path_using_constructor(self):
m = Mesh(filename=self.test_obj_path)
self.assertTrue((m.v == self.truth['box_v']).all())
self.assertTrue((m.f == self.truth['box_f']).all())
self.assertDictOfArraysEqual(m.segm, self.truth['box_segm'])
self.assertEqual(m.materials_filepath, None)
def test_loads_from_local_path_using_serializer(self):
m = obj.load(self.test_obj_path)
self.assertTrue((m.v == self.truth['box_v']).all())
self.assertTrue((m.f == self.truth['box_f']).all())
self.assertDictOfArraysEqual(m.segm, self.truth['box_segm'])
def test_loads_from_remote_path_using_serializer(self):
skip_if_unavailable('s3')
m = obj.load(self.test_obj_url)
self.assertTrue((m.v == self.truth['box_v']).all())
self.assertTrue((m.f == self.truth['box_f']).all())
self.assertDictOfArraysEqual(m.segm, self.truth['box_segm'])
def test_loads_from_open_file_using_serializer(self):
with open(self.test_obj_path) as f:
m = obj.load(f)
self.assertTrue((m.v == self.truth['box_v']).all())
self.assertTrue((m.f == self.truth['box_f']).all())
self.assertDictOfArraysEqual(m.segm, self.truth['box_segm'])
def test_loading_brings_in_normals_and_uvs(self):
# This file is known to have vt, vn, and faces of the form 1/2/3
texture_template = 's3://bodylabs-korper-assets/is/ps/shared/data/body/template/texture_coordinates/textured_template_low_v2.obj'
mesh_with_texture = obj.load(sc(texture_template))
self.assertIsNotNone(mesh_with_texture.vt)
self.assertIsNotNone(mesh_with_texture.ft)
self.assertEqual(mesh_with_texture.vt.shape[1], 2)
self.assertEqual(mesh_with_texture.vt.shape[0], np.max(mesh_with_texture.ft)+1)
self.assertIsNotNone(mesh_with_texture.vn)
self.assertIsNotNone(mesh_with_texture.fn)
self.assertEqual(mesh_with_texture.vn.shape[1], 3)
self.assertEqual(mesh_with_texture.vn.shape[0], np.max(mesh_with_texture.fn)+1)
def test_loading_vertex_colors(self):
# Mesh without vertex colors should not have vertex colors
mesh_without_vertex_colors = obj.load(sc(self.test_obj_url))
self.assertIsNone(mesh_without_vertex_colors.vc)
# Mesh with vertex colors should have vertex colors
mesh_with_vertex_colors = obj.load(sc(self.test_obj_with_vertex_colors_url))
self.assertIsNotNone(mesh_with_vertex_colors.vc)
# Check sizes
vc_length, vc_size = mesh_with_vertex_colors.vc.shape
v_length, _ = mesh_with_vertex_colors.v.shape
self.assertEqual(vc_length, v_length)
self.assertEqual(vc_size, 3)
# Vertices should be the same
self.assertTrue((mesh_without_vertex_colors.v == mesh_with_vertex_colors.v).all())
class TestOBJWithLandmarks(TestOBJBase):
def test_loads_from_local_path_using_constructor_with_landmarks(self):
skip_on_import_error('lace-search')
m = Mesh(filename=self.test_obj_with_landmarks_path, ppfilename=self.test_pp_path)
self.assertTrue((m.v == self.truth['box_v']).all())
self.assertTrue((m.f == self.truth['box_f']).all())
self.assertEqual(m.landm, self.truth['landm'])
self.assertDictOfArraysEqual(m.landm_xyz, self.truth['landm_xyz'])
self.assertDictOfArraysEqual(m.segm, self.truth['box_segm'])
class TestOBJBasicWriting(TestOBJBase):
def test_writing_obj_locally_using_mesh_write_obj(self):
local_file = os.path.join(self.tmp_dir, "test_writing_ascii_obj_locally_using_mesh_write_ply.obj")
m = Mesh(filename=self.test_obj_path)
m.write_obj(local_file)
self.assertFilesEqual(local_file, self.test_obj_path)
def test_writing_obj_locally_using_serializer(self):
local_file = os.path.join(self.tmp_dir, "test_writing_ascii_obj_locally_using_serializer.obj")
m = Mesh(filename=self.test_obj_path)
obj.dump(m, local_file)
self.assertFilesEqual(local_file, self.test_obj_path)
class TestOBJWithMaterials(ScratchDirMixin, TestOBJBase):
def test_writing_obj_with_mtl(self):
local_file = os.path.join(self.tmp_dir, "test_writing_obj_with_mtl.obj")
m = obj.load(sc(self.obj_with_texure))
obj.dump(m, local_file)
self.assertTrue(s3.exists(os.path.splitext(local_file)[0] + '.mtl'))
self.assertTrue(s3.exists(os.path.splitext(local_file)[0] + '.png'))
@mock.patch('baiji.s3.open', side_effect=s3.open)
def test_reading_obj_with_mtl_from_local_file(self, mock_s3_open):
local_obj_with_texure = os.path.join(self.tmp_dir, os.path.basename(self.obj_with_texure))
local_obj_with_texure_mtl = os.path.join(self.tmp_dir, os.path.basename(self.obj_with_texure_mtl))
local_obj_with_texure_tex = os.path.join(self.tmp_dir, os.path.basename(self.obj_with_texure_tex))
s3.cp(sc(self.obj_with_texure), local_obj_with_texure)
s3.cp(sc(self.obj_with_texure_mtl), local_obj_with_texure_mtl)
s3.cp(sc(self.obj_with_texure_tex), local_obj_with_texure_tex)
m = obj.load(local_obj_with_texure)
mock_s3_open.assert_has_calls([
mock.call(local_obj_with_texure, 'rb'),
mock.call(local_obj_with_texure_mtl, 'r'),
])
self.assertEqual(m.materials_filepath, local_obj_with_texure_mtl)
self.assertEqual(m.texture_filepath, local_obj_with_texure_tex)
@mock.patch('baiji.s3.open', side_effect=s3.open)
# @mock.patch('baiji.pod.asset_cache.AssetCache.__call__', side_effect=sc.__call__)
def test_reading_obj_with_mtl_from_sc_file(self, mock_sc, mock_s3_open):
from baiji.pod.asset_cache import CacheFile
sc_obj_with_texure = self.obj_with_texure.replace("s3://bodylabs-korper-assets", '')
sc_obj_with_texure_mtl = self.obj_with_texure_mtl.replace("s3://bodylabs-korper-assets", '')
sc_obj_with_texure_tex = self.obj_with_texure_tex.replace("s3://bodylabs-korper-assets", '')
bucket = "bodylabs-korper-assets"
m = obj.load(sc(sc_obj_with_texure, bucket=bucket))
mock_sc.assert_has_calls([
mock.call(sc_obj_with_texure, bucket=bucket), # the one above
mock.call(CacheFile(sc, sc_obj_with_texure_mtl, bucket=bucket).local), # in obj.load
mock.call(CacheFile(sc, sc_obj_with_texure_tex, bucket=bucket).local), # in obj.load
])
mock_s3_open.assert_has_calls([
mock.call(sc(sc_obj_with_texure, bucket=bucket), 'rb'),
mock.call(sc(sc_obj_with_texure_mtl, bucket=bucket), 'r'),
])
self.assertEqual(m.materials_filepath, sc(sc_obj_with_texure_mtl, bucket=bucket))
self.assertEqual(m.texture_filepath, sc(sc_obj_with_texure_tex, bucket=bucket))
@mock.patch('baiji.s3.open', side_effect=s3.open)
def test_reading_obj_with_mtl_from_s3_url(self, mock_s3_open):
skip_if_unavailable('s3')
m = obj.load(self.obj_with_texure)
mock_s3_open.assert_has_calls([
mock.call(self.obj_with_texure, 'rb'),
mock.call(self.obj_with_texure_mtl, 'r'),
])
self.assertEqual(m.materials_filepath, self.obj_with_texure_mtl)
self.assertEqual(m.texture_filepath, self.obj_with_texure_tex)
self.assertIsNotNone(m.texture_image)
def test_changing_texture_filepath(self):
m = obj.load(self.obj_with_texure)
self.assertEqual(m.texture_filepath, self.obj_with_texure_tex)
self.assertIsNotNone(m.texture_image)
m.texture_filepath = None
self.assertIsNone(m.texture_image)
def create_texture_test_files(self, include_Ka=False, include_Kd=False):
obj_path = os.path.join(self.scratch_dir, 'texture_test.obj')
with open(obj_path, 'w') as f:
f.write('mtllib {}\n'.format('texture_test.mtl'))
mtl_path = os.path.join(self.scratch_dir, 'texture_test.mtl')
with open(mtl_path, 'w') as f:
if include_Ka:
f.write('map_Ka {}\n'.format('ambient_tex.png'))
if include_Kd:
f.write('map_Kd {}\n'.format('diffuse_tex.png'))
return obj_path
def test_texture_reads_Ka(self):
obj_path = self.create_texture_test_files(include_Ka=True)
m = obj.load(obj_path)
self.assertEqual(m.texture_filepath, os.path.join(self.scratch_dir, 'ambient_tex.png'))
def test_texture_reads_Kd(self):
obj_path = self.create_texture_test_files(include_Kd=True)
m = obj.load(obj_path)
self.assertEqual(m.texture_filepath, os.path.join(self.scratch_dir, 'diffuse_tex.png'))
def test_texture_reads_Ka_if_both_Ka_and_Kd_are_present(self):
obj_path = self.create_texture_test_files(include_Ka=True, include_Kd=True)
m = obj.load(obj_path)
self.assertEqual(m.texture_filepath, os.path.join(self.scratch_dir, 'ambient_tex.png'))
@mock.patch('baiji.s3.open', side_effect=s3.open)
def test_reading_obj_with_mtl_from_absolute_path(self, mock_s3_open):
# This is generally a very bad idea; it makes it hard to move an obj around
skip_if_unavailable('s3')
obj_path = os.path.join(self.scratch_dir, 'abs_path_to_mtl.obj')
mlt_path = os.path.join(self.scratch_dir, 'abs_path_to_mtl.mlt')
tex_path = os.path.abspath(sc(self.obj_with_texure_tex))
with open(obj_path, 'w') as f:
f.write('mtllib {}\n'.format(mlt_path))
with open(mlt_path, 'w') as f:
f.write('map_Ka {}\n'.format(tex_path))
m = obj.load(obj_path)
mock_s3_open.assert_has_calls([
mock.call(obj_path, 'rb'),
mock.call(mlt_path, 'r'),
])
self.assertEqual(m.materials_filepath, mlt_path)
self.assertEqual(m.texture_filepath, tex_path)
@mock.patch('baiji.s3.open', side_effect=s3.open)
def test_reading_obj_with_mtl_from_missing_absolute_path(self, mock_s3_open):
# If an absolute path is given and the file is missing, try looking in the same directory;
# this lets you find the most common intention when an abs path is used.
skip_if_unavailable('s3')
obj_path = os.path.join(self.scratch_dir, 'abs_path_to_missing_mtl.obj')
real_mlt_path = os.path.join(self.scratch_dir, 'abs_path_to_missing_mtl.mlt')
arbitrary_mlt_path = os.path.join(self.scratch_dir, 'some_other_absolute_path', 'abs_path_to_missing_mtl.mlt')
tex_path = os.path.abspath(sc(self.obj_with_texure_tex))
with open(obj_path, 'w') as f:
f.write('mtllib {}\n'.format(arbitrary_mlt_path))
with open(real_mlt_path, 'w') as f:
f.write('map_Ka {}\n'.format(tex_path))
m = obj.load(obj_path)
mock_s3_open.assert_has_calls([
mock.call(obj_path, 'rb'),
mock.call(real_mlt_path, 'r'),
])
self.assertEqual(m.materials_filepath, real_mlt_path)
self.assertEqual(m.texture_filepath, tex_path)
@mock.patch('baiji.s3.open', side_effect=s3.open)
def test_reading_obj_with_mtl_from_missing_windows_absolute_path(self, mock_s3_open):
# In this case, we're given a windows absolute path, which is totally wrong, but if there happens
# to be a mtl file of the right name in the same dir as the obj, go for it.
# This is a significant case, because 3dMD outputs mtllib this way.
skip_if_unavailable('s3')
obj_path = os.path.join(self.scratch_dir, 'abs_path_to_missing_windows_mtl.obj')
real_mlt_path = os.path.join(self.scratch_dir, 'abs_path_to_missing_windows_mtl.mlt')
arbitrary_mlt_path = 'C:/Users/ARGH/Documents/I-Did_some_scans/Subject_47/abs_path_to_missing_windows_mtl.mlt'
tex_path = os.path.abspath(sc(self.obj_with_texure_tex))
with open(obj_path, 'w') as f:
f.write('mtllib {}\n'.format(arbitrary_mlt_path))
with open(real_mlt_path, 'w') as f:
f.write('map_Ka {}\n'.format(tex_path))
m = obj.load(obj_path)
mock_s3_open.assert_has_calls([
mock.call(obj_path, 'rb'),
mock.call(real_mlt_path, 'r'),
])
self.assertEqual(m.materials_filepath, real_mlt_path)
self.assertEqual(m.texture_filepath, tex_path)
class TestOBJWithComments(TestOBJBase):
def test_writing_obj_with_no_comments_does_not_write_comments(self):
local_file = os.path.join(self.tmp_dir, "test_writing_ply_with_no_comments_does_not_write_comments.ply")
m = obj.load(self.test_obj_path)
obj.dump(m, local_file)
with open(local_file) as f:
self.assertNotRegexpMatches(f.read(), '#')
def test_writing_obj_with_comments_does_write_comments(self):
local_file = os.path.join(self.tmp_dir, "test_writing_ply_with_comments_does_write_comments.ply")
m = obj.load(self.test_obj_path)
obj.dump(m, local_file, comments=['foo bar', 'this is a comment'])
with open(local_file) as f:
contents = f.read()
self.assertRegexpMatches(contents, '# foo bar\n# this is a comment\n')
self.assertNotRegexpMatches(contents, '# Copyright')
class TestOBJSpecialCases(TestOBJBase):
def test_writing_segmented_mesh_preserves_face_order(self):
m = obj.load(self.test_obj_path)
self.assertTrue((m.f == self.truth['box_f']).all())
local_file = os.path.join(self.tmp_dir, 'test_writing_segmented_mesh_preserves_face_order.obj')
obj.dump(m, local_file)
m_reloaded = obj.load(local_file)
self.assertTrue((m_reloaded.f == self.truth['box_f']).all())
def test_read_overlapping_groups(self):
m = obj.load(self.test_obj_with_overlapping_groups_path)
self.assertDictOfArraysEqual(m.segm, self.truth['box_segm_overlapping'])
def test_write_overlapping_groups(self):
m = obj.load(self.test_obj_with_overlapping_groups_path)
local_file = os.path.join(self.tmp_dir, 'test_write_overlapping_groups.obj')
obj.dump(m, local_file)
self.assertFilesEqual(local_file, self.test_obj_with_overlapping_groups_path)
def test_writing_mesh_with_overlapping_segments_preserves_face_order(self):
'''
Covered by test above, but covered here in a less fragile way, for good measure.
'''
m = obj.load(self.test_obj_with_overlapping_groups_path)
self.assertTrue((m.f == self.truth['box_f']).all())
local_file = os.path.join(self.tmp_dir, 'test_writing_mesh_with_overlapping_segments_preserves_face_order.obj')
obj.dump(m, local_file)
m_reloaded = obj.load(local_file)
self.assertTrue((m_reloaded.f == self.truth['box_f']).all())
def test_writing_empty_mesh(self):
m = Mesh()
local_file = os.path.join(self.tmp_dir, 'test_writing_empty_mesh.obj')
obj.dump(m, local_file)
self.assertEqual(os.stat(local_file).st_size, 0)
class TestOBJDangerousInputs(TestOBJBase):
'''
Here we test different malformations that could be dangerous to feed to the obj parser.
We're not testing what gets loaded; we're testing that appropriate exceptions are thrown
or we pass without failure
'''
def write_then_load(self, contents):
import tempfile
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
m = obj.load(f.name)
return m
def test_empty_file(self):
m = self.write_then_load('')
self.assertEqual(m.v, None)
self.assertEqual(m.f, None)
def test_junk(self):
with self.assertRaises(obj.LoadObjError):
self.write_then_load(dedent('''
dog
cat
rabbit
fox
''').lstrip())
def test_blanks_and_comments(self):
self.write_then_load(" \n") # spaces on a blank line
self.write_then_load("\t\n") # tabs on a blank line
self.write_then_load("#foo\n") # comments
self.write_then_load("\n\n") # consecutive newlines
self.write_then_load("\n\r") # windows file endings
self.write_then_load("\r\n") # I can never remember which order they go in
def test_tags_without_delimiters_before_data_is_malformed(self):
with self.assertRaises(obj.LoadObjError):
self.write_then_load('v1 2 3')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('vn1 2 3')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('vt1 2')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('f1 2 3')
def test_tags_with_extra_characters_are_malformed(self):
with self.assertRaises(obj.LoadObjError):
self.write_then_load('vx 1 2 3')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('vnx 1 2 3')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('vtx 1 2')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('fx 1 2 3')
def test_vertices_must_be_numbers(self):
with self.assertRaises(obj.LoadObjError):
self.write_then_load('v x y z')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('vt u v')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('vn x y z')
def test_faces_must_be_well_formed(self):
self.write_then_load('f 1 2 3')
self.write_then_load('f 1/1 2/2 3/3')
self.write_then_load('f 1//1 2//2 3//3')
self.write_then_load('f 1/1/1 2/2/2 3/3/3')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('f x y z')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('f /1 /2 /3')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('f 1/1/ 2/2/ 3/3/')
with self.assertRaises(obj.LoadObjError):
self.write_then_load('f 1/ 2/ 3/')
def test_allowed_tags(self):
self.write_then_load("mtllib foobar")
self.write_then_load("g foobar")
self.write_then_load("#landmark foobar")
self.write_then_load("usemtl foobar")
self.write_then_load("vp 1 2 3")
self.write_then_load("o foobar")
self.write_then_load("s 1")
self.write_then_load("s off")
self.write_then_load("s") # really 3dMD? what does this even mean?
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
import shutil
from stacks.utils.RMFTestCase import *
import tarfile
import tempfile
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
@patch.object(tempfile, "gettempdir", new=MagicMock(return_value="/tmp"))
class TestFalconServer(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "FALCON/0.5.0.2.1/package"
STACK_VERSION = "2.1"
UPGRADE_STACK_VERSION = "2.2"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
classname="FalconServer",
command="start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assert_configure_default()
self.assertResourceCalled('Execute', '/usr/lib/falcon/bin/falcon-config.sh server falcon',
path = ['/usr/bin'],
user = 'falcon',
environment = {'HADOOP_HOME': '/usr/lib/hadoop'},
not_if = 'ls /var/run/falcon/falcon.pid && ps -p ',
)
self.assertResourceCalled('File', '/usr/lib/falcon/server/webapp/falcon/WEB-INF/lib/je-5.0.73.jar',
content=DownloadSource('http://c6401.ambari.apache.org:8080/resources//je-5.0.73.jar'),
mode=0755
)
self.assertResourceCalled('Execute', '/usr/lib/falcon/bin/falcon-start -port 15000',
path = ['/usr/bin'],
user = 'falcon',
environment = {'HADOOP_HOME': '/usr/lib/hadoop'},
not_if = 'ls /var/run/falcon/falcon.pid && ps -p ',
)
self.assertNoMoreResources()
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
classname="FalconServer",
command="stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Execute', '/usr/lib/falcon/bin/falcon-stop',
path = ['/usr/bin'],
user = 'falcon',
environment = {'HADOOP_HOME': '/usr/lib/hadoop'})
self.assertResourceCalled('File', '/var/run/falcon/falcon.pid',
action = ['delete'])
self.assertNoMoreResources()
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
classname="FalconServer",
command="configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
def assert_configure_default(self):
self.assertResourceCalled('Directory', '/var/run/falcon',
owner = 'falcon',
create_parents = True,
cd_access = "a",
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/log/falcon',
owner = 'falcon',
create_parents = True,
cd_access = "a",
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/lib/falcon/webapp',
owner = 'falcon',
create_parents = True
)
self.assertResourceCalled('Directory', '/usr/lib/falcon',
owner = 'falcon',
create_parents = True
)
self.assertResourceCalled('Directory', '/etc/falcon',
mode = 0755,
create_parents = True
)
self.assertResourceCalled('Directory', '/etc/falcon/conf',
owner = 'falcon',
create_parents = True
)
self.assertResourceCalled('File', '/etc/falcon/conf/falcon-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['falcon-env']['content']),
owner = 'falcon',
group = 'hadoop'
)
self.assertResourceCalled('PropertiesFile', '/etc/falcon/conf/client.properties',
mode = 0644,
owner = 'falcon',
properties = {u'falcon.url': u'http://{{falcon_host}}:{{falcon_port}}'}
)
self.assertResourceCalled('PropertiesFile', '/etc/falcon/conf/runtime.properties',
mode = 0644,
properties = self.getConfig()['configurations']['falcon-runtime.properties'],
owner = 'falcon'
)
self.assertResourceCalled('PropertiesFile', '/etc/falcon/conf/startup.properties',
mode = 0644,
properties = self.getConfig()['configurations']['falcon-startup.properties'],
owner = 'falcon'
)
self.assertResourceCalled('File', '/etc/falcon/conf/log4j.properties',
content=InlineTemplate(self.getConfig()['configurations']['falcon-log4j']['content']),
owner='falcon',
group='hadoop',
mode= 0644
)
self.assertResourceCalled('Directory', '/hadoop/falcon/store',
owner = 'falcon',
create_parents = True
)
self.assertResourceCalled('HdfsResource', '/apps/falcon',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'falcon',
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
mode = 0777,
)
self.assertResourceCalled('HdfsResource', '/apps/data-mirroring',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'falcon',
group='users',
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
recursive_chown = True,
recursive_chmod = True,
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
mode = 0770,
source='/usr/hdp/current/falcon-server/data-mirroring'
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
hadoop_conf_dir = '/etc/hadoop/conf',
)
self.assertResourceCalled('Directory', '/hadoop/falcon',
owner = 'falcon',
create_parents = True,
cd_access='a'
)
self.assertResourceCalled('Directory', '/hadoop/falcon/embeddedmq',
owner = 'falcon',
create_parents = True
)
self.assertResourceCalled('Directory', '/hadoop/falcon/embeddedmq/data',
owner = 'falcon',
create_parents = True
)
@patch("os.path.isdir")
@patch("os.path.exists")
@patch("os.path.isfile")
def test_upgrade(self, isfile_mock, exists_mock, isdir_mock):
isdir_mock.return_value = True
exists_mock.side_effect = [False,True, True, True]
isfile_mock.return_value = True
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
classname = "FalconServer", command = "restart", config_file = "falcon-upgrade.json",
stack_version = self.UPGRADE_STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES )
self.assertResourceCalled('Execute',
'/usr/hdp/current/falcon-server/bin/falcon-stop',
path = ['/usr/hdp/current/hadoop-client/bin'], user='falcon',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client'})
self.assertResourceCalled('File', '/var/run/falcon/falcon.pid',
action = ['delete'])
self.assertResourceCalled('Execute', ('tar',
'-zcvhf',
'/tmp/falcon-upgrade-backup/falcon-local-backup.tar',
u'/hadoop/falcon'),
sudo = True, tries = 3, try_sleep = 1,
)
self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-server', u'2.2.1.0-2135'),
sudo = True,
)
self.assertResourceCalled('Execute', ('tar',
'-xvf',
'/tmp/falcon-upgrade-backup/falcon-local-backup.tar',
'-C',
u'/hadoop/falcon/'),
sudo = True, tries = 3, try_sleep = 1,
)
self.assertResourceCalled('Directory', '/tmp/falcon-upgrade-backup',
action = ['delete'],
)
self.assertResourceCalled('Directory', '/var/run/falcon',
owner = 'falcon',
create_parents = True,
cd_access = "a",
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/log/falcon',
owner = 'falcon',
create_parents = True,
cd_access = "a",
mode = 0755,
)
self.assertResourceCalled('Directory', '/usr/hdp/current/falcon-server/webapp',
owner = 'falcon',
create_parents = True,
)
self.assertResourceCalled('Directory', '/usr/hdp/current/falcon-server',
owner = 'falcon',
create_parents = True,
)
self.assertResourceCalled('Directory', '/etc/falcon',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/usr/hdp/current/falcon-server/conf',
owner = 'falcon',
create_parents = True,
)
self.assertResourceCalled('File', '/usr/hdp/current/falcon-server/conf/falcon-env.sh',
owner = 'falcon',
content = InlineTemplate(self.getConfig()['configurations']['falcon-env']['content']),
group = 'hadoop'
)
self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/falcon-server/conf/client.properties',
owner = u'falcon',
properties = {u'falcon.url': u'http://{{falcon_host}}:{{falcon_port}}'},
mode = 0644,
)
self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/falcon-server/conf/runtime.properties',
owner = 'falcon',
mode = 0644,
properties = {u'*.domain': u'${falcon.app.type}',
u'*.log.cleanup.frequency.days.retention': u'days(7)',
u'*.log.cleanup.frequency.hours.retention': u'minutes(1)',
u'*.log.cleanup.frequency.minutes.retention': u'hours(6)',
u'*.log.cleanup.frequency.months.retention': u'months(3)'},
)
self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/falcon-server/conf/startup.properties',
owner = 'falcon',
mode = 0644,
properties = self.getConfig()['configurations']['falcon-startup.properties'],
)
self.assertResourceCalled('File', '/usr/hdp/current/falcon-server/conf/log4j.properties',
content=InlineTemplate(self.getConfig()['configurations']['falcon-log4j']['content']),
owner='falcon',
group='hadoop',
mode= 0644
)
self.assertResourceCalled('Directory', '/hadoop/falcon/data/lineage/graphdb',
owner = 'falcon',
create_parents = True,
group = 'hadoop',
mode = 0775,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/hadoop/falcon/data/lineage',
owner = 'falcon',
create_parents = True,
group = 'hadoop',
mode = 0775,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/hadoop/falcon/store',
owner = 'falcon',
create_parents = True,
)
self.assertResourceCalled('HdfsResource', '/apps/falcon',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = self.getConfig()['configurations']['hdfs-site'],
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
dfs_type = '',
owner = 'falcon',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
mode = 0777,
)
self.assertResourceCalled('HdfsResource', '/apps/data-mirroring',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
source = '/usr/hdp/current/falcon-server/data-mirroring',
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
user = 'hdfs',
dfs_type = '',
hdfs_site = self.getConfig()['configurations']['hdfs-site'],
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
recursive_chmod = True,
recursive_chown = True,
owner = 'falcon',
group = 'users',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
mode = 0770,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = self.getConfig()['configurations']['hdfs-site'],
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
)
self.assertResourceCalled('Directory', '/hadoop/falcon',
owner = 'falcon',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/hadoop/falcon/embeddedmq',
owner = 'falcon',
create_parents = True,
)
self.assertResourceCalled('Directory', '/hadoop/falcon/embeddedmq/data',
owner = 'falcon',
create_parents = True,
)
self.assertResourceCalled('Execute', '/usr/hdp/current/falcon-server/bin/falcon-config.sh server falcon',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client'},
path = ['/usr/hdp/current/hadoop-client/bin'],
user = 'falcon',
not_if = 'ls /var/run/falcon/falcon.pid && ps -p ',
)
self.assertResourceCalled('Execute', '/usr/hdp/current/falcon-server/bin/falcon-start -port 15000',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client'},
path = ['/usr/hdp/current/hadoop-client/bin'],
user = 'falcon',
not_if = 'ls /var/run/falcon/falcon.pid && ps -p ',
)
self.assertNoMoreResources()
@patch('os.path.isfile', new=MagicMock(return_value=True))
def test_pre_upgrade_restart(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/falcon-upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.2.1.0-3242'
json_content['commandParams']['version'] = version
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
classname = "FalconServer",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-server', version), sudo=True,)
self.assertResourceCalled('Execute', ('tar',
'-xvf',
'/tmp/falcon-upgrade-backup/falcon-local-backup.tar',
'-C',
u'/hadoop/falcon/'),
tries = 3,
sudo = True,
try_sleep = 1,
)
self.assertResourceCalled('Directory', '/tmp/falcon-upgrade-backup',
action = ['delete'],
)
self.assertNoMoreResources()
@patch('os.path.isfile', new=MagicMock(return_value=True))
@patch.object(tarfile, 'open')
@patch.object(shutil, 'rmtree')
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart_23(self, tarfile_open_mock, rmtree_mock, call_mock):
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/falcon-upgrade.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
classname = "FalconServer",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-server', version), sudo=True,)
self.assertResourceCalled('Execute', ('tar',
'-xvf',
'/tmp/falcon-upgrade-backup/falcon-local-backup.tar',
'-C',
u'/hadoop/falcon/'),
sudo = True, tries = 3, try_sleep = 1,
)
self.assertResourceCalled('Directory', '/tmp/falcon-upgrade-backup',
action = ['delete'],
)
self.assertNoMoreResources()
self.assertEquals(1, mocks_dict['call'].call_count)
self.assertEquals(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'falcon', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
|
|
import math
import numpy as np
from keras.layers import Input, LSTM, Dense
from keras.models import Model
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras import losses
from keras.optimizers import Adam
import tensorflow as tf
class RNN():
def __init__(self, z_dim, action_dim, reward_dim=1, hidden_units=256, gaussian_mixtures=5, batch_size=32, epochs=20, learning_rate=0.001, optim="Adam", cte=True, z_factor=1.0, reward_factor=1.0):
self.z_dim = z_dim
self.action_dim = action_dim
self.reward_dim = reward_dim
self.hidden_units = hidden_units
self.gaussian_mixtures = gaussian_mixtures
self.learning_rate = learning_rate
self.optim = optim
self.epochs = epochs
self.batch_size = batch_size
self.do_cte = cte
self.z_factor = z_factor
self.reward_factor = reward_factor
self.models = self._build()
self.model = self.models[0]
self.forward = self.models[1]
def _build(self):
#### THE MODEL THAT WILL BE TRAINED
rnn_x = Input(shape=(None, self.z_dim + self.action_dim + self.reward_dim))
lstm = LSTM(self.hidden_units, return_sequences=True, return_state = True, name="rnn")
mdn = Dense(self.gaussian_mixtures * (3*self.z_dim) + self.reward_dim, name="mdn_output")
done_hidden_layer = Dense(30, name="done_hidden")
done_layer = Dense(1, name="done_output")
if self.do_cte:
cte_hidden_layer = Dense(30, name="cte_hidden")
cte_layer = Dense(1, name="cte_output")
lstm_output_model, _ , _ = lstm(rnn_x)
mdn_model = mdn(lstm_output_model)
done_model = done_layer(done_hidden_layer(lstm_output_model))
if self.do_cte:
cte_model = cte_layer(cte_hidden_layer(lstm_output_model))
outputs = [mdn_model, done_model]
if self.do_cte:
outputs.append(cte_model)
model = Model(rnn_x, outputs)
#### THE MODEL USED DURING PREDICTION
state_input_h = Input(shape=(self.hidden_units,))
state_input_c = Input(shape=(self.hidden_units,))
lstm_output_forward , state_h, state_c = lstm(rnn_x, initial_state = [state_input_h, state_input_c])
mdn_forward = mdn(lstm_output_forward)
done_forward = done_layer(done_hidden_layer(lstm_output_forward))
forward = Model([rnn_x] + [state_input_h, state_input_c], [mdn_forward, state_h, state_c, done_forward])
#### LOSS FUNCTIONS
def rnn_z_loss(y_true, y_pred):
z_true, _ = self.get_responses(y_true, self.z_dim)
d = self.gaussian_mixtures * self.z_dim
z_pred = y_pred[:,:,:(3*d)]
z_pred = K.reshape(z_pred, [-1, self.gaussian_mixtures * 3])
log_pi, mu, log_sigma = self.get_mixture_coef_tf(z_pred)
flat_z_true = K.reshape(z_true,[-1, 1])
z_loss = log_pi + self.tf_lognormal(flat_z_true, mu, log_sigma)
z_loss = -K.log(K.sum(K.exp(z_loss), 1, keepdims=True))
z_loss = K.mean(z_loss)
return z_loss
def rnn_rew_loss(y_true, y_pred):
z_true, rew_true = self.get_responses(y_true, self.z_dim)
#d = self.gaussian_mixtures * self.z_dim
reward_pred = y_pred[:,:,-1]
#rew_loss = K.binary_crossentropy(rew_true, reward_pred, from_logits = True)
#rew_loss = K.mean(rew_loss)
rew_loss = K.mean(K.square(reward_pred - rew_true), axis=-1)
return rew_loss
def rnn_loss(y_true, y_pred):
z_loss = rnn_z_loss(y_true, y_pred)
rew_loss = rnn_rew_loss(y_true, y_pred)
return (self.z_factor * z_loss) + (self.reward_factor * rew_loss)
self.z_loss = rnn_z_loss
self.rew_loss = rnn_rew_loss
self.loss = rnn_loss
#opti = Adam(lr=self.learning_rate)
#model.compile(loss=rnn_loss, optimizer=opti, metrics = [rnn_z_loss, rnn_rew_loss])
return (model,forward)
def compile(self, main_weight=1.0, done_weight=1.0, cte_weight=1.0):
# See: https://keras.io/getting-started/functional-api-guide/#multi-input-and-multi-output-models
#model.fit({'main_input': headline_data, 'aux_input': additional_data},
# {'main_output': labels, 'aux_output': labels},
# epochs=50, batch_size=32)
losses={'mdn_output': self.loss}
loss_weights={'mdn_output': 1.0}
metrics={'mdn_output': [self.z_loss, self.rew_loss]}
losses["done_output"] = 'binary_crossentropy'
loss_weights['done_output'] = done_weight
if self.do_cte:
losses["cte_output"] = 'mean_squared_error'
loss_weights['cte_output'] = cte_weight
opti = Adam(lr=self.learning_rate)
self.model.compile(optimizer=opti, loss=losses, loss_weights=loss_weights, metrics=metrics)
def set_random_params(self, stdev=0.5):
""" See: https://github.com/zacwellmer/WorldModels/blob/master/WorldModels/rnn/rnn.py#L90
"""
params = self.model.get_weights()
rand_params = []
for param_i in params:
# David Ha's initialization scheme
sampled_param = np.random.standard_cauchy(param_i.shape)*stdev / 10000.0
rand_params.append(sampled_param)
self.model.set_weights(rand_params)
def train(self, rnn_input, rnn_output, validation_split = 0.2):
earlystop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=5, verbose=1, mode='auto')
callbacks_list = [earlystop]
hist = self.model.fit(rnn_input, rnn_output,
shuffle=True,
epochs=self.epochs,
batch_size=self.batch_size,
validation_split=validation_split,
callbacks=callbacks_list)
return hist
def train_batch(self, rnn_input, rnn_output):
self.model.fit(rnn_input, rnn_output,
shuffle=False,
epochs=1,
batch_size=len(rnn_input))
def get_hidden_units(self):
return self.hidden_units
def set_weights(self, filepath, by_name=False):
self.model.load_weights(filepath, by_name=by_name)
def save_weights(self, filepath):
self.model.save_weights(filepath)
def get_responses(self, y_true, z_dim):
z_true = y_true[:,:,:z_dim]
rew_true = y_true[:,:,-1]
return z_true, rew_true
def get_mixture_coef_tf(self, z_pred):
log_pi, mu, log_sigma = tf.split(z_pred, 3, 1)
log_pi = log_pi - K.log(K.sum(K.exp(log_pi), axis = 1, keepdims = True)) # axis 1 is the mixture axis
return log_pi, mu, log_sigma
def tf_lognormal(self, z_true, mu, log_sigma):
logSqrtTwoPI = np.log(np.sqrt(2.0 * np.pi))
return -0.5 * ((z_true - mu) / K.exp(log_sigma)) ** 2 - log_sigma - logSqrtTwoPI
@staticmethod
def get_mixture_coef_np(z_pred):
log_pi, mu, log_sigma = np.split(z_pred, 3, 1)
log_pi = log_pi - np.log(np.sum(np.exp(log_pi), axis = 1, keepdims = True))
return log_pi, mu, log_sigma
@staticmethod
def get_pi_idx(x, pdf):
        # samples from a categorical distribution
N = pdf.size
accumulate = 0
for i in range(0, N):
accumulate += pdf[i]
if (accumulate >= x):
return i
random_value = np.random.randint(N)
#print('error with sampling ensemble, returning random', random_value)
return random_value
@staticmethod
def sample_z(mu, log_sigma):
z = mu + (np.exp(log_sigma)) * np.random.randn(*log_sigma.shape) * 0.5
return z
def sample_next_output(self, obs, h, c):
d = self.gaussian_mixtures * self.z_dim
out = self.forward.predict([np.array([[obs]]),np.array([h]),np.array([c])])
y_pred = out[0][0][0]
new_h = out[1][0]
new_c = out[2][0]
done = out[3][0]
z_pred = y_pred[:3*d]
rew_pred = y_pred[-1]
z_pred = np.reshape(z_pred, [-1, self.gaussian_mixtures * 3])
log_pi, mu, log_sigma = self.get_mixture_coef_np(z_pred)
chosen_log_pi = np.zeros(self.z_dim)
chosen_mu = np.zeros(self.z_dim)
chosen_log_sigma = np.zeros(self.z_dim)
# adjust temperatures
pi = np.copy(log_pi)
# pi -= pi.max()
pi = np.exp(pi)
pi /= pi.sum(axis=1).reshape(self.z_dim, 1)
for j in range(self.z_dim):
idx = self.get_pi_idx(np.random.rand(), pi[j])
chosen_log_pi[j] = idx
chosen_mu[j] = mu[j,idx]
chosen_log_sigma[j] = log_sigma[j,idx]
next_z = self.sample_z(chosen_mu, chosen_log_sigma)
if rew_pred > 0:
next_reward = 1
else:
next_reward = 0
return next_z, chosen_mu, chosen_log_sigma, chosen_log_pi, rew_pred, next_reward, new_h, new_c, done
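#### USAGE SKETCH (not part of the original source): every shape and value
#### below is a hypothetical assumption, chosen only to show how the
#### multi-output model above is compiled and fitted on one random batch.
if __name__ == '__main__':
    z_dim, action_dim = 32, 3
    rnn = RNN(z_dim=z_dim, action_dim=action_dim, batch_size=4, epochs=1, cte=True)
    rnn.compile()
    batch, timesteps = 4, 10
    # Inputs are [z, action, reward] per timestep; targets are keyed by the
    # output layer names that compile() wires the losses to.
    rnn_input = np.random.rand(batch, timesteps, z_dim + action_dim + 1).astype('float32')
    rnn_output = {
        'mdn_output': np.random.rand(batch, timesteps, z_dim + 1).astype('float32'),
        'done_output': np.random.randint(0, 2, (batch, timesteps, 1)).astype('float32'),
        'cte_output': np.random.rand(batch, timesteps, 1).astype('float32'),
    }
    rnn.train_batch(rnn_input, rnn_output)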
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import time
from cinder.brick import exception
from cinder.brick import executor
from cinder.brick.initiator import host_driver
from cinder.brick.initiator import linuxfc
from cinder.brick.initiator import linuxscsi
from cinder.brick.remotefs import remotefs
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.openstack.common import processutils as putils
LOG = logging.getLogger(__name__)
synchronized = lockutils.synchronized_with_prefix('brick-')
DEVICE_SCAN_ATTEMPTS_DEFAULT = 3
def get_connector_properties(root_helper, my_ip):
"""Get the connection properties for all protocols."""
iscsi = ISCSIConnector(root_helper=root_helper)
fc = linuxfc.LinuxFibreChannel(root_helper=root_helper)
props = {}
props['ip'] = my_ip
props['host'] = socket.gethostname()
initiator = iscsi.get_initiator()
if initiator:
props['initiator'] = initiator
wwpns = fc.get_fc_wwpns()
if wwpns:
props['wwpns'] = wwpns
wwnns = fc.get_fc_wwnns()
if wwnns:
props['wwnns'] = wwnns
return props
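# Example of what this helper returns (hypothetical values, shown as a
# comment only so the module stays side-effect free on import):
#
#   props = get_connector_properties('sudo', my_ip='192.168.0.10')
#   # => {'ip': '192.168.0.10', 'host': 'compute-1',
#   #     'initiator': 'iqn.1993-08.org.debian:01:abcdef',
#   #     'wwpns': [...], 'wwnns': [...]}  # FC keys only if HBAs are present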
class InitiatorConnector(executor.Executor):
def __init__(self, root_helper, driver=None,
execute=putils.execute,
device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
*args, **kwargs):
super(InitiatorConnector, self).__init__(root_helper, execute=execute,
*args, **kwargs)
if not driver:
driver = host_driver.HostDriver()
self.set_driver(driver)
self.device_scan_attempts = device_scan_attempts
def set_driver(self, driver):
"""The driver is used to find used LUNs."""
self.driver = driver
@staticmethod
def factory(protocol, root_helper, driver=None,
execute=putils.execute, use_multipath=False,
device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
*args, **kwargs):
"""Build a Connector object based upon protocol."""
LOG.debug("Factory for %s" % protocol)
protocol = protocol.upper()
if protocol == "ISCSI":
return ISCSIConnector(root_helper=root_helper,
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
*args, **kwargs)
elif protocol == "FIBRE_CHANNEL":
return FibreChannelConnector(root_helper=root_helper,
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
elif protocol == "AOE":
return AoEConnector(root_helper=root_helper,
driver=driver,
execute=execute,
device_scan_attempts=device_scan_attempts,
*args, **kwargs)
elif protocol == "NFS" or protocol == "GLUSTERFS":
return RemoteFsConnector(mount_type=protocol.lower(),
root_helper=root_helper,
driver=driver,
execute=execute,
device_scan_attempts=device_scan_attempts,
*args, **kwargs)
elif protocol == "LOCAL":
return LocalConnector(root_helper=root_helper,
driver=driver,
execute=execute,
device_scan_attempts=device_scan_attempts,
*args, **kwargs)
else:
msg = (_("Invalid InitiatorConnector protocol "
"specified %(protocol)s") %
dict(protocol=protocol))
raise ValueError(msg)
def check_valid_device(self, path):
cmd = ('dd', 'if=%(path)s' % {"path": path},
'of=/dev/null', 'count=1')
out, info = None, None
try:
out, info = self._execute(*cmd, run_as_root=True,
root_helper=self._root_helper)
except putils.ProcessExecutionError as e:
LOG.error(_("Failed to access the device on the path "
"%(path)s: %(error)s %(info)s.") %
{"path": path, "error": e.stderr,
"info": info})
return False
# If the info is none, the path does not exist.
if info is None:
return False
return True
def connect_volume(self, connection_properties):
"""Connect to a volume.
The connection_properties describes the information needed by
the specific protocol to use to make the connection.
"""
raise NotImplementedError()
def disconnect_volume(self, connection_properties, device_info):
"""Disconnect a volume from the local host.
The connection_properties are the same as from connect_volume.
The device_info is returned from connect_volume.
"""
raise NotImplementedError()
class ISCSIConnector(InitiatorConnector):
"""Connector class to attach/detach iSCSI volumes."""
def __init__(self, root_helper, driver=None,
execute=putils.execute, use_multipath=False,
device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
*args, **kwargs):
self._linuxscsi = linuxscsi.LinuxSCSI(root_helper, execute)
super(ISCSIConnector, self).__init__(root_helper, driver=driver,
execute=execute,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
self.use_multipath = use_multipath
def set_execute(self, execute):
super(ISCSIConnector, self).set_execute(execute)
self._linuxscsi.set_execute(execute)
@synchronized('connect_volume')
def connect_volume(self, connection_properties):
"""Attach the volume to instance_name.
connection_properties for iSCSI must include:
target_portal - ip and optional port
target_iqn - iSCSI Qualified Name
target_lun - LUN id of the volume
"""
device_info = {'type': 'block'}
if self.use_multipath:
#multipath installed, discovering other targets if available
target_portal = connection_properties['target_portal']
out = self._run_iscsiadm_bare(['-m',
'discovery',
'-t',
'sendtargets',
'-p',
target_portal],
check_exit_code=[0, 255])[0] \
or ""
for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
props = connection_properties.copy()
props['target_portal'] = ip
props['target_iqn'] = iqn
self._connect_to_iscsi_portal(props)
self._rescan_iscsi()
else:
self._connect_to_iscsi_portal(connection_properties)
host_device = self._get_device_path(connection_properties)
# The /dev/disk/by-path/... node is not always present immediately
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
while not os.path.exists(host_device):
if tries >= self.device_scan_attempts:
raise exception.VolumeDeviceNotFound(device=host_device)
LOG.warn(_("ISCSI volume not yet found at: %(host_device)s. "
"Will rescan & retry. Try number: %(tries)s"),
{'host_device': host_device,
'tries': tries})
# The rescan isn't documented as being necessary(?), but it helps
self._run_iscsiadm(connection_properties, ("--rescan",))
tries = tries + 1
if not os.path.exists(host_device):
time.sleep(tries ** 2)
if tries != 0:
LOG.debug(_("Found iSCSI node %(host_device)s "
"(after %(tries)s rescans)"),
{'host_device': host_device, 'tries': tries})
if self.use_multipath:
#we use the multipath device instead of the single path device
self._rescan_multipath()
multipath_device = self._get_multipath_device_name(host_device)
if multipath_device is not None:
host_device = multipath_device
device_info['path'] = host_device
return device_info
@synchronized('connect_volume')
def disconnect_volume(self, connection_properties, device_info):
"""Detach the volume from instance_name.
connection_properties for iSCSI must include:
target_portal - IP and optional port
target_iqn - iSCSI Qualified Name
target_lun - LUN id of the volume
"""
# Moved _rescan_iscsi and _rescan_multipath
# from _disconnect_volume_multipath_iscsi to here.
# Otherwise, if we do rescan after _linuxscsi.remove_multipath_device
# but before logging out, the removed devices under /dev/disk/by-path
# will reappear after rescan.
self._rescan_iscsi()
host_device = self._get_device_path(connection_properties)
multipath_device = None
if self.use_multipath:
self._rescan_multipath()
multipath_device = self._get_multipath_device_name(host_device)
if multipath_device:
device_realpath = os.path.realpath(host_device)
self._linuxscsi.remove_multipath_device(device_realpath)
return self._disconnect_volume_multipath_iscsi(
connection_properties, multipath_device)
# remove the device from the scsi subsystem
# this eliminates any stale entries until logout
dev_name = self._linuxscsi.get_name_from_path(host_device)
if dev_name:
self._linuxscsi.remove_scsi_device(dev_name)
# NOTE(vish): Only disconnect from the target if no luns from the
# target are in use.
device_prefix = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-" %
{'portal': connection_properties['target_portal'],
'iqn': connection_properties['target_iqn']})
devices = self.driver.get_all_block_devices()
devices = [dev for dev in devices if dev.startswith(device_prefix)]
if not devices:
self._disconnect_from_iscsi_portal(connection_properties)
def _get_device_path(self, connection_properties):
path = ("/dev/disk/by-path/ip-%(portal)s-iscsi-%(iqn)s-lun-%(lun)s" %
{'portal': connection_properties['target_portal'],
'iqn': connection_properties['target_iqn'],
'lun': connection_properties.get('target_lun', 0)})
return path
def get_initiator(self):
"""Secure helper to read file as root."""
file_path = '/etc/iscsi/initiatorname.iscsi'
try:
lines, _err = self._execute('cat', file_path, run_as_root=True,
root_helper=self._root_helper)
for l in lines.split('\n'):
if l.startswith('InitiatorName='):
return l[l.index('=') + 1:].strip()
except putils.ProcessExecutionError:
msg = (_("Could not find the iSCSI Initiator File %s")
% file_path)
LOG.warn(msg)
return None
def _run_iscsiadm(self, connection_properties, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
connection_properties['target_iqn'],
'-p',
connection_properties['target_portal'],
*iscsi_command, run_as_root=True,
root_helper=self._root_helper,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
def _iscsiadm_update(self, connection_properties, property_key,
property_value, **kwargs):
iscsi_command = ('--op', 'update', '-n', property_key,
'-v', property_value)
return self._run_iscsiadm(connection_properties, iscsi_command,
**kwargs)
def _get_target_portals_from_iscsiadm_output(self, output):
# return both portals and iqns
return [line.split() for line in output.splitlines()]
def _disconnect_volume_multipath_iscsi(self, connection_properties,
                                           multipath_name):
        """This removes a multipath device and its LUNs."""
LOG.debug("Disconnect multipath device %s" % multipath_name)
block_devices = self.driver.get_all_block_devices()
devices = []
for dev in block_devices:
if "/mapper/" in dev:
devices.append(dev)
else:
mpdev = self._get_multipath_device_name(dev)
if mpdev:
devices.append(mpdev)
# Do a discovery to find all targets.
# Targets for multiple paths for the same multipath device
# may not be the same.
out = self._run_iscsiadm_bare(['-m',
'discovery',
'-t',
'sendtargets',
'-p',
connection_properties['target_portal']],
check_exit_code=[0, 255])[0] \
or ""
ips_iqns = self._get_target_portals_from_iscsiadm_output(out)
if not devices:
# disconnect if no other multipath devices
self._disconnect_mpath(connection_properties, ips_iqns)
return
# Get a target for all other multipath devices
other_iqns = [self._get_multipath_iqn(device)
for device in devices]
# Get all the targets for the current multipath device
current_iqns = [iqn for ip, iqn in ips_iqns]
in_use = False
for current in current_iqns:
if current in other_iqns:
in_use = True
break
# If no other multipath device attached has the same iqn
# as the current device
if not in_use:
# disconnect if no other multipath devices with same iqn
self._disconnect_mpath(connection_properties, ips_iqns)
return
# else do not disconnect iscsi portals,
# as they are used for other luns
return
def _connect_to_iscsi_portal(self, connection_properties):
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
# run --op new. Therefore, we check to see if the
# target exists, and if we get 255 (Not Found), then
# we run --op new. This will also happen if another
# volume is using the same target.
try:
self._run_iscsiadm(connection_properties, ())
except putils.ProcessExecutionError as exc:
# iscsiadm returns 21 for "No records found" after version 2.0-871
if exc.exit_code in [21, 255]:
self._run_iscsiadm(connection_properties, ('--op', 'new'))
else:
raise
if connection_properties.get('auth_method'):
self._iscsiadm_update(connection_properties,
"node.session.auth.authmethod",
connection_properties['auth_method'])
self._iscsiadm_update(connection_properties,
"node.session.auth.username",
connection_properties['auth_username'])
self._iscsiadm_update(connection_properties,
"node.session.auth.password",
connection_properties['auth_password'])
#duplicate logins crash iscsiadm after load,
#so we scan active sessions to see if the node is logged in.
out = self._run_iscsiadm_bare(["-m", "session"],
run_as_root=True,
check_exit_code=[0, 1, 21])[0] or ""
portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
for p in out.splitlines() if p.startswith("tcp:")]
stripped_portal = connection_properties['target_portal'].split(",")[0]
if len(portals) == 0 or len([s for s in portals
if stripped_portal ==
s['portal'].split(",")[0]
and
s['iqn'] ==
connection_properties['target_iqn']]
) == 0:
try:
self._run_iscsiadm(connection_properties,
("--login",),
check_exit_code=[0, 255])
except putils.ProcessExecutionError as err:
#as this might be one of many paths,
                #only set successful logins to startup automatically
if err.exit_code in [15]:
self._iscsiadm_update(connection_properties,
"node.startup",
"automatic")
return
self._iscsiadm_update(connection_properties,
"node.startup",
"automatic")
def _disconnect_from_iscsi_portal(self, connection_properties):
self._iscsiadm_update(connection_properties, "node.startup", "manual",
check_exit_code=[0, 21, 255])
self._run_iscsiadm(connection_properties, ("--logout",),
check_exit_code=[0, 21, 255])
self._run_iscsiadm(connection_properties, ('--op', 'delete'),
check_exit_code=[0, 21, 255])
def _get_multipath_device_name(self, single_path_device):
device = os.path.realpath(single_path_device)
out = self._run_multipath(['-ll',
device],
check_exit_code=[0, 1])[0]
mpath_line = [line for line in out.splitlines()
if "scsi_id" not in line] # ignore udev errors
if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
return None
def _get_iscsi_devices(self):
try:
devices = list(os.walk('/dev/disk/by-path'))[0][-1]
except IndexError:
return []
return [entry for entry in devices if entry.startswith("ip-")]
def _disconnect_mpath(self, connection_properties, ips_iqns):
for ip, iqn in ips_iqns:
props = connection_properties.copy()
props['target_portal'] = ip
props['target_iqn'] = iqn
self._disconnect_from_iscsi_portal(props)
self._rescan_multipath()
def _get_multipath_iqn(self, multipath_device):
entries = self._get_iscsi_devices()
for entry in entries:
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
entry_multipath = self._get_multipath_device_name(entry_real_path)
if entry_multipath == multipath_device:
return entry.split("iscsi-")[1].split("-lun")[0]
return None
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = self._execute('iscsiadm',
*iscsi_command,
run_as_root=True,
root_helper=self._root_helper,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
def _run_multipath(self, multipath_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = self._execute('multipath',
*multipath_command,
run_as_root=True,
root_helper=self._root_helper,
check_exit_code=check_exit_code)
LOG.debug("multipath %s: stdout=%s stderr=%s" %
(multipath_command, out, err))
return (out, err)
def _rescan_iscsi(self):
self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
check_exit_code=[0, 1, 21, 255])
self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
check_exit_code=[0, 1, 21, 255])
def _rescan_multipath(self):
self._run_multipath('-r', check_exit_code=[0, 1, 21])
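# Illustrative iSCSI connection_properties (hypothetical values; the required
# keys are the ones listed in ISCSIConnector.connect_volume above, the auth_*
# keys are optional):
#
#   iscsi_props = {
#       'target_portal': '10.0.0.2:3260',
#       'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',
#       'target_lun': 1,
#       'auth_method': 'CHAP',
#       'auth_username': 'chap-user',
#       'auth_password': 'chap-secret',
#   }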
class FibreChannelConnector(InitiatorConnector):
"""Connector class to attach/detach Fibre Channel volumes."""
def __init__(self, root_helper, driver=None,
execute=putils.execute, use_multipath=False,
device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
*args, **kwargs):
self._linuxscsi = linuxscsi.LinuxSCSI(root_helper, execute)
self._linuxfc = linuxfc.LinuxFibreChannel(root_helper, execute)
super(FibreChannelConnector, self).__init__(root_helper, driver=driver,
execute=execute,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
self.use_multipath = use_multipath
def set_execute(self, execute):
super(FibreChannelConnector, self).set_execute(execute)
self._linuxscsi.set_execute(execute)
self._linuxfc.set_execute(execute)
@synchronized('connect_volume')
def connect_volume(self, connection_properties):
"""Attach the volume to instance_name.
connection_properties for Fibre Channel must include:
        target_wwn - World Wide Name(s) of the target port(s)
        target_lun - LUN id of the volume
"""
LOG.debug("execute = %s" % self._execute)
device_info = {'type': 'block'}
ports = connection_properties['target_wwn']
wwns = []
# we support a list of wwns or a single wwn
if isinstance(ports, list):
for wwn in ports:
wwns.append(str(wwn))
elif isinstance(ports, basestring):
wwns.append(str(ports))
# We need to look for wwns on every hba
# because we don't know ahead of time
# where they will show up.
hbas = self._linuxfc.get_fc_hbas_info()
host_devices = []
for hba in hbas:
pci_num = self._get_pci_num(hba)
if pci_num is not None:
for wwn in wwns:
target_wwn = "0x%s" % wwn.lower()
host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
(pci_num,
target_wwn,
connection_properties.get('target_lun', 0)))
host_devices.append(host_device)
if len(host_devices) == 0:
# this is empty because we don't have any FC HBAs
msg = _("We are unable to locate any Fibre Channel devices")
LOG.warn(msg)
raise exception.NoFibreChannelHostsFound()
# The /dev/disk/by-path/... node is not always present immediately
# We only need to find the first device. Once we see the first device
# multipath will have any others.
def _wait_for_device_discovery(host_devices):
tries = self.tries
for device in host_devices:
LOG.debug(_("Looking for Fibre Channel dev %(device)s"),
{'device': device})
if os.path.exists(device):
self.host_device = device
# get the /dev/sdX device. This is used
# to find the multipath device.
self.device_name = os.path.realpath(device)
raise loopingcall.LoopingCallDone()
if self.tries >= self.device_scan_attempts:
msg = _("Fibre Channel volume device not found.")
LOG.error(msg)
raise exception.NoFibreChannelVolumeDeviceFound()
            LOG.warn(_("Fibre Channel volume not yet found. "
"Will rescan & retry. Try number: %(tries)s"),
{'tries': tries})
self._linuxfc.rescan_hosts(hbas)
self.tries = self.tries + 1
self.host_device = None
self.device_name = None
self.tries = 0
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_device_discovery, host_devices)
timer.start(interval=2).wait()
tries = self.tries
if self.host_device is not None and self.device_name is not None:
LOG.debug(_("Found Fibre Channel volume %(name)s "
"(after %(tries)s rescans)"),
{'name': self.device_name, 'tries': tries})
# see if the new drive is part of a multipath
# device. If so, we'll use the multipath device.
if self.use_multipath:
mdev_info = self._linuxscsi.find_multipath_device(self.device_name)
if mdev_info is not None:
LOG.debug(_("Multipath device discovered %(device)s")
% {'device': mdev_info['device']})
device_path = mdev_info['device']
devices = mdev_info['devices']
device_info['multipath_id'] = mdev_info['id']
else:
# we didn't find a multipath device.
# so we assume the kernel only sees 1 device
device_path = self.host_device
dev_info = self._linuxscsi.get_device_info(self.device_name)
devices = [dev_info]
else:
device_path = self.host_device
dev_info = self._linuxscsi.get_device_info(self.device_name)
devices = [dev_info]
device_info['path'] = device_path
device_info['devices'] = devices
return device_info
@synchronized('connect_volume')
def disconnect_volume(self, connection_properties, device_info):
"""Detach the volume from instance_name.
connection_properties for Fibre Channel must include:
        target_wwn - World Wide Name(s) of the target port(s)
target_lun - LUN id of the volume
"""
devices = device_info['devices']
# If this is a multipath device, we need to search again
# and make sure we remove all the devices. Some of them
# might not have shown up at attach time.
if self.use_multipath and 'multipath_id' in device_info:
multipath_id = device_info['multipath_id']
mdev_info = self._linuxscsi.find_multipath_device(multipath_id)
devices = mdev_info['devices']
LOG.debug("devices to remove = %s" % devices)
# There may have been more than 1 device mounted
# by the kernel for this volume. We have to remove
# all of them
for device in devices:
self._linuxscsi.remove_scsi_device(device["device"])
def _get_pci_num(self, hba):
# NOTE(walter-boring)
# device path is in format of
# /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
# sometimes an extra entry exists before the host2 value
# we always want the value prior to the host2 value
pci_num = None
if hba is not None:
if "device_path" in hba:
index = 0
device_path = hba['device_path'].split('/')
for value in device_path:
if value.startswith('host'):
break
index = index + 1
if index > 0:
pci_num = device_path[index - 1]
return pci_num
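# Illustrative Fibre Channel connection_properties (hypothetical values;
# target_wwn may be a single WWN string or a list, as handled above):
#
#   fc_props = {
#       'target_wwn': ['500a098280feeba5', '500a098290feeba5'],
#       'target_lun': 1,
#   }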
class AoEConnector(InitiatorConnector):
"""Connector class to attach/detach AoE volumes."""
def __init__(self, root_helper, driver=None,
execute=putils.execute,
device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
*args, **kwargs):
super(AoEConnector, self).__init__(root_helper, driver=driver,
execute=execute,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
def _get_aoe_info(self, connection_properties):
shelf = connection_properties['target_shelf']
lun = connection_properties['target_lun']
aoe_device = 'e%(shelf)s.%(lun)s' % {'shelf': shelf,
'lun': lun}
aoe_path = '/dev/etherd/%s' % (aoe_device)
return aoe_device, aoe_path
@lockutils.synchronized('aoe_control', 'aoe-')
def connect_volume(self, connection_properties):
"""Discover and attach the volume.
connection_properties for AoE must include:
target_shelf - shelf id of volume
target_lun - lun id of volume
"""
aoe_device, aoe_path = self._get_aoe_info(connection_properties)
device_info = {
'type': 'block',
'device': aoe_device,
'path': aoe_path,
}
if os.path.exists(aoe_path):
self._aoe_revalidate(aoe_device)
else:
self._aoe_discover()
waiting_status = {'tries': 0}
#NOTE(jbr_): Device path is not always present immediately
def _wait_for_discovery(aoe_path):
if os.path.exists(aoe_path):
raise loopingcall.LoopingCallDone
if waiting_status['tries'] >= self.device_scan_attempts:
raise exception.VolumeDeviceNotFound(device=aoe_path)
LOG.warn(_("AoE volume not yet found at: %(path)s. "
"Try number: %(tries)s"),
{'path': aoe_device,
'tries': waiting_status['tries']})
self._aoe_discover()
waiting_status['tries'] += 1
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_discovery,
aoe_path)
timer.start(interval=2).wait()
if waiting_status['tries']:
LOG.debug(_("Found AoE device %(path)s "
"(after %(tries)s rediscover)"),
{'path': aoe_path,
'tries': waiting_status['tries']})
return device_info
@lockutils.synchronized('aoe_control', 'aoe-')
def disconnect_volume(self, connection_properties, device_info):
"""Detach and flush the volume.
connection_properties for AoE must include:
target_shelf - shelf id of volume
target_lun - lun id of volume
"""
aoe_device, aoe_path = self._get_aoe_info(connection_properties)
if os.path.exists(aoe_path):
self._aoe_flush(aoe_device)
def _aoe_discover(self):
(out, err) = self._execute('aoe-discover',
run_as_root=True,
root_helper=self._root_helper,
check_exit_code=0)
        LOG.debug(_('aoe-discover: stdout=%(out)s stderr=%(err)s') %
{'out': out, 'err': err})
def _aoe_revalidate(self, aoe_device):
(out, err) = self._execute('aoe-revalidate',
aoe_device,
run_as_root=True,
root_helper=self._root_helper,
check_exit_code=0)
        LOG.debug(_('aoe-revalidate %(dev)s: stdout=%(out)s stderr=%(err)s') %
{'dev': aoe_device, 'out': out, 'err': err})
def _aoe_flush(self, aoe_device):
(out, err) = self._execute('aoe-flush',
aoe_device,
run_as_root=True,
root_helper=self._root_helper,
check_exit_code=0)
        LOG.debug(_('aoe-flush %(dev)s: stdout=%(out)s stderr=%(err)s') %
{'dev': aoe_device, 'out': out, 'err': err})
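# Illustrative AoE connection_properties (hypothetical values; shelf and lun
# map to the /dev/etherd/e<shelf>.<lun> path built in _get_aoe_info):
#
#   aoe_props = {'target_shelf': 3, 'target_lun': 1}  # -> /dev/etherd/e3.1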
class RemoteFsConnector(InitiatorConnector):
"""Connector class to attach/detach NFS and GlusterFS volumes."""
def __init__(self, mount_type, root_helper, driver=None,
execute=putils.execute,
device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
*args, **kwargs):
kwargs = kwargs or {}
conn = kwargs.get('conn')
if conn:
mount_point_base = conn.get('mount_point_base')
if mount_type.lower() == 'nfs':
kwargs['nfs_mount_point_base'] =\
kwargs.get('nfs_mount_point_base') or\
mount_point_base
elif mount_type.lower() == 'glusterfs':
kwargs['glusterfs_mount_point_base'] =\
kwargs.get('glusterfs_mount_point_base') or\
mount_point_base
else:
LOG.warn(_("Connection details not present."
" RemoteFsClient may not initialize properly."))
self._remotefsclient = remotefs.RemoteFsClient(mount_type, root_helper,
execute=execute,
*args, **kwargs)
super(RemoteFsConnector, self).__init__(root_helper, driver=driver,
execute=execute,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
def set_execute(self, execute):
super(RemoteFsConnector, self).set_execute(execute)
self._remotefsclient.set_execute(execute)
def connect_volume(self, connection_properties):
"""Ensure that the filesystem containing the volume is mounted.
connection_properties must include:
export - remote filesystem device (e.g. '172.18.194.100:/var/nfs')
name - file name within the filesystem
connection_properties may optionally include:
options - options to pass to mount
"""
mnt_flags = []
if connection_properties.get('options'):
mnt_flags = connection_properties['options'].split()
nfs_share = connection_properties['export']
self._remotefsclient.mount(nfs_share, mnt_flags)
mount_point = self._remotefsclient.get_mount_point(nfs_share)
path = mount_point + '/' + connection_properties['name']
return {'path': path}
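    # Illustrative connection_properties for this connector (hypothetical
    # values; 'export' and 'name' are required, 'options' is optional):
    #
    #   nfs_props = {
    #       'export': '172.18.194.100:/var/nfs',
    #       'name': 'volume-0001',
    #       'options': '-o vers=3',
    #   }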
def disconnect_volume(self, connection_properties, device_info):
"""No need to do anything to disconnect a volume in a filesystem."""
class LocalConnector(InitiatorConnector):
    """Connector class to attach/detach File System backed volumes."""
def __init__(self, root_helper, driver=None, execute=putils.execute,
*args, **kwargs):
super(LocalConnector, self).__init__(root_helper, driver=driver,
execute=execute, *args, **kwargs)
def connect_volume(self, connection_properties):
"""Connect to a volume.
connection_properties must include:
device_path - path to the volume to be connected
"""
if 'device_path' not in connection_properties:
msg = (_("Invalid connection_properties specified "
"no device_path attribute"))
raise ValueError(msg)
device_info = {'type': 'local',
'path': connection_properties['device_path']}
return device_info
def disconnect_volume(self, connection_properties, device_info):
"""Disconnect a volume from the local host."""
pass
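# Factory usage sketch (not part of the original source; the values are
# illustrative assumptions). 'LOCAL' is used here because it needs no external
# tooling; real deployments typically pass a rootwrap command as root_helper
# rather than plain 'sudo'.
if __name__ == '__main__':
    connector = InitiatorConnector.factory('LOCAL', root_helper='sudo')
    info = connector.connect_volume({'device_path': '/dev/vdb'})
    print(info)  # {'type': 'local', 'path': '/dev/vdb'}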
|
|
"""
Test the challenger by sending requests to it via a TiddlyWeb instance.
The LDAP interface is mocked, since setting up a real LDAP server in a
test environment is too much effort.
"""
import httplib2
import ldap
from mock import Mock
from test.fixtures import initialize_app
def setup_module():
initialize_app()
def test_challenger_get_responds_with_401():
http = httplib2.Http()
response, content = http.request('http://our_test_domain:8001/challenge/tiddlywebplugins.ldapauth', method='GET')
assert response['status'] == '401'
def test_challenger_get_responds_with_login_form():
http = httplib2.Http()
response, content = http.request('http://our_test_domain:8001/challenge/tiddlywebplugins.ldapauth', method='GET')
assert response['content-type'] == 'text/html; charset=UTF-8'
_assert_form(content)
def test_challenger_get_includes_redirect():
http = httplib2.Http()
response, content = \
http.request('http://our_test_domain:8001/challenge/tiddlywebplugins.ldapauth?tiddlyweb_redirect=/foo',
method='GET')
_assert_form(content, redirect='/foo')
def test_post_valid_user_credentials_responds_with_303():
mock_ldap, mock_initialize = _mock_good_ldap_bind()
try:
_send_good_login()
except httplib2.RedirectLimit, e:
raised = 1
mock_ldap.initialize.assert_called_once_with('ldap://127.0.0.1:389')
mock_initialize.simple_bind_s.assert_called_once_with('cn=pads,dc=localhost', 'letmein')
assert raised
assert e.response['status'] == '303'
def test_post_valid_user_credentials_sets_cookie():
_mock_good_ldap_bind()
try:
_send_good_login()
except httplib2.RedirectLimit, e:
raised = 1
assert raised
assert 'tiddlyweb_user="pads:0af5c9b' in e.response['set-cookie']
def test_post_valid_user_credentials_applies_redirect():
_mock_good_ldap_bind()
try:
_send_good_login(redirect='/bags')
except httplib2.RedirectLimit, e:
raised = 1
assert raised
headers = {'cookie': e.response['set-cookie']}
http = httplib2.Http()
response, content = http.request(e.response['location'], method='GET', headers=headers)
assert response['status'] == '200'
assert '<title>TiddlyWeb - Bags</title>' in content
def test_post_invalid_user_credentials_responds_with_401():
mock_ldap, mock_initialize = _mock_bad_ldap_bind()
response, content = _send_bad_login()
mock_ldap.initialize.assert_called_once_with('ldap://127.0.0.1:389')
mock_initialize.simple_bind_s.assert_called_once_with('cn=imposter,dc=localhost', 'letmein')
assert response['status'] == '401'
def test_post_invalid_user_credentials_responds_with_login_form():
mock_ldap, mock_initialize = _mock_bad_ldap_bind()
response, content = _send_bad_login()
mock_ldap.initialize.assert_called_once_with('ldap://127.0.0.1:389')
mock_initialize.simple_bind_s.assert_called_once_with('cn=imposter,dc=localhost', 'letmein')
_assert_form(content, 'Invalid user credentials, please try again')
def test_post_invalid_user_credentials_preserves_redirect_in_form():
mock_ldap, mock_initialize = _mock_bad_ldap_bind()
response, content = _send_bad_login(redirect='/bar')
mock_ldap.initialize.assert_called_once_with('ldap://127.0.0.1:389')
mock_initialize.simple_bind_s.assert_called_once_with('cn=imposter,dc=localhost', 'letmein')
_assert_form(content, 'Invalid user credentials, please try again', redirect='/bar')
def test_post_can_use_custom_ldap_config():
from tiddlyweb.config import config
config['ldapauth'] = {
'ldap_host': '1.2.3.4',
'ldap_port': '56789'
}
mock_ldap, mock_initialize = _mock_bad_ldap_bind()
_send_bad_login()
mock_ldap.initialize.assert_called_once_with('ldap://1.2.3.4:56789')
def test_no_ldap_connection_responds_with_504():
mock_ldap, mock_initialize = _mock_bad_ldap_bind(exception=ldap.SERVER_DOWN({'desc': "Can't contact LDAP server"}))
response, content = _send_good_login()
mock_ldap.initialize.assert_called_once()
mock_initialize.simple_bind_s.assert_called_once_with('cn=pads,dc=localhost', 'letmein')
assert response['status'] == '504'
def test_no_ldap_connection_responds_with_login_form():
mock_ldap, mock_initialize = _mock_bad_ldap_bind(exception=ldap.SERVER_DOWN({'desc': "Can't contact LDAP server"}))
response, content = _send_good_login()
mock_ldap.initialize.assert_called_once()
mock_initialize.simple_bind_s.assert_called_once_with('cn=pads,dc=localhost', 'letmein')
_assert_form(content, 'Unable to reach authorization provider, please contact your administrator')
def test_no_ldap_connection_preserves_redirect_in_form():
mock_ldap, mock_initialize = _mock_bad_ldap_bind(exception=ldap.SERVER_DOWN({'desc': "Can't contact LDAP server"}))
response, content = _send_good_login(redirect='/baz')
mock_ldap.initialize.assert_called_once()
mock_initialize.simple_bind_s.assert_called_once_with('cn=pads,dc=localhost', 'letmein')
_assert_form(content, 'Unable to reach authorization provider, please contact your administrator', redirect='/baz')
def test_configured_base_dn_is_used_as_part_of_login():
from tiddlyweb.config import config
config['ldapauth'] = {
'ldap_base_dn': 'dc=tiddlyweb,dc=org'
}
mock_ldap, mock_initialize = _mock_good_ldap_bind()
try:
_send_good_login()
except httplib2.RedirectLimit, e:
raised = 1
mock_ldap.initialize.assert_called_once_with('ldap://127.0.0.1:389')
mock_initialize.simple_bind_s.assert_called_once_with('cn=pads,dc=tiddlyweb,dc=org', 'letmein')
assert raised
def test_when_tiddlyspace_mode_configured_get_returns_form_with_csrf_token():
from tiddlyweb.config import config
config['ldapauth'] = {
'ldap_tiddlyspace_mode': True
}
http = httplib2.Http()
response, content = http.request('http://our_test_domain:8001/challenge/tiddlywebplugins.ldapauth', method='GET')
assert response['content-type'] == 'text/html; charset=UTF-8'
_assert_csrf_form(content)
def test_when_tiddlyspace_mode_configured_auth_failure_returns_form_with_csrf_token():
mock_ldap, mock_initialize = _mock_bad_ldap_bind()
response, content = _send_bad_login()
mock_ldap.initialize.assert_called_once_with('ldap://127.0.0.1:389')
    mock_initialize.simple_bind_s.assert_called_once_with('cn=imposter,dc=localhost', 'letmein')
    _assert_csrf_form(content, 'Invalid user credentials, please try again')
def test_when_tiddlyspace_mode_configured_no_ldap_connection_returns_form_with_csrf_token():
mock_ldap, mock_initialize = _mock_bad_ldap_bind(exception=ldap.SERVER_DOWN({'desc': "Can't contact LDAP server"}))
response, content = _send_good_login()
mock_ldap.initialize.assert_called_once()
mock_initialize.simple_bind_s.assert_called_once_with('cn=pads,dc=localhost', 'letmein')
_assert_csrf_form(content, 'Unable to reach authorization provider, please contact your administrator')
def _assert_form(content, error_message='', redirect='/'):
assert content == """
<p>%s</p>
<form action="" method="POST">
<label>
User:
<input name="user" />
</label>
<label>
Password:
<input type="password" name="password" />
</label>
<input type="hidden" name="tiddlyweb_redirect" value="%s" />
<input type="submit" value="submit" />
</form>
""" % (error_message, redirect)
def _assert_csrf_form(content, error_message='', redirect='/'):
assert content == """
<p>%s</p>
<form action="" method="POST">
<label>
User:
<input name="user" />
</label>
<label>
Password:
<input type="password" name="password" />
</label>
<input type="hidden" name="tiddlyweb_redirect" value="%s" />
<input type="hidden" id="csrf_token" name="csrf_token" />
<input type="submit" value="submit" />
</form>
<script type="text/javascript" src="/bags/tiddlyspace/tiddlers/TiddlySpaceCSRF"></script>
<script type="text/javascript">
var csrfToken = window.getCSRFToken(),
el = null;
if (csrfToken) {
el = document.getElementById('csrf_token');
el.value = csrfToken;
}
</script>
""" % (error_message, redirect)
def _mock_good_ldap_bind():
mock_ldap = ldap
mock_initialize = ldap.initialize
mock_ldap.initialize = Mock(name='ldap_init', return_value=mock_initialize)
mock_initialize.simple_bind_s = Mock(name='ldap_bind')
return mock_ldap, mock_initialize
def _mock_bad_ldap_bind(exception=ldap.INVALID_CREDENTIALS({'desc': 'Invalid Credentials'})):
mock_ldap = ldap
mock_initialize = ldap.initialize
mock_ldap.initialize = Mock(name='ldap_init', return_value=mock_initialize)
# The Python LDAP interface does not distinguish between an invalid DN (the user) and a bad password
mock_initialize.simple_bind_s = Mock(name='ldap_bind',
side_effect=exception)
return mock_ldap, mock_initialize
def _send_good_login(redirect='/'):
if redirect != '/':
query = 'user=pads&password=letmein&tiddlyweb_redirect=%s' % redirect
else:
query = 'user=pads&password=letmein'
http = httplib2.Http()
return http.request('http://our_test_domain:8001/challenge/tiddlywebplugins.ldapauth',
method='POST',
headers={'content-type': 'application/x-www-form-urlencoded; charset=UTF-8'},
body=query,
redirections=0)
def _send_bad_login(redirect='/'):
if redirect != '/':
query = 'user=imposter&password=letmein&tiddlyweb_redirect=%s' % redirect
else:
query = 'user=imposter&password=letmein'
http = httplib2.Http()
return http.request('http://our_test_domain:8001/challenge/tiddlywebplugins.ldapauth',
method='POST',
headers={'content-type': 'application/x-www-form-urlencoded; charset=UTF-8'},
body=query,
redirections=0)
|
|
#!/usr/bin/env python3
PKG = 'lg_earth'
NAME = 'test_kmlsync'
KMLSYNC_HOST = '127.0.0.1'
KMLSYNC_PORT = 8765
KML_ENDPOINT = 'http://' + KMLSYNC_HOST + ':' + str(KMLSYNC_PORT)
WINDOW_SLUG = 'center'
import sys
import re
import time
import json
import rospy
import rostest
import unittest
import requests
import xml.etree.ElementTree as ET
from std_msgs.msg import String
from xml.sax.saxutils import escape
from lg_common.helpers import escape_asset_url, generate_cookie
from lg_earth import KmlUpdateHandler
from interactivespaces_msgs.msg import GenericMessage
from threading import Thread
from multiprocessing.pool import ThreadPool
from subprocess import Popen
QUERY_TOPIC = '/earth/query/tour'
SCENE_TOPIC = '/director/scene'
LPNODE = 'testing_kmlsync_node'
timeout_for_requests = 1
EMPTY_MESSAGE = """
{
"description": "bogus",
"duration": 0,
"name": "test whatever",
"resource_uri": "bogus",
"slug": "test message",
"windows": []
}
"""
DIRECTOR_MESSAGE = """
{
"description": "bogus",
"duration": 0,
"name": "test whatever",
"resource_uri": "bogus",
"slug": "test message",
"windows": [
{
"activity": "earth",
"assets": [
"http://lg-head:8060/media.kml",
"http://lg-head:8060/media/blah.kml",
"http://lg-head/zomgflolrlmao.kml"
],
"height": 1080,
"presentation_viewport": "center",
"width": 1920,
"x_coord": 0,
"y_coord": 0
},
{
"activity": "earth",
"assets": [
"http://lg-head:8060/blah/right_one_content.kml"
],
"height": 1080,
"presentation_viewport": "right_one",
"width": 1920,
"x_coord": 0,
"y_coord": 0
}
]
}
"""
class QueryTestSubscriber:
def __init__(self, planet, tour):
self.planet_pub = rospy.Publisher('/earth/planet', String, queue_size=1)
self.planet_sub = rospy.Subscriber('/earth/query/planet', String, self.process_planet)
self.playtour_sub = rospy.Subscriber('/earth/query/tour', String, self.process_tour)
self.expected_planet = planet
self.expected_tour = tour
self.reset()
def process_planet(self, data):
# if data.data == self.expected_planet:
self.got_planet = True
self.planet_pub.publish(self.expected_planet)
sys.exit()
def process_tour(self, data):
if data.data == self.expected_tour and self.got_planet:
            self.got_tour = True
def planet_received(self):
return self.got_planet
def tour_received(self):
return self.got_tour
def reset(self):
self.got_planet = False
self.got_tour = False
class TestKMLSync(unittest.TestCase):
def setUp(self):
self.session = requests.Session()
self.test_planet = 'neptune'
self.test_tour = 'lostinspace'
self.query_test_subscriber = QueryTestSubscriber(self.test_planet, self.test_tour)
rospy.Subscriber(QUERY_TOPIC, String, self._listen_query_string)
self.wait_for_http()
self.query_string = ''
self._send_director_message(empty=True)
def tearDown(self):
self.session.close()
def _listen_query_string(self, msg):
self.query_string = msg.data
def get_director_msg(self):
msg = GenericMessage()
msg.type = 'json'
msg.message = DIRECTOR_MESSAGE
return msg
def get_empty_director_msg(self):
msg = GenericMessage()
msg.type = 'json'
msg.message = EMPTY_MESSAGE
return msg
def get_request(self, url):
r = self.session.get(url, timeout=timeout_for_requests, stream=False)
return r
def wait_for_pubsub(self):
# wait at most 5 seconds for listenerpublisher to be registered
timeout_t = time.time() + 5.0
while not rostest.is_subscriber(
rospy.resolve_name(PUBTOPIC),
rospy.resolve_name(LPNODE)) and time.time() < timeout_t:
time.sleep(0.1)
self.assert_(rostest.is_subscriber(
rospy.resolve_name(PUBTOPIC),
rospy.resolve_name(LPNODE)), "%s is not up" % LPNODE)
def wait_for_http(self):
# TODO: implement this
rospy.sleep(3.0)
def test_1_master_kml_200(self):
r = self.get_request(KML_ENDPOINT + '/master.kml')
result = r.status_code
expected = 200
self.assertEqual(result, expected)
def test_2_master_kml_content(self):
r = self.get_request(KML_ENDPOINT + '/master.kml')
result = ET.fromstring(r.content).find('.//{http://www.opengis.net/kml/2.2}Document').attrib['id']
expected = 'master'
self.assertEqual(result, expected)
def test_3_network_link_update_kml_without_params(self):
r = self.get_request(KML_ENDPOINT + '/network_link_update.kml')
result = r.status_code
expected = 400
self.assertEqual(result, expected)
def test_4_network_link_update_cookie_string_is_initially_empty(self):
self._test_empty_cookie_string_when_no_state_is_set()
def _test_empty_cookie_string_when_no_state_is_set(self):
r = self.get_request(KML_ENDPOINT + '/network_link_update.kml?window_slug=' + WINDOW_SLUG)
result = get_cookie_string(r.content)
expected = ''
self.assertEqual(result, expected)
def test_5_cookie_string_from_director(self):
"""
- send director message
- assert assets and cookie string
- make GET request to assert:
- cookie string
- CREATE/DELETE sections
        When there are no assets to be loaded or unloaded, the KML should look like this:
<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2" xmlns:kml="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">
<NetworkLinkControl>
<minRefreshPeriod>1</minRefreshPeriod>
<maxSessionLength>-1</maxSessionLength>
<cookie><![CDATA[]]></cookie>
<Update>
<targetHref>http://localhost:9001/master.kml</targetHref>
</Update>
</NetworkLinkControl>
</kml>
"""
self._send_director_message()
self._test_director_state()
def _test_director_state(self):
"""
Tests for the expected state when a director message has been sent.
"""
r = self.get_request(KML_ENDPOINT + '/network_link_update.kml?window_slug=center')
rospy.loginfo("r.content => '%s'" % escape(r.content.decode('utf-8')))
asset_urls = json.loads(DIRECTOR_MESSAGE)['windows'][0]['assets']
expected_cookie = 'asset_slug=' + generate_cookie(asset_urls)
expected_list_of_created_slugs = list(map(escape_asset_url, asset_urls))
expected_list_of_deleted_slugs = []
# start testing...
rospy.sleep(1)
self.assertEqual(expected_cookie, get_cookie_string(r.content))
self.assertEqual(sorted(expected_list_of_created_slugs), sorted(get_created_elements(r.content)))
self.assertEqual(expected_list_of_deleted_slugs, get_deleted_elements(r.content))
def test_6_asset_state_in_url(self):
self._send_director_message()
assets = json.loads(DIRECTOR_MESSAGE)['windows'][0]['assets']
delete_slug = 'http___foo_bar_kml'
cookie = 'asset_slug=' + generate_cookie([assets[0], delete_slug])
r = self.get_request(KML_ENDPOINT + '/network_link_update.kml?window_slug=center&%s' % cookie)
expected_list_of_created_slugs = list(map(escape_asset_url, assets[1:]))
expected_list_of_deleted_slugs = [delete_slug]
self.assertEqual(sorted(expected_list_of_created_slugs), sorted(get_created_elements(r.content)))
self.assertEqual(expected_list_of_deleted_slugs, get_deleted_elements(r.content))
def test_7_queryfile_message(self):
"""
make a bad get request to get html and assert for 400
make a legit get request to get 'OK' and status_code 200 and assert for the message that was sent
make a request for two commands, and see that it works
"""
self.query_test_subscriber.reset()
expected_status = 400
bad1 = self.get_request(KML_ENDPOINT + "/query.html")
bad2 = self.get_request(KML_ENDPOINT + "/query.html?query")
bad3 = self.get_request(KML_ENDPOINT + "/query.html?query=")
self.assertEqual(bad1.status_code, expected_status)
self.assertEqual(bad2.status_code, expected_status)
self.assertEqual(bad3.status_code, expected_status)
expected_status = 200
expected_string = "OK"
# self.wait_for_pubsub()
good1 = self.get_request(KML_ENDPOINT + "/query.html?query=playtour=myworldtour")
rospy.sleep(1)
good1_expected_string = "myworldtour"
self.assertEqual(self.query_string, good1_expected_string)
        # NB! Google Earth won't play tours with spaces in the name, so don't
        # do what this does here in real tours
good2 = self.get_request(KML_ENDPOINT + "/query.html?query=playtour=My World Tour")
rospy.sleep(1)
good2_expected_string = "My World Tour"
self.assertEqual(self.query_string, good2_expected_string)
good3 = self.get_request(KML_ENDPOINT + "/query.html?query=planet=%s,playtour=%s" %
(self.test_planet, self.test_tour))
rospy.sleep(1)
self.assertEqual(good1.status_code, expected_status)
self.assertEqual(good2.status_code, expected_status)
self.assertEqual(good1.content.decode('utf-8'), expected_string)
self.assertEqual(good2.content.decode('utf-8'), expected_string)
def test_8_send_request_before_state_change(self):
"""
This test will make sure that requests sent before a statechange will
get the proper return when the statechange happens before the request
is returned
"""
if timeout_for_requests <= 1:
            return  # not testable with a small request timeout
t = Thread(target=self._sleep_and_send_director)
t.start()
rospy.sleep(1)
self._test_director_state()
t.join()
def test_9_multiple_requests_before_state_change(self):
"""
This tests when requests are made that require no state change
sit on the dict while the state changes, and return with that
new changed state.
"""
if timeout_for_requests <= 1:
            return  # not testable with a small request timeout
async_requests = []
for i in range(5):
pool = ThreadPool(processes=2)
async_requests.append(pool.apply_async(self._test_director_state))
self._send_director_message()
for thread in async_requests:
try:
thread.get()
except Exception:
                self.fail("Invalid director message returned from queued request")
def _send_director_message(self, empty=False):
director_publisher = rospy.Publisher(SCENE_TOPIC, GenericMessage)
rospy.sleep(1)
msg = self.get_director_msg()
if empty:
msg = self.get_empty_director_msg()
director_publisher.publish(msg)
rospy.sleep(1)
def _sleep_and_send_director(self):
"""
Used to sleep then send a director message, helpful in our threads.
"""
rospy.sleep(3)
self._send_director_message()
def get_cookie_string(s):
ret = re.search('\\<\\!\\[CDATA\\[(.*)\\]\\]\\>', s.decode('utf-8'), re.M)
if ret and len(ret.groups()) > 0:
return ret.groups()[0]
rospy.logerr('could not find matching pattern for CDATA in {}'.format(s))
return ''
def get_created_elements(x):
try:
tmp = ET.fromstring(x).find('.//{http://www.opengis.net/kml/2.2}Create').findall('.//{http://www.opengis.net/kml/2.2}name')
return [elem.text for elem in tmp]
except AttributeError:
return []
def get_deleted_elements(x):
try:
return [elem.attrib['targetId'] for elem in ET.fromstring(x).find('.//{http://www.opengis.net/kml/2.2}Delete').getchildren()]
except AttributeError:
return []
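# A minimal, self-contained sketch (not part of the test suite) of how the CDATA
# cookie extraction above behaves on a NetworkLinkControl document like the one
# quoted in test_5_cookie_string_from_director. The sample KML is an assumption
# used purely for illustration.
def _example_cookie_extraction():
    sample = (b'<NetworkLinkControl>'
              b'<cookie><![CDATA[asset_slug=media_kml]]></cookie>'
              b'</NetworkLinkControl>')
    # get_cookie_string() returns whatever sits between <![CDATA[ and ]]>,
    # i.e. 'asset_slug=media_kml' for this sample.
    return get_cookie_string(sample)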
if __name__ == '__main__':
rospy.init_node('test_director')
timeout_for_requests = rospy.get_param('~timeout_requests_session', 1)
rostest.rosrun(PKG, NAME, TestKMLSync, sys.argv)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
|
from __future__ import absolute_import, print_function
import itertools
from collections import defaultdict
from datetime import timedelta
import six
from django.conf import settings
from django.db.models import Min, Q
from django.utils import timezone
from sentry import tagstore, tsdb
from sentry.app import env
from sentry.api.serializers import Serializer, register, serialize
from sentry.api.serializers.models.actor import ActorSerializer
from sentry.api.fields.actor import Actor
from sentry.auth.superuser import is_active_superuser
from sentry.constants import LOG_LEVELS, StatsPeriod
from sentry.models import (
Commit,
Environment,
Group,
GroupAssignee,
GroupBookmark,
GroupEnvironment,
GroupLink,
GroupMeta,
GroupResolution,
GroupSeen,
GroupSnooze,
GroupShare,
GroupStatus,
GroupSubscription,
GroupSubscriptionReason,
Integration,
User,
UserOption,
UserOptionValue,
)
from sentry.tagstore.snuba.backend import SnubaTagStorage
from sentry.tsdb.snuba import SnubaTSDB
from sentry.utils.db import attach_foreignkey
from sentry.utils.safe import safe_execute
SUBSCRIPTION_REASON_MAP = {
GroupSubscriptionReason.comment: "commented",
GroupSubscriptionReason.assigned: "assigned",
GroupSubscriptionReason.bookmark: "bookmarked",
GroupSubscriptionReason.status_change: "changed_status",
GroupSubscriptionReason.mentioned: "mentioned",
}
disabled = object()
# TODO(jess): remove when snuba is primary backend
snuba_tsdb = SnubaTSDB(**settings.SENTRY_TSDB_OPTIONS)
class GroupSerializerBase(Serializer):
def _get_seen_stats(self, item_list, user):
"""
Returns a dictionary keyed by item that includes:
- times_seen
- first_seen
- last_seen
- user_count
"""
raise NotImplementedError
def _get_subscriptions(self, item_list, user):
"""
Returns a mapping of group IDs to a two-tuple of (subscribed: bool,
subscription: GroupSubscription or None) for the provided user and
groups.
"""
if not item_list:
return {}
# Collect all of the projects to look up, and keep a set of groups that
# are part of that project. (Note that the common -- but not only --
# case here is that all groups are part of the same project.)
projects = defaultdict(set)
for group in item_list:
projects[group.project].add(group)
# Fetch the options for each project -- we'll need this to identify if
# a user has totally disabled workflow notifications for a project.
# NOTE: This doesn't use `values_list` because that bypasses field
# value decoding, so the `value` field would not be unpickled.
options = {
option.project_id: option.value
for option in UserOption.objects.filter(
Q(project__in=projects.keys()) | Q(project__isnull=True),
user=user,
key="workflow:notifications",
)
}
# If there is a subscription record associated with the group, we can
# just use that to know if a user is subscribed or not, as long as
# notifications aren't disabled for the project.
subscriptions = {
subscription.group_id: subscription
for subscription in GroupSubscription.objects.filter(
group__in=list(
itertools.chain.from_iterable(
itertools.imap(
lambda project__groups: project__groups[1]
if not options.get(project__groups[0].id, options.get(None))
== UserOptionValue.no_conversations
else [],
projects.items(),
)
)
),
user=user,
)
}
# This is the user's default value for any projects that don't have
# the option value specifically recorded. (The default
# "participating_only" value is convention.)
global_default_workflow_option = options.get(None, UserOptionValue.participating_only)
results = {}
for project, groups in projects.items():
project_default_workflow_option = options.get(
project.id, global_default_workflow_option
)
for group in groups:
subscription = subscriptions.get(group.id)
if subscription is not None:
results[group.id] = (subscription.is_active, subscription)
else:
results[group.id] = (
(project_default_workflow_option == UserOptionValue.all_conversations, None)
if project_default_workflow_option != UserOptionValue.no_conversations
else disabled
)
return results
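        # Illustrative note (added for clarity, not from the original source): for
        # a user explicitly subscribed to group 1, relying on a project default of
        # "all_conversations" for group 2, and with workflow notifications disabled
        # for group 3's project, the mapping built above would look roughly like
        #   {1: (True, <GroupSubscription>), 2: (True, None), 3: disabled}
        # while a "participating_only" project default would yield (False, None).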
def get_attrs(self, item_list, user):
from sentry.plugins import plugins
GroupMeta.objects.populate_cache(item_list)
attach_foreignkey(item_list, Group.project)
if user.is_authenticated() and item_list:
bookmarks = set(
GroupBookmark.objects.filter(user=user, group__in=item_list).values_list(
"group_id", flat=True
)
)
seen_groups = dict(
GroupSeen.objects.filter(user=user, group__in=item_list).values_list(
"group_id", "last_seen"
)
)
subscriptions = self._get_subscriptions(item_list, user)
else:
bookmarks = set()
seen_groups = {}
subscriptions = defaultdict(lambda: (False, None))
assignees = {
a.group_id: a.assigned_actor()
for a in GroupAssignee.objects.filter(group__in=item_list)
}
resolved_assignees = Actor.resolve_dict(assignees)
ignore_items = {g.group_id: g for g in GroupSnooze.objects.filter(group__in=item_list)}
resolved_item_list = [i for i in item_list if i.status == GroupStatus.RESOLVED]
if resolved_item_list:
release_resolutions = {
i[0]: i[1:]
for i in GroupResolution.objects.filter(group__in=resolved_item_list).values_list(
"group", "type", "release__version", "actor_id"
)
}
# due to our laziness, and django's inability to do a reasonable join here
# we end up with two queries
commit_results = list(
Commit.objects.extra(
select={"group_id": "sentry_grouplink.group_id"},
tables=["sentry_grouplink"],
where=[
"sentry_grouplink.linked_id = sentry_commit.id",
"sentry_grouplink.group_id IN ({})".format(
", ".join(six.text_type(i.id) for i in resolved_item_list)
),
"sentry_grouplink.linked_type = %s",
"sentry_grouplink.relationship = %s",
],
params=[int(GroupLink.LinkedType.commit), int(GroupLink.Relationship.resolves)],
)
)
commit_resolutions = {
i.group_id: d
for i, d in itertools.izip(commit_results, serialize(commit_results, user))
}
else:
release_resolutions = {}
commit_resolutions = {}
actor_ids = set(r[-1] for r in six.itervalues(release_resolutions))
actor_ids.update(r.actor_id for r in six.itervalues(ignore_items))
if actor_ids:
users = list(User.objects.filter(id__in=actor_ids, is_active=True))
actors = {u.id: d for u, d in itertools.izip(users, serialize(users, user))}
else:
actors = {}
share_ids = dict(
GroupShare.objects.filter(group__in=item_list).values_list("group_id", "uuid")
)
result = {}
seen_stats = self._get_seen_stats(item_list, user)
for item in item_list:
active_date = item.active_at or item.first_seen
annotations = []
for plugin in plugins.for_project(project=item.project, version=1):
safe_execute(plugin.tags, None, item, annotations, _with_transaction=False)
for plugin in plugins.for_project(project=item.project, version=2):
annotations.extend(
safe_execute(plugin.get_annotations, group=item, _with_transaction=False) or ()
)
from sentry.integrations import IntegrationFeatures
for integration in Integration.objects.filter(
organizations=item.project.organization_id
):
if not (
integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
):
continue
install = integration.get_installation(item.project.organization_id)
annotations.extend(
safe_execute(install.get_annotations, group=item, _with_transaction=False) or ()
)
from sentry.models import PlatformExternalIssue
annotations.extend(
safe_execute(
PlatformExternalIssue.get_annotations, group=item, _with_transaction=False
)
or ()
)
resolution_actor = None
resolution_type = None
resolution = release_resolutions.get(item.id)
if resolution:
resolution_type = "release"
resolution_actor = actors.get(resolution[-1])
if not resolution:
resolution = commit_resolutions.get(item.id)
if resolution:
resolution_type = "commit"
ignore_item = ignore_items.get(item.id)
if ignore_item:
ignore_actor = actors.get(ignore_item.actor_id)
else:
ignore_actor = None
result[item] = {
"assigned_to": resolved_assignees.get(item.id),
"is_bookmarked": item.id in bookmarks,
"subscription": subscriptions[item.id],
"has_seen": seen_groups.get(item.id, active_date) > active_date,
"annotations": annotations,
"ignore_until": ignore_item,
"ignore_actor": ignore_actor,
"resolution": resolution,
"resolution_type": resolution_type,
"resolution_actor": resolution_actor,
"share_id": share_ids.get(item.id),
}
result[item].update(seen_stats.get(item, {}))
return result
def serialize(self, obj, attrs, user):
status = obj.status
status_details = {}
if attrs["ignore_until"]:
snooze = attrs["ignore_until"]
if snooze.is_valid(group=obj):
# counts return the delta remaining when window is not set
status_details.update(
{
"ignoreCount": (
snooze.count - (obj.times_seen - snooze.state["times_seen"])
if snooze.count and not snooze.window
else snooze.count
),
"ignoreUntil": snooze.until,
"ignoreUserCount": (
snooze.user_count - (attrs["user_count"] - snooze.state["users_seen"])
if snooze.user_count and not snooze.user_window
else snooze.user_count
),
"ignoreUserWindow": snooze.user_window,
"ignoreWindow": snooze.window,
"actor": attrs["ignore_actor"],
}
)
else:
status = GroupStatus.UNRESOLVED
if status == GroupStatus.UNRESOLVED and obj.is_over_resolve_age():
status = GroupStatus.RESOLVED
status_details["autoResolved"] = True
if status == GroupStatus.RESOLVED:
status_label = "resolved"
if attrs["resolution_type"] == "release":
res_type, res_version, _ = attrs["resolution"]
if res_type in (GroupResolution.Type.in_next_release, None):
status_details["inNextRelease"] = True
elif res_type == GroupResolution.Type.in_release:
status_details["inRelease"] = res_version
status_details["actor"] = attrs["resolution_actor"]
elif attrs["resolution_type"] == "commit":
status_details["inCommit"] = attrs["resolution"]
elif status == GroupStatus.IGNORED:
status_label = "ignored"
elif status in [GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS]:
status_label = "pending_deletion"
elif status == GroupStatus.PENDING_MERGE:
status_label = "pending_merge"
else:
status_label = "unresolved"
        # If the user is not logged in or is not a member of the organization,
        # do not return the permalink, which contains private information (i.e. the org name).
request = env.request
is_superuser = request and is_active_superuser(request) and request.user == user
if is_superuser or (
user.is_authenticated() and user.get_orgs().filter(id=obj.organization.id).exists()
):
permalink = obj.get_absolute_url()
else:
permalink = None
subscription_details = None
if attrs["subscription"] is not disabled:
is_subscribed, subscription = attrs["subscription"]
if subscription is not None and subscription.is_active:
subscription_details = {
"reason": SUBSCRIPTION_REASON_MAP.get(subscription.reason, "unknown")
}
else:
is_subscribed = False
subscription_details = {"disabled": True}
share_id = attrs["share_id"]
return {
"id": six.text_type(obj.id),
"shareId": share_id,
"shortId": obj.qualified_short_id,
"count": six.text_type(attrs["times_seen"]),
"userCount": attrs["user_count"],
"title": obj.title,
"culprit": obj.culprit,
"permalink": permalink,
"firstSeen": attrs["first_seen"],
"lastSeen": attrs["last_seen"],
"logger": obj.logger or None,
"level": LOG_LEVELS.get(obj.level, "unknown"),
"status": status_label,
"statusDetails": status_details,
"isPublic": share_id is not None,
"platform": obj.platform,
"project": {
"id": six.text_type(obj.project.id),
"name": obj.project.name,
"slug": obj.project.slug,
"platform": obj.project.platform,
},
"type": obj.get_event_type(),
"metadata": obj.get_event_metadata(),
"numComments": obj.num_comments,
"assignedTo": serialize(attrs["assigned_to"], user, ActorSerializer()),
"isBookmarked": attrs["is_bookmarked"],
"isSubscribed": is_subscribed,
"subscriptionDetails": subscription_details,
"hasSeen": attrs["has_seen"],
"annotations": attrs["annotations"],
}
@register(Group)
class GroupSerializer(GroupSerializerBase):
def __init__(self, environment_func=None):
self.environment_func = environment_func if environment_func is not None else lambda: None
def _get_seen_stats(self, item_list, user):
try:
environment = self.environment_func()
except Environment.DoesNotExist:
user_counts = {}
first_seen = {}
last_seen = {}
times_seen = {}
else:
project_id = item_list[0].project_id
item_ids = [g.id for g in item_list]
user_counts = tagstore.get_groups_user_counts(
[project_id], item_ids, environment_ids=environment and [environment.id]
)
first_seen = {}
last_seen = {}
times_seen = {}
if environment is not None:
environment_tagvalues = tagstore.get_group_list_tag_value(
[project_id], item_ids, [environment.id], "environment", environment.name
)
for item_id, value in environment_tagvalues.items():
first_seen[item_id] = value.first_seen
last_seen[item_id] = value.last_seen
times_seen[item_id] = value.times_seen
else:
for item in item_list:
first_seen[item.id] = item.first_seen
last_seen[item.id] = item.last_seen
times_seen[item.id] = item.times_seen
attrs = {}
for item in item_list:
attrs[item] = {
"times_seen": times_seen.get(item.id, 0),
"first_seen": first_seen.get(item.id), # TODO: missing?
"last_seen": last_seen.get(item.id),
"user_count": user_counts.get(item.id, 0),
}
return attrs
class GroupStatsMixin(object):
STATS_PERIOD_CHOICES = {
"14d": StatsPeriod(14, timedelta(hours=24)),
"24h": StatsPeriod(24, timedelta(hours=1)),
}
def query_tsdb(self, group_ids, query_params):
raise NotImplementedError
def get_stats(self, item_list, user):
if self.stats_period:
            # we need to compute stats over 24h (1h resolution) and 14d (24h resolution)
group_ids = [g.id for g in item_list]
segments, interval = self.STATS_PERIOD_CHOICES[self.stats_period]
now = timezone.now()
query_params = {
"start": now - ((segments - 1) * interval),
"end": now,
"rollup": int(interval.total_seconds()),
}
return self.query_tsdb(group_ids, query_params)
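        # Worked example (illustrative, not from the original source): with
        # stats_period == "24h", STATS_PERIOD_CHOICES yields segments=24 and
        # interval=timedelta(hours=1), so query_params becomes
        #   {"start": now - 23 hours, "end": now, "rollup": 3600}
        # i.e. 24 hourly buckets ending at the current time.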
class StreamGroupSerializer(GroupSerializer, GroupStatsMixin):
def __init__(
self,
environment_func=None,
stats_period=None,
matching_event_id=None,
matching_event_environment=None,
):
super(StreamGroupSerializer, self).__init__(environment_func)
if stats_period is not None:
assert stats_period in self.STATS_PERIOD_CHOICES
self.stats_period = stats_period
self.matching_event_id = matching_event_id
self.matching_event_environment = matching_event_environment
def query_tsdb(self, group_ids, query_params):
try:
environment = self.environment_func()
except Environment.DoesNotExist:
stats = {key: tsdb.make_series(0, **query_params) for key in group_ids}
else:
stats = tsdb.get_range(
model=tsdb.models.group,
keys=group_ids,
environment_ids=environment and [environment.id],
**query_params
)
return stats
def get_attrs(self, item_list, user):
attrs = super(StreamGroupSerializer, self).get_attrs(item_list, user)
if self.stats_period:
stats = self.get_stats(item_list, user)
for item in item_list:
attrs[item].update({"stats": stats[item.id]})
return attrs
def serialize(self, obj, attrs, user):
result = super(StreamGroupSerializer, self).serialize(obj, attrs, user)
if self.stats_period:
result["stats"] = {self.stats_period: attrs["stats"]}
if self.matching_event_id:
result["matchingEventId"] = self.matching_event_id
if self.matching_event_environment:
result["matchingEventEnvironment"] = self.matching_event_environment
return result
class TagBasedStreamGroupSerializer(StreamGroupSerializer):
def __init__(self, tags, **kwargs):
super(TagBasedStreamGroupSerializer, self).__init__(**kwargs)
self.tags = tags
def serialize(self, obj, attrs, user):
result = super(TagBasedStreamGroupSerializer, self).serialize(obj, attrs, user)
result["tagLastSeen"] = self.tags[obj.id].last_seen
result["tagFirstSeen"] = self.tags[obj.id].first_seen
return result
class SharedGroupSerializer(GroupSerializer):
def serialize(self, obj, attrs, user):
result = super(SharedGroupSerializer, self).serialize(obj, attrs, user)
del result["annotations"]
return result
class GroupSerializerSnuba(GroupSerializerBase):
def __init__(self, environment_ids=None, start=None, end=None):
self.environment_ids = environment_ids
self.start = start
self.end = end
def _get_seen_stats(self, item_list, user):
tagstore = SnubaTagStorage()
project_ids = list(set([item.project_id for item in item_list]))
group_ids = [item.id for item in item_list]
user_counts = tagstore.get_groups_user_counts(
project_ids,
group_ids,
environment_ids=self.environment_ids,
start=self.start,
end=self.end,
)
first_seen = {}
last_seen = {}
times_seen = {}
if not self.environment_ids:
# use issue fields
for item in item_list:
first_seen[item.id] = item.first_seen
last_seen[item.id] = item.last_seen
times_seen[item.id] = item.times_seen
else:
seen_data = tagstore.get_group_seen_values_for_environments(
project_ids, group_ids, self.environment_ids, start=self.start, end=self.end
)
first_seen_data = {
ge["group_id"]: ge["first_seen__min"]
for ge in GroupEnvironment.objects.filter(
group_id__in=[item.id for item in item_list],
environment_id__in=self.environment_ids,
)
.values("group_id")
.annotate(Min("first_seen"))
}
for item_id, value in seen_data.items():
first_seen[item_id] = first_seen_data.get(item_id)
last_seen[item_id] = value["last_seen"]
times_seen[item_id] = value["times_seen"]
attrs = {}
for item in item_list:
attrs[item] = {
"times_seen": times_seen.get(item.id, 0),
"first_seen": first_seen.get(item.id),
"last_seen": last_seen.get(item.id),
"user_count": user_counts.get(item.id, 0),
}
return attrs
class StreamGroupSerializerSnuba(GroupSerializerSnuba, GroupStatsMixin):
def __init__(self, environment_ids=None, stats_period=None, matching_event_id=None):
super(StreamGroupSerializerSnuba, self).__init__(environment_ids)
if stats_period is not None:
assert stats_period in self.STATS_PERIOD_CHOICES
self.stats_period = stats_period
self.matching_event_id = matching_event_id
def query_tsdb(self, group_ids, query_params):
return snuba_tsdb.get_range(
model=snuba_tsdb.models.group,
keys=group_ids,
environment_ids=self.environment_ids,
**query_params
)
def get_attrs(self, item_list, user):
attrs = super(StreamGroupSerializerSnuba, self).get_attrs(item_list, user)
if self.stats_period:
stats = self.get_stats(item_list, user)
for item in item_list:
attrs[item].update({"stats": stats[item.id]})
return attrs
def serialize(self, obj, attrs, user):
result = super(StreamGroupSerializerSnuba, self).serialize(obj, attrs, user)
if self.stats_period:
result["stats"] = {self.stats_period: attrs["stats"]}
if self.matching_event_id:
result["matchingEventId"] = self.matching_event_id
return result
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the mask module.
"""
import astropy.units as u
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
import pytest
from ..bounding_box import BoundingBox
from ..circle import CircularAperture, CircularAnnulus
from ..mask import ApertureMask
from ..rectangle import RectangularAnnulus
POSITIONS = [(-20, -20), (-20, 20), (20, -20), (60, 60)]
def test_mask_input_shapes():
with pytest.raises(ValueError):
mask_data = np.ones((10, 10))
bbox = BoundingBox(5, 10, 5, 10)
ApertureMask(mask_data, bbox)
def test_mask_array():
mask_data = np.ones((10, 10))
bbox = BoundingBox(5, 15, 5, 15)
mask = ApertureMask(mask_data, bbox)
data = np.array(mask)
assert_allclose(data, mask.data)
def test_mask_get_overlap_slices():
aper = CircularAperture((5, 5), r=10.)
mask = aper.to_mask()
slc = ((slice(0, 16, None), slice(0, 16, None)),
(slice(5, 21, None), slice(5, 21, None)))
assert mask.get_overlap_slices((25, 25)) == slc
def test_mask_cutout_shape():
mask_data = np.ones((10, 10))
bbox = BoundingBox(5, 15, 5, 15)
mask = ApertureMask(mask_data, bbox)
with pytest.raises(ValueError):
mask.cutout(np.arange(10))
with pytest.raises(ValueError):
mask.to_image((10,))
def test_mask_cutout_copy():
data = np.ones((50, 50))
aper = CircularAperture((25, 25), r=10.)
mask = aper.to_mask()
cutout = mask.cutout(data, copy=True)
data[25, 25] = 100.
assert cutout[10, 10] == 1.
# test quantity data
data2 = np.ones((50, 50)) * u.adu
cutout2 = mask.cutout(data2, copy=True)
assert cutout2.unit == data2.unit
data2[25, 25] = 100. * u.adu
assert cutout2[10, 10].value == 1.
@pytest.mark.parametrize('position', POSITIONS)
def test_mask_cutout_no_overlap(position):
data = np.ones((50, 50))
aper = CircularAperture(position, r=10.)
mask = aper.to_mask()
cutout = mask.cutout(data)
assert cutout is None
weighted_data = mask.multiply(data)
assert weighted_data is None
image = mask.to_image(data.shape)
assert image is None
@pytest.mark.parametrize('position', POSITIONS)
def test_mask_cutout_partial_overlap(position):
data = np.ones((50, 50))
aper = CircularAperture(position, r=30.)
mask = aper.to_mask()
cutout = mask.cutout(data)
assert cutout.shape == mask.shape
weighted_data = mask.multiply(data)
assert weighted_data.shape == mask.shape
image = mask.to_image(data.shape)
assert image.shape == data.shape
def test_mask_multiply():
radius = 10.
data = np.ones((50, 50))
aper = CircularAperture((25, 25), r=radius)
mask = aper.to_mask()
data_weighted = mask.multiply(data)
assert_almost_equal(np.sum(data_weighted), np.pi * radius**2)
# test that multiply() returns a copy
data[25, 25] = 100.
assert data_weighted[10, 10] == 1.
def test_mask_multiply_quantity():
radius = 10.
data = np.ones((50, 50)) * u.adu
aper = CircularAperture((25, 25), r=radius)
mask = aper.to_mask()
data_weighted = mask.multiply(data)
assert data_weighted.unit == u.adu
assert_almost_equal(np.sum(data_weighted.value), np.pi * radius**2)
# test that multiply() returns a copy
data[25, 25] = 100. * u.adu
assert data_weighted[10, 10].value == 1.
@pytest.mark.parametrize('value', (np.nan, np.inf))
def test_mask_nonfinite_fill_value(value):
aper = CircularAnnulus((0, 0), 10, 20)
data = np.ones((101, 101)).astype(int)
cutout = aper.to_mask().cutout(data, fill_value=value)
assert ~np.isfinite(cutout[0, 0])
def test_mask_multiply_fill_value():
aper = CircularAnnulus((0, 0), 10, 20)
data = np.ones((101, 101)).astype(int)
cutout = aper.to_mask().multiply(data, fill_value=np.nan)
xypos = ((20, 20), (5, 5), (5, 35), (35, 5), (35, 35))
for x, y in xypos:
assert np.isnan(cutout[y, x])
def test_mask_nonfinite_in_bbox():
"""
Regression test that non-finite data values outside of the mask but
within the bounding box are set to zero.
"""
data = np.ones((101, 101))
data[33, 33] = np.nan
data[67, 67] = np.inf
data[33, 67] = -np.inf
data[22, 22] = np.nan
data[22, 23] = np.inf
radius = 20.
aper1 = CircularAperture((50, 50), r=radius)
aper2 = CircularAperture((5, 5), r=radius)
wdata1 = aper1.to_mask(method='exact').multiply(data)
assert_allclose(np.sum(wdata1), np.pi * radius**2)
wdata2 = aper2.to_mask(method='exact').multiply(data)
assert_allclose(np.sum(wdata2), 561.6040111923013)
def test_mask_get_values():
aper = CircularAnnulus(((0, 0), (50, 50), (100, 100)), 10, 20)
data = np.ones((101, 101))
values = [mask.get_values(data) for mask in aper.to_mask()]
shapes = [val.shape for val in values]
sums = [np.sum(val) for val in values]
assert shapes[0] == (278,)
assert shapes[1] == (1068,)
assert shapes[2] == (278,)
sums_expected = (245.621534, 942.477796, 245.621534)
assert_allclose(sums, sums_expected)
def test_mask_get_values_no_overlap():
aper = CircularAperture((-100, -100), r=3)
data = np.ones((51, 51))
values = aper.to_mask().get_values(data)
assert values.shape == (0,)
def test_mask_get_values_mask():
aper = CircularAperture((24.5, 24.5), r=10.)
data = np.ones((51, 51))
mask = aper.to_mask()
with pytest.raises(ValueError):
mask.get_values(data, mask=np.ones(3))
arr = mask.get_values(data, mask=None)
assert_allclose(np.sum(arr), 100. * np.pi)
data_mask = np.zeros(data.shape, dtype=bool)
data_mask[25:] = True
arr2 = mask.get_values(data, mask=data_mask)
assert_allclose(np.sum(arr2), 100. * np.pi / 2.)
def test_rectangular_annulus_hin():
aper = RectangularAnnulus((25, 25), 2, 4, 20, h_in=18, theta=0)
mask = aper.to_mask(method='center')
assert mask.data.shape == (21, 5)
assert np.count_nonzero(mask.data) == 40
|
|
# engine/url.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates
information about a database connection specification.
The URL object is created automatically when :func:`~sqlalchemy.engine.create_engine` is called
with a string argument; alternatively, the URL is a public-facing construct which can
be used directly and is also accepted directly by ``create_engine()``.
"""
import re, urllib
from sqlalchemy import exc, util
class URL(object):
"""
Represent the components of a URL used to connect to a database.
This object is suitable to be passed directly to a
    ``create_engine()`` call. The fields of the URL are parsed from a
    string by the module-level :func:`make_url` function. The string
    format of the URL is an RFC-1738-style string.
All initialization parameters are available as public attributes.
:param drivername: the name of the database backend.
This name will correspond to a module in sqlalchemy/databases
or a third party plug-in.
:param username: The user name.
:param password: database password.
:param host: The name of the host.
:param port: The port number.
:param database: The database name.
:param query: A dictionary of options to be passed to the
dialect and/or the DBAPI upon connect.
"""
def __init__(self, drivername, username=None, password=None,
host=None, port=None, database=None, query=None):
self.drivername = drivername
self.username = username
self.password = password
self.host = host
if port is not None:
self.port = int(port)
else:
self.port = None
self.database = database
self.query = query or {}
def __str__(self):
s = self.drivername + "://"
if self.username is not None:
s += self.username
if self.password is not None:
s += ':' + urllib.quote_plus(self.password)
s += "@"
if self.host is not None:
s += self.host
if self.port is not None:
s += ':' + str(self.port)
if self.database is not None:
s += '/' + self.database
if self.query:
keys = self.query.keys()
keys.sort()
s += '?' + "&".join("%s=%s" % (k, self.query[k]) for k in keys)
return s
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return \
isinstance(other, URL) and \
self.drivername == other.drivername and \
self.username == other.username and \
self.password == other.password and \
self.host == other.host and \
self.database == other.database and \
self.query == other.query
def get_dialect(self):
"""Return the SQLAlchemy database dialect class corresponding
to this URL's driver name.
"""
try:
if '+' in self.drivername:
dialect, driver = self.drivername.split('+')
else:
dialect, driver = self.drivername, 'base'
module = __import__('sqlalchemy.dialects.%s' % (dialect, )).dialects
module = getattr(module, dialect)
if hasattr(module, driver):
module = getattr(module, driver)
else:
module = self._load_entry_point()
if module is None:
raise exc.ArgumentError(
"Could not determine dialect for '%s'." %
self.drivername)
return module.dialect
except ImportError:
module = self._load_entry_point()
if module is not None:
return module
else:
raise exc.ArgumentError(
"Could not determine dialect for '%s'." % self.drivername)
def _load_entry_point(self):
"""attempt to load this url's dialect from entry points, or return None
if pkg_resources is not installed or there is no matching entry point.
Raise ImportError if the actual load fails.
"""
try:
import pkg_resources
except ImportError:
return None
for res in pkg_resources.iter_entry_points('sqlalchemy.dialects'):
if res.name == self.drivername.replace("+", "."):
return res.load()
else:
return None
def translate_connect_args(self, names=[], **kw):
"""Translate url attributes into a dictionary of connection arguments.
Returns attributes of this url (`host`, `database`, `username`,
`password`, `port`) as a plain dictionary. The attribute names are
used as the keys by default. Unset or false attributes are omitted
from the final dictionary.
:param \**kw: Optional, alternate key names for url attributes.
:param names: Deprecated. Same purpose as the keyword-based alternate names,
but correlates the name to the original positionally.
"""
translated = {}
attribute_names = ['host', 'database', 'username', 'password', 'port']
for sname in attribute_names:
if names:
name = names.pop(0)
elif sname in kw:
name = kw[sname]
else:
name = sname
if name is not None and getattr(self, sname, False):
translated[name] = getattr(self, sname)
return translated
def make_url(name_or_url):
"""Given a string or unicode instance, produce a new URL instance.
The given string is parsed according to the RFC 1738 spec. If an
existing URL object is passed, just returns the object.
"""
if isinstance(name_or_url, basestring):
return _parse_rfc1738_args(name_or_url)
else:
return name_or_url
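# A minimal usage sketch (illustrative only; the DSN below is an assumption, not
# taken from this module): build a URL from an RFC-1738 string and read its
# parts back out.
def _example_make_url():
    url = make_url("postgresql+psycopg2://scott:tiger@localhost:5432/mydb?sslmode=require")
    # str(url) reassembles the RFC-1738 form; translate_connect_args() exposes
    # host/port/username/password/database as a plain dictionary, here renaming
    # 'username' to 'user' via the keyword-based alternate name.
    return str(url), url.translate_connect_args(username='user')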
def _parse_rfc1738_args(name):
pattern = re.compile(r'''
(?P<name>[\w\+]+)://
(?:
(?P<username>[^:/]*)
(?::(?P<password>[^/]*))?
@)?
(?:
(?P<host>[^/:]*)
(?::(?P<port>[^/]*))?
)?
(?:/(?P<database>.*))?
'''
, re.X)
m = pattern.match(name)
if m is not None:
components = m.groupdict()
if components['database'] is not None:
tokens = components['database'].split('?', 2)
components['database'] = tokens[0]
query = (len(tokens) > 1 and dict(util.parse_qsl(tokens[1]))) or None
# Py2K
if query is not None:
query = dict((k.encode('ascii'), query[k]) for k in query)
# end Py2K
else:
query = None
components['query'] = query
if components['password'] is not None:
components['password'] = urllib.unquote_plus(components['password'])
name = components.pop('name')
return URL(name, **components)
else:
raise exc.ArgumentError(
"Could not parse rfc1738 URL from string '%s'" % name)
def _parse_keyvalue_args(name):
m = re.match( r'(\w+)://(.*)', name)
if m is not None:
(name, args) = m.group(1, 2)
opts = dict( util.parse_qsl( args ) )
        # opts is a dict of URL keyword options, so expand it as keyword arguments
        return URL(name, **opts)
else:
return None
|
|
import itertools as itt
import numpy as np
import pytest
import mbuild as mb
from mbuild.coordinate_transform import (
AxisTransform,
ChangeOfBasis,
CoordinateTransform,
RigidTransform,
Rotation,
RotationAroundX,
RotationAroundY,
RotationAroundZ,
Translation,
_spin,
angle,
force_overlap,
rotate,
rotate_around_x,
rotate_around_y,
rotate_around_z,
spin,
spin_x,
spin_y,
spin_z,
translate,
translate_to,
x_axis_transform,
y_axis_transform,
z_axis_transform,
)
from mbuild.tests.base_test import BaseTest
from mbuild.utils.exceptions import RemovedFuncError
class TestCoordinateTransform(BaseTest):
def test_apply_to(self):
double = CoordinateTransform(T=np.eye(4) * 2)
A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert (
double.apply_to(A)
== np.array([[2, 4, 6], [8, 10, 12], [14, 16, 18]])
).all()
def test_translation(self):
translation = Translation((10, 10, 10))
assert (
translation.apply_to(np.array([[1, 1, 1]]))
== np.array([11, 11, 11])
).all()
def test_rotation_around_z(self):
Z_rotation = RotationAroundZ(np.pi)
a = Z_rotation.apply_to(np.array([[2, 3, 4]]))
b = np.array([[-2.0, -3.0, 4.0]])
assert np.allclose(a, b, atol=1.0e-16)
def test_rotation_around_y(self):
Y_rotation = RotationAroundY(np.pi)
a = Y_rotation.apply_to(np.array([[2, 3, 4]]))
b = np.array([-2, 3, -4])
assert np.allclose(a, b, atol=1.0e-16)
def test_rotation_around_x(self):
X_rotation = RotationAroundX(np.pi)
a = X_rotation.apply_to(np.array([[2, 3, 4]]))
b = np.array([2, -3, -4])
assert np.allclose(a, b, atol=1.0e-16)
def test_rotation(self):
rotation = Rotation(np.pi * 2 / 3, np.array([1, 1, 1]))
a = rotation.apply_to(np.array([[2, 3, 4]]))
b = np.array([4, 2, 3])
assert np.allclose(a, b, atol=1.0e-16)
def test_change_of_basis(self):
change_basis = ChangeOfBasis(
np.array([[-2, 0, 0], [0, -2, 0], [0, 0, -2]])
)
assert (
change_basis.apply_to(np.array([[2, 3, 4]]))
== np.array([[-1.0, -1.5, -2.0]])
).all()
def test_axis_transform(self):
origin_transform = AxisTransform(new_origin=np.array([1, 1, 1]))
assert (
origin_transform.apply_to(np.array([[1, 1, 1]]))
== np.array([[0, 0, 0]])
).all()
orientation_transform = AxisTransform(
point_on_x_axis=np.array([0, 0, 1]),
point_on_xy_plane=np.array([0, 1, 1]),
)
assert (
orientation_transform.apply_to(np.array([[2, 3, 4]]))
== np.array([[4, 3, -2]])
).all()
axis_transform = AxisTransform(
np.array([1, 1, 1]), np.array([1, 1, 2]), np.array([1, 2, 1])
)
assert (
axis_transform.apply_to(np.array([[2, 3, 4]]))
== np.array([3, 2, -1])
).all()
def test_rigid_transform(self):
A = np.array([[2, 3, 4]])
B = np.array([[3, -4, 9]])
rigid_transform = RigidTransform(A, B)
assert (rigid_transform.apply_to(np.array([[2, 3, 4]])) == B).all()
def test_rotate_0(self, methane):
before = methane.xyz_with_ports
methane.rotate(0.0, np.asarray([1.0, 0.0, 0.0]))
after = methane.xyz_with_ports
assert np.array_equal(before, after)
def test_rotate_2pi(self, methane):
before = methane.xyz_with_ports
methane.rotate(2 * np.pi, np.asarray([1.0, 0.0, 0.0]))
after = methane.xyz_with_ports
assert np.allclose(before, after)
def test_rotate_zero_vector(self, methane):
with pytest.raises(ValueError):
methane.rotate(np.pi / 2, np.asarray([0.0, 0.0, 0.0]))
def test_spin_zero_vector(self, methane):
with pytest.raises(ValueError):
methane.spin(np.pi / 2, np.asarray([0.0, 0.0, 0.0]))
def test_spin_inputs(self, methane):
methane.spin(6.9, [1, 0, 0])
methane.spin(6.9, (1, 0, 0))
def test_rotate_inputs(self, methane):
methane.rotate(6.9, [1, 0, 0])
methane.rotate(6.9, (1, 0, 0))
def test_spin_too_many_dimensions_list(self, methane):
with pytest.raises(ValueError):
methane.spin(0.1, [1, 0, 0, 0])
def test_spin_too_many_dimensions_tuple(self, methane):
with pytest.raises(ValueError):
methane.spin(0.1, (1, 0, 0, 0))
def test_rotate_too_many_dimensions_list(self, methane):
with pytest.raises(ValueError):
methane.rotate(0.1, [1, 0, 0, 0])
def test_rotate_too_many_dimensions_tuple(self, methane):
with pytest.raises(ValueError):
methane.rotate(0.1, (1, 0, 0, 0))
def test_spin_too_few_dimensions_list(self, methane):
with pytest.raises(ValueError):
methane.spin(0.1, [1, 0])
def test_spin_too_few_dimensions_tuple(self, methane):
with pytest.raises(ValueError):
methane.spin(0.1, (1, 0))
def test_rotate_too_few_dimensions_list(self, methane):
with pytest.raises(ValueError):
methane.rotate(0.1, [1, 0])
def test_rotate_too_few_dimensions_tuple(self, methane):
with pytest.raises(ValueError):
methane.rotate(0.1, (1, 0))
def test_spin_360x(self, methane):
before = methane.xyz_with_ports
methane.spin(2 * np.pi, np.asarray([1, 0, 0]))
assert np.allclose(before, methane.xyz_with_ports, atol=1e-16)
def test_spin_360y(self, methane):
before = methane.xyz_with_ports
methane.spin(2 * np.pi, np.asarray([0, 1, 0]))
assert np.allclose(before, methane.xyz_with_ports, atol=1e-16)
def test_spin_360z(self, methane):
before = methane.xyz_with_ports
methane.spin(2 * np.pi, np.asarray([0, 0, 1]))
assert np.allclose(before, methane.xyz_with_ports, atol=1e-16)
def test_spin_0x(self, methane):
before = methane.xyz_with_ports
methane.spin(0, np.asarray([1, 0, 0]))
assert np.allclose(before, methane.xyz_with_ports, atol=1e-16)
def test_spin_0y(self, methane):
before = methane.xyz_with_ports
methane.spin(0, np.asarray([0, 1, 0]))
assert np.allclose(before, methane.xyz_with_ports, atol=1e-16)
def test_spin_0z(self, methane):
before = methane.xyz_with_ports
methane.spin(0, np.asarray([0, 0, 1]))
assert np.allclose(before, methane.xyz_with_ports, atol=1e-16)
def test_spin_x(self, sixpoints):
before = mb.clone(sixpoints)
sixpoints.spin(np.pi, np.asarray([1, 0, 0]))
assert np.allclose(
sixpoints["up"].xyz, before["down"].xyz, atol=1e-16
) and np.allclose(
sixpoints["front"].xyz, before["back"].xyz, atol=1e-16
)
def test_spin_y(self, sixpoints):
before = mb.clone(sixpoints)
sixpoints.spin(np.pi, np.asarray([0, 1, 0]))
assert np.allclose(
sixpoints["left"].xyz, before["right"].xyz, atol=1e-16
) and np.allclose(
sixpoints["front"].xyz, before["back"].xyz, atol=1e-16
)
def test_spin_z(self, sixpoints):
before = mb.clone(sixpoints)
sixpoints.spin(np.pi, np.asarray([0, 0, 1]))
assert np.allclose(
sixpoints["left"].xyz, before["right"].xyz, atol=1e-16
) and np.allclose(sixpoints["up"].xyz, before["down"].xyz, atol=1e-16)
def test_spin_x_eq(self, sixpoints):
compound2 = mb.clone(sixpoints)
sixpoints.spin(np.pi * 1.23456789, np.asarray([1.0, 0.0, 0.0]))
compound2.spin(np.pi * 1.23456789, around=np.asarray([1, 0, 0]))
assert np.allclose(compound2.xyz, sixpoints.xyz, atol=1e-16)
def test_spin_y_eq(self, sixpoints):
compound2 = mb.clone(sixpoints)
sixpoints.spin(np.pi * 1.23456789, np.asarray([0.0, 1.0, 0.0]))
compound2.spin(np.pi * 1.23456789, around=np.asarray([0, 1, 0]))
assert np.allclose(compound2.xyz, sixpoints.xyz, atol=1e-16)
def test_spin_z_eq(self, sixpoints):
compound2 = mb.clone(sixpoints)
sixpoints.spin(np.pi * 1.23456789, np.asarray([0.0, 0.0, 1.0]))
compound2.spin(np.pi * 1.23456789, around=np.asarray([0, 0, 1]))
assert np.allclose(compound2.xyz, sixpoints.xyz, atol=1e-16)
def test_spin_deprecated_x(self, sixpoints):
with pytest.raises(RemovedFuncError):
spin_x(sixpoints, np.pi * 3 / 2)
def test_spin_deprecated_y(self, sixpoints):
with pytest.raises(RemovedFuncError):
spin_y(sixpoints, np.pi * 3 / 2)
def test_spin_deprecated_z(self, sixpoints):
with pytest.raises(RemovedFuncError):
spin_z(sixpoints, 69)
    def test_spin_arbitrary(self, sixpoints):
before = mb.clone(sixpoints)
sixpoints.spin(np.pi, np.asarray([1, 1, 0]))
assert np.allclose(
sixpoints["up"].xyz, before["right"].xyz, atol=1e-16
) and np.allclose(sixpoints["down"].xyz, before["left"].xyz, atol=1e-16)
def test_error_rotate_x(self, methane):
with pytest.raises(RemovedFuncError):
rotate_around_x(methane, np.pi)
def test_error_rotate_y(self, methane):
with pytest.raises(RemovedFuncError):
rotate_around_y(methane, np.pi)
def test_error_rotate_z(self, methane):
with pytest.raises(RemovedFuncError):
rotate_around_z(methane, np.pi)
def test_spin_relative_compound_coordinates(self, sixpoints):
"""Check compounds's relative coordinates don't change upon spinning"""
np.random.seed(0)
angles_before = np.asarray(
[angle(a, b, c) for (a, b, c) in itt.combinations(sixpoints.xyz, 3)]
)
sixpoints.spin(np.pi * 0.1234569789, np.random.rand(3))
angles_after = np.asarray(
[angle(a, b, c) for (a, b, c) in itt.combinations(sixpoints.xyz, 3)]
)
assert np.allclose(angles_before, angles_after, atol=1e-15)
    def test_equivalence_transform_deprecation_warning(self, ch2):
ch22 = mb.clone(ch2)
with pytest.warns(DeprecationWarning):
mb.equivalence_transform(
ch22, from_positions=ch22["up"], to_positions=ch2["down"]
)
def test_rotate_around_x(self, methane):
before = methane.xyz_with_ports
methane.rotate(np.pi, around=np.asarray([1, 0, 0]))
after = methane.xyz_with_ports
assert np.allclose(
before[:, 1], -1 * after[:, 1], atol=1e-16
) and np.allclose(before[:, 2], -1 * after[:, 2], atol=1e-16)
def test_rotate_around_y(self, ch2):
before = ch2.xyz_with_ports
ch2.rotate(np.pi, around=np.asarray([0, 1, 0]))
after = ch2.xyz_with_ports
assert np.allclose(
before[:, 0], -1 * after[:, 0], atol=1e-16
) and np.allclose(before[:, 2], -1 * after[:, 2], atol=1e-16)
def test_rotate_around_z(self, ch2):
before = ch2.xyz_with_ports
ch2.rotate(np.pi, around=np.asarray([0, 0, 1]))
after = ch2.xyz_with_ports
assert np.allclose(
before[:, 0], -1 * after[:, 0], atol=1e-16
) and np.allclose(before[:, 1], -1 * after[:, 1], atol=1e-16)
def test_rotate_around_x_away_from_origin(self, sixpoints):
before = sixpoints.xyz_with_ports
sixpoints.rotate(np.pi, around=np.asarray([1, 0, 0]))
after = sixpoints.xyz_with_ports
assert np.allclose(
before[:, 1], -1 * after[:, 1], atol=1e-16
) and np.allclose(before[:, 2], -1 * after[:, 2], atol=1e-16)
def test_rotate_around_y_away_from_origin(self, sixpoints):
before = sixpoints.xyz_with_ports
sixpoints.rotate(np.pi, around=np.asarray([0, 1, 0]))
after = sixpoints.xyz_with_ports
assert np.allclose(
before[:, 0], -1 * after[:, 0], atol=1e-16
) and np.allclose(before[:, 2], -1 * after[:, 2], atol=1e-16)
def test_rotate_around_z_away_from_origin(self, sixpoints):
before = sixpoints.xyz_with_ports
sixpoints.rotate(np.pi, around=np.asarray([0, 0, 1]))
after = sixpoints.xyz_with_ports
assert np.allclose(
before[:, 1], -1 * after[:, 1], atol=1e-16
) and np.allclose(before[:, 0], -1 * after[:, 0], atol=1e-16)
def test_equivalence_transform(self, ch2, ch3, methane):
ch2_atoms = list(ch2.particles())
methane_atoms = list(methane.particles())
force_overlap(ch2, ch2_atoms[0], methane_atoms[0], add_bond=False)
assert (ch2_atoms[0].pos == methane_atoms[0].pos).all()
force_overlap(ch2, ch2["up"], ch3["up"])
assert ch2.n_bonds == 3
assert ch2.root.bond_graph.number_of_edges() == 3
assert ch3.root.bond_graph.number_of_edges() == 4
ethyl = mb.Compound([ch2, ch3])
assert ethyl.n_bonds == 6
def test_translate(self, methane):
methane_atoms = list(methane.particles())
methane.translate(-methane_atoms[0].pos)
assert (methane_atoms[0].pos == np.array([0, 0, 0])).all()
def test_translate_to(self, methane):
before = methane.xyz_with_ports
original_center = methane.center
translate_value = np.array([2, 3, 4])
methane.translate_to(translate_value)
assert (
methane.xyz_with_ports == before - original_center + translate_value
).all()
def test_different_translates(self, methane):
shifted = mb.clone(methane)
shifted.translate([5, 4, 3])
shifted_methane_coords = mb.coordinate_transform._translate(
methane.xyz, [5, 4, 3]
)
assert np.array_equal(shifted_methane_coords, shifted.xyz)
def test_different_translate_tos_origin(self, methane):
shifted = mb.clone(methane)
shifted.translate_to([0, 0, 0])
x = mb.coordinate_transform._translate_to(methane.xyz, [0, 0, 0])
assert np.array_equal(shifted.xyz, x)
def test_different_translate_tos_not_origin(self, methane):
shifted = mb.clone(methane)
np.random.seed(0)
point = np.random.rand(3)
shifted.translate_to(point)
x = mb.coordinate_transform._translate_to(methane.xyz, point)
assert np.array_equal(shifted.xyz, x)
def test_spin(self):
points = np.asarray(
[[0, 0, 0], [1, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0]],
dtype=float,
)
new_points_should_be = np.asarray(
[[0, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0], [1, 0, 0]],
dtype=float,
)
spun_points = _spin(points, np.pi / 2, [0, 0, 1])
assert np.allclose(spun_points, new_points_should_be, atol=1e-15)
def test_spin_away_from_origin(self):
points = np.asarray(
[[0, 0, 0], [1, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0]],
dtype=float,
)
points += [2, 2, 69]
new_points_should_be = np.asarray(
[[2, 2, 69], [2, 3, 69], [1, 2, 69], [2, 1, 69], [3, 2, 69]],
dtype=float,
)
spun_points = _spin(points, np.pi / 2, [0, 0, 1])
assert np.allclose(spun_points, new_points_should_be, atol=1e-15)
def test_xyz_axis_transform(self):
rot_by_compound = mb.Compound(name="rot_by_compound")
b = mb.Compound(name="b")
c = mb.Compound(name="c")
d = mb.Compound(name="d")
rot_by_array = mb.Compound(name="rot_by_array")
b.pos = np.array([0, 0, 0])
c.pos = np.array([0.5, 0.5, 0.5])
d.pos = np.array([1, 0, 1])
array1 = np.array([0, 0, 0])
array2 = np.array([0.5, 0.5, 0.5])
array3 = np.array([1, 0, 1])
x_axis_transform(rot_by_compound, b, c, d)
x_axis_transform(rot_by_array, array1, array2, array3)
assert np.array_equal(rot_by_compound.pos, rot_by_array.pos)
|
|
import numpy as np
import cv2
import time
import os
import psutil
import gc
from grid_game import GridGame
from util.mem_convert import bytes2human
class Environment:
"""docstring for Environment"""
BUFFER_LEN = 1
EPISODE_STEPS = 18000
EPOCH_COUNT = 10
EPOCH_STEPS = 10000
EVAL_EPS = 0.001
FRAMES_SKIP = 1
FRAME_HEIGHT = 4
FRAME_WIDTH = 4
MAX_NO_OP = 0
MAX_REWARD = 0
def __init__(self, rng, one_state = False, display_screen = False):
self.height = Environment.FRAME_HEIGHT
self.width = Environment.FRAME_WIDTH
self.api = GridGame(self.height, self.width, rng)
self.rng = rng
self.display_screen = display_screen
self.minimal_actions = self.api.getMinimalActionSet()
self.repeat = Environment.FRAMES_SKIP
self.buffer_len = Environment.BUFFER_LEN
self.eval_eps = Environment.EVAL_EPS
self.merge_frame = np.zeros((self.buffer_len
, self.height
, self.width)
, dtype = np.uint8)
self.merge_id = 0
self.max_reward = Environment.MAX_REWARD
self.log_dir = ''
self.network_dir = ''
print self.minimal_actions
def get_action_count(self):
return len(self.minimal_actions)
def train(self, agent, store_freq, folder = None, start_epoch = 0
, ask_for_more = False):
self._open_log_files(agent, folder)
obs = np.zeros((self.height, self.width), dtype = np.uint8)
epoch_count = Environment.EPOCH_COUNT
self.need_reset = True
epoch = start_epoch
while epoch < epoch_count:
steps_left = Environment.EPOCH_STEPS
print "\n" + "=" * 50
print "Epoch #%d" % (epoch + 1)
episode = 0
train_start = time.time()
while steps_left > 0:
num_step, _ = self._run_episode(agent, steps_left, obs)
steps_left -= num_step
episode += 1
if steps_left == 0 or episode % 100 == 0:
print "Finished episode #%d, steps_left = %d" \
% (episode, steps_left)
train_end = time.time()
valid_values = agent.get_validate_values()
eval_values = self.evaluate(agent)
test_end = time.time()
train_time = train_end - train_start
test_time = test_end - train_end
step_per_sec = Environment.EPOCH_STEPS * 1. / max(1, train_time)
print "\tFinished epoch #%d, episode trained = %d\n" \
"\tValidate values = %.3f, evaluate reward = %.3f\n"\
"\tTrain time = %.0fs, test time = %.0fs, steps/sec = %.4f" \
% (epoch + 1, episode, valid_values, eval_values\
, train_time, test_time, step_per_sec)
self._update_log_files(agent, epoch + 1, episode
, valid_values, eval_values
, train_time, test_time
, step_per_sec, store_freq)
gc.collect()
epoch += 1
if ask_for_more and epoch >= epoch_count:
st = raw_input("\n***Enter number of epoch to continue training: ")
more_epoch = 0
try:
more_epoch = int(st)
except Exception, e:
more_epoch = 0
epoch_count += more_epoch
def evaluate(self, agent, episodes = 30, obs = None):
print "\n***Start evaluating"
if obs is None:
obs = np.zeros((self.height, self.width), dtype = np.uint8)
sum_reward = 0.0
sum_step = 0.0
self.need_reset = True
for episode in xrange(episodes):
step, reward = self._run_episode(agent,
Environment.EPISODE_STEPS, obs, self.eval_eps, evaluating = True
, print_Q = self.display_screen)
sum_reward += reward
sum_step += step
print "Finished episode %d, reward = %d, step = %d" \
% (episode + 1, reward, step)
self.need_reset = True
print "Average reward per episode = %.4f" \
% (sum_reward / episodes)
print "Average step per episode = %.4f" % (sum_step / episodes)
return sum_reward / episodes
def _prepare_game(self):
if self.need_reset or self.api.game_over():
self.api.reset_game()
self.need_reset = False
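        # Optionally pad the start of an episode with random no-op actions, then prefill the frame buffer.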
if Environment.MAX_NO_OP > 0:
num_no_op = self.rng.randint(Environment.MAX_NO_OP + 1) \
+ self.buffer_len
for _ in xrange(num_no_op):
self.api.act(0)
for _ in xrange(self.buffer_len):
self._update_buffer()
def _run_episode(self, agent, steps_left, obs
, eps = 0.0, evaluating = False, print_Q = False):
self._prepare_game()
start_lives = self.api.lives()
step_count = 0
sum_reward = 0
is_terminal = False
while step_count < steps_left and not is_terminal:
self._get_screen(obs)
action_id, is_random = agent.get_action(obs, eps, evaluating)
reward = self._repeat_action(self.minimal_actions[action_id])
reward_clip = reward
if self.max_reward > 0:
reward_clip = np.clip(reward, -self.max_reward, self.max_reward)
if print_Q:
print "Observation = \n", np.int32(obs) - self.api.translate
print "Action%s = %d" % (" (random)" if is_random else ""
, self.minimal_actions[action_id])
print "Reward = %d" % (reward)
raw_input()
life_lost = not evaluating and self.api.lives() < start_lives
is_terminal = self.api.game_over() or life_lost \
or step_count + 1 >= steps_left
agent.add_experience(obs, is_terminal, action_id, reward_clip
, evaluating)
sum_reward += reward
step_count += 1
return step_count, sum_reward
def _update_buffer(self):
self.api.getScreenGrayscale(self.merge_frame[self.merge_id, ...])
self.merge_id = (self.merge_id + 1) % self.buffer_len
def _repeat_action(self, action):
reward = 0
for i in xrange(self.repeat):
reward += self.api.act(action)
if i + self.buffer_len >= self.repeat:
self._update_buffer()
return reward
def _get_screen(self, resized_frame):
self._resize_frame(self.merge_frame.max(axis = 0), resized_frame)
def _resize_frame(self, src_frame, dst_frame):
cv2.resize(src = src_frame, dst = dst_frame,
dsize = (self.width, self.height),
interpolation = cv2.INTER_LINEAR)
def _open_log_files(self, agent, folder):
time_str = time.strftime("_%m-%d-%H-%M", time.localtime())
base_rom_name = 'grid'
if folder is not None:
self.log_dir = folder
self.network_dir = self.log_dir + '/network'
return
self.log_dir = '../run_results/grid/' + base_rom_name + time_str
self.network_dir = self.log_dir + '/network'
try:
os.stat(self.log_dir)
except OSError:
os.makedirs(self.log_dir)
try:
os.stat(self.network_dir)
except OSError:
os.makedirs(self.network_dir)
with open(self.log_dir + '/info.txt', 'w') as f:
f.write(agent.get_info())
f.write(self.api.game_info() + '\n\n')
self._write_info(f, Environment)
self._write_info(f, agent.__class__)
self._write_info(f, agent.network.__class__)
with open(self.log_dir + '/results.csv', 'w') as f:
f.write("epoch,episode_train,validate_values,evaluate_reward"\
",train_time,test_time,steps_per_second\n")
mem = psutil.virtual_memory()
with open(self.log_dir + '/memory.csv', 'w') as f:
f.write("epoch,available,free,buffers,cached"\
",available_readable,used_percent\n")
f.write("%d,%d,%d,%d,%d,%s,%.1f\n" % \
(0, mem.available, mem.free, mem.buffers, mem.cached
, bytes2human(mem.available), mem.percent))
def _update_log_files(self, agent, epoch, episode, valid_values
, eval_values, train_time, test_time, step_per_sec
, store_freq):
print "Updating log files"
with open(self.log_dir + '/results.csv', 'a') as f:
f.write("%d,%d,%.4f,%.4f,%d,%d,%.4f\n" % \
(epoch, episode, valid_values, eval_values
, train_time, test_time, step_per_sec))
mem = psutil.virtual_memory()
with open(self.log_dir + '/memory.csv', 'a') as f:
f.write("%d,%d,%d,%d,%d,%s,%.1f\n" % \
(epoch, mem.available, mem.free, mem.buffers, mem.cached
, bytes2human(mem.available), mem.percent))
agent.dump_network(self.network_dir + ('/%03d' % (epoch)) + '.npz')
if (store_freq >= 0 and epoch >= Environment.EPOCH_COUNT) or \
(store_freq > 0 and (epoch % store_freq == 0)):
agent.dump_exp(self.network_dir + '/exp.npz')
def _write_info(self, f, c):
hyper_params = [attr for attr in dir(c) \
if not attr.startswith("__") and not callable(getattr(c, attr))]
for param in hyper_params:
f.write(str(c.__name__) + '.' + param + ' = ' + \
str(getattr(c, param)) + '\n')
f.write('\n')
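
# A minimal, self-contained sketch (not used by the Environment class above) of
# the frame preprocessing done in _update_buffer/_get_screen: the BUFFER_LEN most
# recent grayscale frames are merged with a pixel-wise max and the result is
# resized to the network input size. The shapes and pixel values are invented
# purely for illustration.
def _demo_frame_merge():
    buffer_len, height, width = 2, 4, 4
    merge_frame = np.zeros((buffer_len, height, width), dtype=np.uint8)
    merge_frame[0, 0, 0] = 10   # bright pixel only visible in frame 0
    merge_frame[1, 3, 3] = 20   # bright pixel only visible in frame 1
    merged = merge_frame.max(axis=0)  # pixel-wise max over the buffer
    # Environment resizes into a preallocated destination array; the plain
    # return form of cv2.resize is used here for brevity.
    resized = cv2.resize(merged, (width, height), interpolation=cv2.INTER_LINEAR)
    return resized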
|
|
import random
import sys
import json
import datetime
import signal
ENFORCED_TIME = 5
hrst_table = json.load(open('yay.json'))
class EnforcedTimeException(Exception):
pass
def EnforcedTimeHandler(signum, frame):
    raise EnforcedTimeException()
class Player(object):
'''
Player Class Container.
'''
def __init__(self):
self.number_of_moves = 0
self.player_symbol = None
self.opponent_symbol = None
self.actual_board = [[]] # '-', 'x', 'o'
self.status_board = [] # '-', 'x', 'o'
self.backup_status_board = []
self.transposition_table = {}
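        # Maps a board string to (depth, flag, value); flag: 0 = exact, -1 = lower bound, 1 = upper bound.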
self.heuristic_minimax_table = hrst_table
self.prev_time = datetime.datetime.now()
def init(self):
self.__init__()
def make_board_str(self):
string = ""
for i in xrange(0,9):
for j in xrange(0,9):
string += self.actual_board[i][j]
return string
def get_block_coords(self,block_number):
return {
0 : (0, 0),
1 : (3, 0),
2 : (6, 0),
3 : (0, 3),
4 : (3, 3),
5 : (6, 3),
6 : (0, 6),
7 : (3, 6),
8 : (6, 6),
}.get(block_number)
def get_status_of_block(self,block_number,current_block,our_symbol):
        has_completed = True  # set to False below if any cell of the block is still empty
        first_win = 0  # 0 = none, 1 = me, -1 = opponent
x,y = self.get_block_coords(block_number)
our_symbol = self.player_symbol
other_symbol = self.opponent_symbol
for i in xrange(x,x+3):
for j in xrange(y,y+3):
                if not (current_block[i][j] == other_symbol or current_block[i][j] == our_symbol):
                    has_completed = False
if current_block[x][y] == our_symbol and current_block[x + 1][y] == our_symbol and current_block[x + 2][y] == our_symbol:
if first_win==0:
first_win=1
elif current_block[x][y + 1] == our_symbol and current_block[x + 1][y + 1] == our_symbol and current_block[x + 2][y + 1] == our_symbol:
if first_win==0:
first_win=1
elif current_block[x][y + 2] == our_symbol and current_block[x + 1][y + 2] == our_symbol and current_block[x + 2][y + 2] == our_symbol:
if first_win==0:
first_win=1
elif current_block[x][y] == our_symbol and current_block[x][y + 1] == our_symbol and current_block[x][y + 2] == our_symbol:
if first_win==0:
first_win=1
elif current_block[x + 1][y] == our_symbol and current_block[x + 1][y + 1] == our_symbol and current_block[x + 1][y + 2] == our_symbol:
if first_win==0:
first_win=1
elif current_block[x + 2][y] == our_symbol and current_block[x + 2][y + 1] == our_symbol and current_block[x + 2][y + 2] == our_symbol:
if first_win==0:
first_win=1
elif current_block[x][y] == our_symbol and current_block[x + 1][y + 1] == our_symbol and current_block[x + 2][y + 2] == our_symbol:
if first_win==0:
first_win=1
elif current_block[x + 2][y] == our_symbol and current_block[x + 1][y + 1] == our_symbol and current_block[x][y + 2] == our_symbol:
if first_win==0:
first_win=1
if current_block[x][y] == other_symbol and current_block[x + 1][y] == other_symbol and current_block[x + 2][y] == other_symbol:
if first_win==0:
first_win=-1
elif current_block[x][y + 1] == other_symbol and current_block[x + 1][y + 1] == other_symbol and current_block[x + 2][y + 1] == other_symbol:
if first_win==0:
first_win=-1
elif current_block[x][y + 2] == other_symbol and current_block[x + 1][y + 2] == other_symbol and current_block[x + 2][y + 2] == other_symbol:
if first_win==0:
first_win=-1
elif current_block[x][y] == other_symbol and current_block[x][y + 1] == other_symbol and current_block[x][y + 2] == other_symbol:
if first_win==0:
first_win=-1
elif current_block[x + 1][y] == other_symbol and current_block[x + 1][y + 1] == other_symbol and current_block[x + 1][y + 2] == other_symbol:
if first_win==0:
first_win=-1
elif current_block[x + 2][y] == other_symbol and current_block[x + 2][y + 1] == other_symbol and current_block[x + 2][y + 2] == other_symbol:
if first_win==0:
first_win=-1
elif current_block[x][y] == other_symbol and current_block[x + 1][y + 1] == other_symbol and current_block[x + 2][y + 2] == other_symbol:
if first_win==0:
first_win=-1
elif current_block[x + 2][y] == other_symbol and current_block[x + 1][y + 1] == other_symbol and current_block[x][y + 2] == other_symbol:
if first_win==0:
first_win=-1
return (has_completed,first_win)
def get_permitted_blocks(self,old_move):
for_corner = [0,2,3,5,6,8]
#List of permitted blocks, based on old move.
blocks_allowed = []
if old_move[0] in for_corner and old_move[1] in for_corner:
## we will have 3 representative blocks, to choose from
if old_move[0] % 3 == 0 and old_move[1] % 3 == 0:
## top left 3 blocks are allowed
blocks_allowed = [0, 1, 3]
elif old_move[0] % 3 == 0 and old_move[1] in [2, 5, 8]:
## top right 3 blocks are allowed
blocks_allowed = [1,2,5]
elif old_move[0] in [2,5, 8] and old_move[1] % 3 == 0:
## bottom left 3 blocks are allowed
blocks_allowed = [3,6,7]
elif old_move[0] in [2,5,8] and old_move[1] in [2,5,8]:
### bottom right 3 blocks are allowed
blocks_allowed = [5,7,8]
else:
print "SOMETHING REALLY WEIRD HAPPENED!"
sys.exit(1)
else:
#### we will have only 1 block to choose from (or maybe NONE of them, which calls for a free move)
if old_move[0] % 3 == 0 and old_move[1] in [1,4,7]:
## upper-center block
blocks_allowed = [1]
elif old_move[0] in [1,4,7] and old_move[1] % 3 == 0:
## middle-left block
blocks_allowed = [3]
elif old_move[0] in [2,5,8] and old_move[1] in [1,4,7]:
## lower-center block
blocks_allowed = [7]
elif old_move[0] in [1,4,7] and old_move[1] in [2,5,8]:
## middle-right block
blocks_allowed = [5]
elif old_move[0] in [1,4,7] and old_move[1] in [1,4,7]:
blocks_allowed = [4]
for i in reversed(blocks_allowed):
if self.status_board[i] != '-':
blocks_allowed.remove(i)
return blocks_allowed
def get_empty_out_of(self,blal):
cells = []
for idb in blal:
id1 = idb/3
id2 = idb%3
for i in range(id1*3,id1*3+3):
for j in range(id2*3,id2*3+3):
if self.actual_board[i][j] == '-':
cells.append((i,j))
if cells == []:
for i in range(9):
for j in range(9):
no = (i/3)*3
no += (j/3)
if self.actual_board[i][j] == '-' and self.status_board[no] == '-':
cells.append((i,j))
return cells
    def get_baseline_allowed_moves(self,current_board,moves): # fallback list of permitted moves (not implemented)
pass
def game_completed(self,current_board,our_symbol):
q = [0 for x in xrange(0,9)]
w = [0 for x in xrange(0,9)]
j=0
for i in xrange(0,9):
q[i],w[i]=self.get_status_of_block(i,current_board,our_symbol)
for i in xrange(0,9):
if q[i]==True or w[i]!=0:
j += 1
if w[1]+w[2]+w[0]==3 or w[3]+w[4]+w[5]==3 or w[6]+w[7]+w[8]==3 or w[0]+w[3]+w[6]==3 or w[1]+w[4]+w[7]==3 or w[2]+w[5]+w[8]==3 or w[0]+w[5]+w[8]==3 or w[2]+w[5]+w[7]==3:
return (j,10)
elif w[1]+w[2]+w[0]==-3 or w[3]+w[4]+w[5]==-3 or w[6]+w[7]+w[8]==-3 or w[0]+w[3]+w[6]==-3 or w[1]+w[4]+w[7]==-3 or w[2]+w[5]+w[8]==-3 or w[0]+w[5]+w[8]==-3 or w[2]+w[5]+w[7]==-3:
return (j,-10)
else:
return (j,0)
def return_random_move(self,possible_moves):
return random.choice(possible_moves)
def get_board_status(self):
        return self.get_status_of_block(0, self.status_board, self.player_symbol)
def bind_symbol(self,our_symbol):
self.player_symbol = our_symbol
self.opponent_symbol = 'x'
if self.player_symbol == self.opponent_symbol:
self.opponent_symbol = 'o'
def copy_current_board_elems(self,current_board,board_stat):
self.actual_board = current_board[:]
self.status_board = board_stat[:]
    def get_move_from_number(self,block_number,move_number):
x,y = self.get_block_coords(block_number)
a,b = self.get_block_coords(move_number) # Just got very lazy there. :)
return ( x+(a/3), y+(b/3) )
def make_block_str(self,board,block_number):
x,y = self.get_block_coords(block_number)
string = ""
for i in xrange(x,x+3):
for j in xrange(y,y+3):
string += board[i][j]
return string
def make_minimax_saved_move(self,current_board,blocks_allowed,cells):
acc_moves = []
for block_number in blocks_allowed:
string = self.make_block_str(current_board,block_number)
try:
move_number = self.heuristic_minimax_table[string]
cell = self.get_move_from_number(block_number,move_number)
if cell in cells:
acc_moves.append(cell)
            except KeyError:  # this block position is not in the saved heuristic table
pass
try:
return random.choice(acc_moves)
        except IndexError:  # no saved move was applicable; fall back to any legal cell
return random.choice(cells)
    def heuristic_score(self,board):
        """
        Computes heuristic_score for the passed board configuration.
        For every small block, count the lines (3 rows, 3 columns and the 2
        diagonals) that are still winnable for 'x' and for 'o' -- a line stays
        winnable for a symbol while it contains no opposing symbol -- and take
        h = winnable_x - winnable_o for that block.
        The nine block scores are then combined on the large board with
        positional weights (corner blocks 3, side blocks 2, centre block 4)
        to give H.
        """
        h_list = []
        for index in xrange(9):
            x, y = self.get_block_coords(index)
            lines = []
            for k in xrange(3):
                # line through cells (x, y+k), (x+1, y+k), (x+2, y+k)
                lines.append([board[x][y + k], board[x + 1][y + k], board[x + 2][y + k]])
                # line through cells (x+k, y), (x+k, y+1), (x+k, y+2)
                lines.append([board[x + k][y], board[x + k][y + 1], board[x + k][y + 2]])
            # the two diagonals of the block
            lines.append([board[x][y], board[x + 1][y + 1], board[x + 2][y + 2]])
            lines.append([board[x + 2][y], board[x + 1][y + 1], board[x][y + 2]])
            winnable_x = sum(1 for line in lines if 'o' not in line)
            winnable_o = sum(1 for line in lines if 'x' not in line)
            h_list.append(winnable_x - winnable_o)
        # Calculate H using h values
        winnable_X = 8  # Winnable lines for X on bigger board
        winnable_O = 8  # Winnable lines for O on bigger board
        for index in xrange(9):
            if index in [0, 2, 6, 8]:
                # Corner
                if h_list[index] > 0:
                    winnable_O -= 3
                elif h_list[index] < 0:
                    winnable_X -= 3
            elif index in [1, 3, 5, 7]:
                # Side
                if h_list[index] > 0:
                    winnable_O -= 2
                elif h_list[index] < 0:
                    winnable_X -= 2
            else:
                # Center
                if h_list[index] > 0:
                    winnable_O -= 4
                elif h_list[index] < 0:
                    winnable_X -= 4
        H = winnable_X - winnable_O
        return H
def update_and_save_board_status(self,move_ret,symbol):
self.backup_status_board = self.status_board[:]
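        # Map the cell (row, col) to its 3x3 block index, row-major: block_no = (row / 3) * 3 + (col / 3).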
block_no = (move_ret[0]/3)*3 + (move_ret[1])/3
id1 = block_no/3
id2 = block_no%3
mg = 0
mflg = 0
if self.status_board[block_no] == '-':
if self.actual_board[id1*3][id2*3] == self.actual_board[id1*3+1][id2*3+1] and self.actual_board[id1*3+1][id2*3+1] == self.actual_board[id1*3+2][id2*3+2] and self.actual_board[id1*3+1][id2*3+1] != '-':
mflg=1
if self.actual_board[id1*3+2][id2*3] == self.actual_board[id1*3+1][id2*3+1] and self.actual_board[id1*3+1][id2*3+1] == self.actual_board[id1*3][id2*3 + 2] and self.actual_board[id1*3+1][id2*3+1] != '-':
mflg=1
if mflg != 1:
for i in range(id2*3,id2*3+3):
if self.actual_board[id1*3][i]==self.actual_board[id1*3+1][i] and self.actual_board[id1*3+1][i] == self.actual_board[id1*3+2][i] and self.actual_board[id1*3][i] != '-':
mflg = 1
break
if mflg != 1:
for i in range(id1*3,id1*3+3):
if self.actual_board[i][id2*3]==self.actual_board[i][id2*3+1] and self.actual_board[i][id2*3+1] == self.actual_board[i][id2*3+2] and self.actual_board[i][id2*3] != '-':
mflg = 1
break
if mflg == 1:
self.status_board[block_no] = symbol
id1 = block_no/3
id2 = block_no%3
cells = []
for i in range(id1*3,id1*3+3):
for j in range(id2*3,id2*3+3):
if self.actual_board[i][j] == '-':
cells.append((i,j))
if cells == [] and mflg != 1:
self.status_board[block_no] = 'd'
def reverse_board_status(self):
self.status_board = self.backup_status_board[:]
def free_move(self):
pass # return whatever
def _get_symbol_from_is_maximizing_player(self, is_maximizing_player):
if is_maximizing_player:
return self.player_symbol
else:
return self.opponent_symbol
def negamax_alpha_beta_transposition_table(self, opponent_move, depth, alpha, beta, is_maximizing_player):
alpha_orig = alpha
blocks_allowed = self.get_permitted_blocks(opponent_move)
cells = self.get_empty_out_of(blocks_allowed)
if not cells:
if is_maximizing_player:
return (None, -99999)
else:
return (None, 99999)
# Table lookup here
board_str = self.make_board_str()
try:
tt_depth,tt_flag,tt_value = self.transposition_table[board_str]
if tt_depth >= depth:
if tt_flag == 0: # EXACT
return (cells[0],tt_value)
elif tt_flag == -1: #LOWERBOUND
alpha = max(alpha,tt_value)
elif tt_flag == 1: #UPPERBOUND
beta = min(beta,tt_value)
if alpha >= beta:
return (cells[0],tt_value)
except:
pass
# check termination conditions
game_status, game_score = self.game_completed(self.actual_board, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
if depth == 0 and is_maximizing_player: # Or is terminal node
return ((cells[0]), self.heuristic_score(self.actual_board))
if depth == 0 and not is_maximizing_player:
return ((cells[0]), -self.heuristic_score(self.actual_board))
elif game_status == 9:
return ((cells[0]), game_score)
if is_maximizing_player:
v = -99999 # for the first case only
else:
v = 99999
for cell in cells:
x,y = cell
self.actual_board[x][y] = self._get_symbol_from_is_maximizing_player(is_maximizing_player)
self.update_and_save_board_status(cell, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
child_node_values = self.negamax_alpha_beta_transposition_table(cell, depth - 1, -beta, -alpha, (not is_maximizing_player))
self.actual_board[x][y] = '-'
self.reverse_board_status()
v = -1*child_node_values[1]
if v > alpha:
alpha = v
if beta <= alpha:
break
# Building States here
new_entry_value = v
if new_entry_value <= alpha_orig:
new_entry_flag = 1 #UPPERBOUND
elif new_entry_value >= beta:
new_entry_flag = -1 #LOWERBOUND
else:
new_entry_flag = 0 # EXACT
new_entry_depth = depth
self.transposition_table[board_str] = (new_entry_depth,new_entry_flag,new_entry_value)
return (cells[0], v) # return the cell of the calling function
def negamax_alpha_beta(self, opponent_move, depth, alpha, beta, is_maximizing_player):
blocks_allowed = self.get_permitted_blocks(opponent_move)
cells = self.get_empty_out_of(blocks_allowed)
# check termination conditions
if not cells:
if is_maximizing_player:
return (None, -99999)
else:
return (None, 99999)
game_status, game_score = self.game_completed(self.actual_board, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
if depth == 0 and is_maximizing_player: # Or is terminal node
return ((cells[0]), self.heuristic_score(self.actual_board))
if depth == 0 and not is_maximizing_player:
return ((cells[0]), -self.heuristic_score(self.actual_board))
elif game_status == 9:
return ((cells[0]), game_score)
if is_maximizing_player:
v = -99999 # for the first case only
else:
v = 99999
for cell in cells:
x,y = cell
self.actual_board[x][y] = self._get_symbol_from_is_maximizing_player(is_maximizing_player)
self.update_and_save_board_status(cell, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
child_node_values = self.negamax_alpha_beta(cell, depth - 1, -beta, -alpha, (not is_maximizing_player))
self.actual_board[x][y] = '-'
self.reverse_board_status()
v = -1*child_node_values[1]
if v > alpha:
alpha = v
if beta <= alpha:
break
return (cells[0], v) # return the cell of the calling function
# """
    def minimax_alpha_beta_transposition_table(self, opponent_move, depth, alpha, beta, is_maximizing_player):
        alpha_orig = alpha
        blocks_allowed = self.get_permitted_blocks(opponent_move)
        cells = self.get_empty_out_of(blocks_allowed)
        # check termination conditions
        if not cells:
            if is_maximizing_player:
                return (None, -99999)
            else:
                return (None, 99999)
        # Transposition table lookup (the table stores only values, so a
        # representative cell is returned on an exact hit)
        board_str = self.make_board_str()
        try:
            tt_depth,tt_flag,tt_value = self.transposition_table[board_str]
            if tt_depth >= depth:
                if tt_flag == 0: # EXACT
                    return (cells[0],tt_value)
                elif tt_flag == -1: #LOWERBOUND
                    alpha = max(alpha,tt_value)
                elif tt_flag == 1: #UPPERBOUND
                    beta = min(beta,tt_value)
                if alpha >= beta:
                    return (cells[0],tt_value)
        except KeyError:
            pass
        game_status, game_score = self.game_completed(self.actual_board, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
        if depth == 0: # Or is terminal node
            return ((cells[0]), self.heuristic_score(self.actual_board))
        elif game_status == 9:
            return ((cells[0]), game_score)
        # begin to prune
        best_cell = cells[0]
        if is_maximizing_player:
            v = -99999 # for the first case only
            for cell in cells:
                x,y = cell
                self.actual_board[x][y] = self._get_symbol_from_is_maximizing_player(is_maximizing_player)
                self.update_and_save_board_status(cell, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
                temp = self.minimax_alpha_beta_transposition_table(cell, depth - 1, alpha, beta, False)
                self.actual_board[x][y] = '-'
                self.reverse_board_status()
                if temp[1] > v:
                    v = temp[1]
                    best_cell = cell
                if v > alpha:
                    alpha = v
                if beta <= alpha:
                    break
        else:
            v = 99999 # for the first case only
            for cell in cells:
                x,y = cell
                self.actual_board[x][y] = self._get_symbol_from_is_maximizing_player(is_maximizing_player)
                self.update_and_save_board_status(cell, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
                temp = self.minimax_alpha_beta_transposition_table(cell, depth - 1, alpha, beta, True)
                self.actual_board[x][y] = '-'
                self.reverse_board_status()
                if temp[1] < v:
                    v = temp[1]
                    best_cell = cell
                if v < beta:
                    beta = v
                if beta <= alpha:
                    break
        # Store the result for this position
        new_entry_value = v
        if new_entry_value <= alpha_orig:
            new_entry_flag = 1 #UPPERBOUND
        elif new_entry_value >= beta:
            new_entry_flag = -1 #LOWERBOUND
        else:
            new_entry_flag = 0 # EXACT
        self.transposition_table[board_str] = (depth,new_entry_flag,new_entry_value)
        return (best_cell, v) # return the best cell found to the calling function
# """
def minimax_alpha_beta(self, opponent_move, depth, alpha, beta, is_maximizing_player):
blocks_allowed = self.get_permitted_blocks(opponent_move)
cells = self.get_empty_out_of(blocks_allowed)
# check termination conditions
if not cells:
if is_maximizing_player:
return (None, -99999)
else:
return (None, 99999)
game_status, game_score = self.game_completed(self.actual_board, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
if depth == 0: # Or is terminal node
return ((cells[0]), self.heuristic_score(self.actual_board))
elif game_status == 9:
return ((cells[0]), game_score)
else:
# begin to prune
if is_maximizing_player:
v = -99999 # for the first case only
for cell in cells:
x,y = cell
self.actual_board[x][y] = self._get_symbol_from_is_maximizing_player(is_maximizing_player)
self.update_and_save_board_status(cell, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
child_node_values = self.minimax_alpha_beta(cell, depth - 1, alpha, beta, False)
self.actual_board[x][y] = '-'
self.reverse_board_status()
v = child_node_values[1]
if v > alpha:
alpha = v
if beta <= alpha:
break
return (cells[0], v) # return the cell of the calling function
else:
v = 99999 # for the first case only
for cell in cells:
x,y = cell
self.actual_board[x][y] = self._get_symbol_from_is_maximizing_player(is_maximizing_player)
self.update_and_save_board_status(cell, self._get_symbol_from_is_maximizing_player(is_maximizing_player))
child_node_values = self.minimax_alpha_beta(cell, depth - 1, alpha, beta, True)
self.actual_board[x][y] = '-'
self.reverse_board_status()
v = child_node_values[1]
if beta < v:
beta = v
if beta <= alpha:
break
return (cells[0], v) # return the cell of the calling function
def free_move(self):
print "Reached free move"
return None
def move(self,current_board,board_stat,opponent_move,our_symbol):
'''
Parameters - opponent_move - <(a,b)> previous move by opponent; board_stat - <[]> info of blocks won/lost;
current_board - <[]> current board situation; our_symbol
Return Value - move- <(row,column)>
'''
self.bind_symbol(our_symbol)
self.copy_current_board_elems(current_board,board_stat)
self.number_of_moves = self.number_of_moves + 1
blocks_allowed = self.get_permitted_blocks(opponent_move)
cells = self.get_empty_out_of(blocks_allowed)
if not cells:
return self.free_move()
if self.number_of_moves < 10:
print "switching to level 3"
depth = 3
elif self.number_of_moves < 18:
print "switching to level 5"
depth = 5
else:
print "switching to level 7"
depth = 7
print self.player_symbol
signal.signal(signal.SIGALRM, EnforcedTimeHandler)
signal.alarm(ENFORCED_TIME)
try:
move, value = self.minimax_alpha_beta_transposition_table(opponent_move, depth, -99999, 99999, True)
        except EnforcedTimeException:
move = self.make_minimax_saved_move(current_board,blocks_allowed,cells)
#print "TLE\t\t\t\top: " + str(opponent_move) + "\t\t\t" + str(move)
signal.alarm(0)
if move not in cells:
return random.choice(cells)
return move
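
# A standalone sketch of the SIGALRM time-limit pattern used in Player.move
# above: the search runs under an alarm and, if the handler fires, a cheap
# fallback answer is returned instead. The helper names and the busy loop are
# invented for the example; only the signal module imported above is assumed.
def _demo_time_limited_search(seconds=1):
    def _slow_search():
        while True:  # stand-in for a deep minimax search
            pass
    def _handler(signum, frame):
        raise EnforcedTimeException()
    signal.signal(signal.SIGALRM, _handler)
    signal.alarm(seconds)
    try:
        _slow_search()
        result = "search finished"
    except EnforcedTimeException:
        result = "fallback move"  # analogous to make_minimax_saved_move
    finally:
        signal.alarm(0)  # always cancel any pending alarm
    return result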
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For ZoneManager
"""
import datetime
import mox
from nova import context
from nova import db
from nova import flags
from nova import service
from nova import test
from nova import rpc
from nova import utils
from nova.auth import manager as auth_manager
from nova.scheduler import zone_manager
FLAGS = flags.FLAGS
class FakeZone:
"""Represents a fake zone from the db"""
def __init__(self, *args, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
def exploding_novaclient(zone):
"""Used when we want to simulate a novaclient call failing."""
raise Exception("kaboom")
class ZoneManagerTestCase(test.TestCase):
"""Test case for zone manager"""
def test_ping(self):
zm = zone_manager.ZoneManager()
self.mox.StubOutWithMock(zm, '_refresh_from_db')
self.mox.StubOutWithMock(zm, '_poll_zones')
zm._refresh_from_db(mox.IgnoreArg())
zm._poll_zones(mox.IgnoreArg())
self.mox.ReplayAll()
zm.ping(None)
self.mox.VerifyAll()
def test_refresh_from_db_new(self):
zm = zone_manager.ZoneManager()
self.mox.StubOutWithMock(db, 'zone_get_all')
db.zone_get_all(mox.IgnoreArg()).AndReturn([
FakeZone(id=1, api_url='http://foo.com', username='user1',
password='pass1'),
])
self.assertEquals(len(zm.zone_states), 0)
self.mox.ReplayAll()
zm._refresh_from_db(None)
self.mox.VerifyAll()
self.assertEquals(len(zm.zone_states), 1)
self.assertEquals(zm.zone_states[1].username, 'user1')
def test_service_capabilities(self):
zm = zone_manager.ZoneManager()
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, {})
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3)))
zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30)))
zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc10_a=(99, 99), svc10_b=(99, 99)))
zm.update_service_capabilities("svc1", "host3", dict(c=5))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc1_c=(5, 5), svc10_a=(99, 99),
svc10_b=(99, 99)))
def test_refresh_from_db_replace_existing(self):
zm = zone_manager.ZoneManager()
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
username='user1', password='pass1'))
zm.zone_states[1] = zone_state
self.mox.StubOutWithMock(db, 'zone_get_all')
db.zone_get_all(mox.IgnoreArg()).AndReturn([
FakeZone(id=1, api_url='http://foo.com', username='user2',
password='pass2'),
])
self.assertEquals(len(zm.zone_states), 1)
self.mox.ReplayAll()
zm._refresh_from_db(None)
self.mox.VerifyAll()
self.assertEquals(len(zm.zone_states), 1)
self.assertEquals(zm.zone_states[1].username, 'user2')
def test_refresh_from_db_missing(self):
zm = zone_manager.ZoneManager()
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
username='user1', password='pass1'))
zm.zone_states[1] = zone_state
self.mox.StubOutWithMock(db, 'zone_get_all')
db.zone_get_all(mox.IgnoreArg()).AndReturn([])
self.assertEquals(len(zm.zone_states), 1)
self.mox.ReplayAll()
zm._refresh_from_db(None)
self.mox.VerifyAll()
self.assertEquals(len(zm.zone_states), 0)
def test_refresh_from_db_add_and_delete(self):
zm = zone_manager.ZoneManager()
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
username='user1', password='pass1'))
zm.zone_states[1] = zone_state
self.mox.StubOutWithMock(db, 'zone_get_all')
db.zone_get_all(mox.IgnoreArg()).AndReturn([
FakeZone(id=2, api_url='http://foo.com', username='user2',
password='pass2'),
])
self.assertEquals(len(zm.zone_states), 1)
self.mox.ReplayAll()
zm._refresh_from_db(None)
self.mox.VerifyAll()
self.assertEquals(len(zm.zone_states), 1)
self.assertEquals(zm.zone_states[2].username, 'user2')
def test_poll_zone(self):
self.mox.StubOutWithMock(zone_manager, '_call_novaclient')
zone_manager._call_novaclient(mox.IgnoreArg()).AndReturn(
dict(name='zohan', capabilities='hairdresser'))
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=2,
api_url='http://foo.com', username='user2',
password='pass2'))
zone_state.attempt = 1
self.mox.ReplayAll()
zone_manager._poll_zone(zone_state)
self.mox.VerifyAll()
self.assertEquals(zone_state.attempt, 0)
self.assertEquals(zone_state.name, 'zohan')
def test_poll_zone_fails(self):
self.stubs.Set(zone_manager, "_call_novaclient", exploding_novaclient)
zone_state = zone_manager.ZoneState()
zone_state.update_credentials(FakeZone(id=2,
api_url='http://foo.com', username='user2',
password='pass2'))
zone_state.attempt = FLAGS.zone_failures_to_offline - 1
self.mox.ReplayAll()
zone_manager._poll_zone(zone_state)
self.mox.VerifyAll()
self.assertEquals(zone_state.attempt, 3)
self.assertFalse(zone_state.is_active)
self.assertEquals(zone_state.name, None)
def test_host_service_caps_stale_no_stale_service(self):
zm = zone_manager.ZoneManager()
# services just updated capabilities
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
self.assertFalse(zm.host_service_caps_stale("host1", "svc1"))
self.assertFalse(zm.host_service_caps_stale("host1", "svc2"))
def test_host_service_caps_stale_all_stale_services(self):
zm = zone_manager.ZoneManager()
expiry_time = (FLAGS.periodic_interval * 3) + 1
# Both services became stale
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
utils.set_time_override(time_future)
self.assertTrue(zm.host_service_caps_stale("host1", "svc1"))
self.assertTrue(zm.host_service_caps_stale("host1", "svc2"))
utils.clear_time_override()
def test_host_service_caps_stale_one_stale_service(self):
zm = zone_manager.ZoneManager()
expiry_time = (FLAGS.periodic_interval * 3) + 1
# One service became stale
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
caps = zm.service_states["host1"]["svc1"]
caps["timestamp"] = utils.utcnow() - \
datetime.timedelta(seconds=expiry_time)
self.assertTrue(zm.host_service_caps_stale("host1", "svc1"))
self.assertFalse(zm.host_service_caps_stale("host1", "svc2"))
def test_delete_expired_host_services_del_one_service(self):
zm = zone_manager.ZoneManager()
# Delete one service in a host
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
stale_host_services = {"host1": ["svc1"]}
zm.delete_expired_host_services(stale_host_services)
self.assertFalse("svc1" in zm.service_states["host1"])
self.assertTrue("svc2" in zm.service_states["host1"])
def test_delete_expired_host_services_del_all_hosts(self):
zm = zone_manager.ZoneManager()
# Delete all services in a host
zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
stale_host_services = {"host1": ["svc1", "svc2"]}
zm.delete_expired_host_services(stale_host_services)
self.assertFalse("host1" in zm.service_states)
def test_delete_expired_host_services_del_one_service_per_host(self):
zm = zone_manager.ZoneManager()
# Delete one service per host
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
stale_host_services = {"host1": ["svc1"], "host2": ["svc1"]}
zm.delete_expired_host_services(stale_host_services)
self.assertFalse("host1" in zm.service_states)
self.assertFalse("host2" in zm.service_states)
def test_get_zone_capabilities_one_host(self):
zm = zone_manager.ZoneManager()
# Service capabilities recent
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
def test_get_zone_capabilities_expired_host(self):
zm = zone_manager.ZoneManager()
expiry_time = (FLAGS.periodic_interval * 3) + 1
# Service capabilities stale
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
utils.set_time_override(time_future)
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, {})
utils.clear_time_override()
def test_get_zone_capabilities_multiple_hosts(self):
zm = zone_manager.ZoneManager()
# Both host service capabilities recent
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4)))
def test_get_zone_capabilities_one_stale_host(self):
zm = zone_manager.ZoneManager()
expiry_time = (FLAGS.periodic_interval * 3) + 1
# One host service capabilities become stale
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
serv_caps = zm.service_states["host1"]["svc1"]
serv_caps["timestamp"] = utils.utcnow() - \
datetime.timedelta(seconds=expiry_time)
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(3, 3), svc1_b=(4, 4)))
def test_get_zone_capabilities_multiple_service_per_host(self):
zm = zone_manager.ZoneManager()
# Multiple services per host
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4),
svc2_a=(5, 7), svc2_b=(6, 8)))
def test_get_zone_capabilities_one_stale_service_per_host(self):
zm = zone_manager.ZoneManager()
expiry_time = (FLAGS.periodic_interval * 3) + 1
# Two host services among four become stale
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
serv_caps_1 = zm.service_states["host1"]["svc2"]
serv_caps_1["timestamp"] = utils.utcnow() - \
datetime.timedelta(seconds=expiry_time)
serv_caps_2 = zm.service_states["host2"]["svc1"]
serv_caps_2["timestamp"] = utils.utcnow() - \
datetime.timedelta(seconds=expiry_time)
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2),
svc2_a=(7, 7), svc2_b=(8, 8)))
def test_get_zone_capabilities_three_stale_host_services(self):
zm = zone_manager.ZoneManager()
expiry_time = (FLAGS.periodic_interval * 3) + 1
# Three host services among four become stale
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
serv_caps_1 = zm.service_states["host1"]["svc2"]
serv_caps_1["timestamp"] = utils.utcnow() - \
datetime.timedelta(seconds=expiry_time)
serv_caps_2 = zm.service_states["host2"]["svc1"]
serv_caps_2["timestamp"] = utils.utcnow() - \
datetime.timedelta(seconds=expiry_time)
serv_caps_3 = zm.service_states["host2"]["svc2"]
serv_caps_3["timestamp"] = utils.utcnow() - \
datetime.timedelta(seconds=expiry_time)
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
def test_get_zone_capabilities_all_stale_host_services(self):
zm = zone_manager.ZoneManager()
expiry_time = (FLAGS.periodic_interval * 3) + 1
# All the host services become stale
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
utils.set_time_override(time_future)
caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, {})
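
# A small, self-contained sketch (separate from the tests above) of the
# aggregation these assertions exercise: per-service capability dicts reported
# by several hosts are folded into "<service>_<key>": (min, max) pairs. It is
# an illustrative re-implementation for reference, not the ZoneManager code.
def _demo_aggregate_capabilities(service_states):
    combined = {}
    for host, services in service_states.items():
        for svc, caps in services.items():
            for key, value in caps.items():
                name = "%s_%s" % (svc, key)
                lo, hi = combined.get(name, (value, value))
                combined[name] = (min(lo, value), max(hi, value))
    return combined
# For example, {"host1": {"svc1": dict(a=1, b=2)}, "host2": {"svc1": dict(a=3, b=4)}}
# gives svc1_a == (1, 3) and svc1_b == (2, 4), matching
# test_get_zone_capabilities_multiple_hosts above.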
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import threading
import os
import sys
import traceback
try:
from multiprocessing import current_process
from multiprocessing import util as mputil
except ImportError:
current_process = mputil = None # noqa
from . import current_app
from . import signals
from .local import Proxy
from .utils import LOG_LEVELS, isatty
from .utils.compat import LoggerAdapter, WatchedFileHandler
from .utils.encoding import safe_str, str_t
from .utils.patch import ensure_process_aware_logger
from .utils.term import colored
is_py3k = sys.version_info >= (3, 0)
def mlevel(level):
if level and not isinstance(level, int):
return LOG_LEVELS[level.upper()]
return level
class ColorFormatter(logging.Formatter):
#: Loglevel -> Color mapping.
COLORS = colored().names
colors = {"DEBUG": COLORS["blue"], "WARNING": COLORS["yellow"],
"ERROR": COLORS["red"], "CRITICAL": COLORS["magenta"]}
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def formatException(self, ei):
r = logging.Formatter.formatException(self, ei)
if isinstance(r, str) and not is_py3k:
return safe_str(r)
return r
def format(self, record):
levelname = record.levelname
color = self.colors.get(levelname)
if self.use_color and color:
try:
record.msg = safe_str(str_t(color(record.msg)))
except Exception, exc:
record.msg = "<Unrepresentable %r: %r>" % (
type(record.msg), exc)
record.exc_info = sys.exc_info()
if not is_py3k:
# Very ugly, but have to make sure processName is supported
# by foreign logger instances.
# (processName is always supported by Python 2.7)
if "processName" not in record.__dict__:
process_name = (current_process and
current_process()._name or "")
record.__dict__["processName"] = process_name
return safe_str(logging.Formatter.format(self, record))
class Logging(object):
#: The logging subsystem is only configured once per process.
#: setup_logging_subsystem sets this flag, and subsequent calls
#: will do nothing.
_setup = False
def __init__(self, app):
self.app = app
self.loglevel = mlevel(self.app.conf.CELERYD_LOG_LEVEL)
self.format = self.app.conf.CELERYD_LOG_FORMAT
self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT
self.colorize = self.app.conf.CELERYD_LOG_COLOR
def supports_color(self, logfile=None):
if self.app.IS_WINDOWS:
# Windows does not support ANSI color codes.
return False
if self.colorize is None:
# Only use color if there is no active log file
# and stderr is an actual terminal.
return logfile is None and isatty(sys.stderr)
return self.colorize
def colored(self, logfile=None):
return colored(enabled=self.supports_color(logfile))
def get_task_logger(self, loglevel=None, name=None):
logger = logging.getLogger(name or "celery.task.default")
if loglevel is not None:
logger.setLevel(mlevel(loglevel))
return logger
def setup_logging_subsystem(self, loglevel=None, logfile=None,
format=None, colorize=None, **kwargs):
if Logging._setup:
return
loglevel = mlevel(loglevel or self.loglevel)
format = format or self.format
if colorize is None:
colorize = self.supports_color(logfile)
if mputil and hasattr(mputil, "_logger"):
mputil._logger = None
if not is_py3k:
ensure_process_aware_logger()
receivers = signals.setup_logging.send(sender=None,
loglevel=loglevel, logfile=logfile,
format=format, colorize=colorize)
if not receivers:
root = logging.getLogger()
if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
root.handlers = []
mp = mputil.get_logger() if mputil else None
for logger in filter(None, (root, mp)):
self._setup_logger(logger, logfile, format, colorize, **kwargs)
logger.setLevel(mlevel(loglevel))
signals.after_setup_logger.send(sender=None, logger=logger,
loglevel=loglevel, logfile=logfile,
format=format, colorize=colorize)
# This is a hack for multiprocessing's fork+exec, so that
# logging before Process.run works.
os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
_MP_FORK_LOGFILE_=logfile or "",
_MP_FORK_LOGFORMAT_=format)
Logging._setup = True
return receivers
def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
redirect_level="WARNING"):
handled = self.setup_logging_subsystem(loglevel=loglevel,
logfile=logfile)
if not handled:
logger = self.get_default_logger()
if redirect_stdouts:
self.redirect_stdouts_to_logger(logger,
loglevel=redirect_level)
os.environ.update(
CELERY_LOG_LEVEL=str(loglevel) if loglevel else "",
CELERY_LOG_FILE=str(logfile) if logfile else "",
CELERY_LOG_REDIRECT="1" if redirect_stdouts else "",
CELERY_LOG_REDIRECT_LEVEL=str(redirect_level))
def _detect_handler(self, logfile=None):
"""Create log handler with either a filename, an open stream
or :const:`None` (stderr)."""
logfile = sys.__stderr__ if logfile is None else logfile
if hasattr(logfile, "write"):
return logging.StreamHandler(logfile)
return WatchedFileHandler(logfile)
def get_default_logger(self, loglevel=None, name="celery"):
"""Get default logger instance.
:keyword loglevel: Initial log level.
"""
logger = logging.getLogger(name)
if loglevel is not None:
logger.setLevel(mlevel(loglevel))
return logger
def setup_logger(self, loglevel=None, logfile=None,
format=None, colorize=None, name="celery", root=True,
app=None, **kwargs):
"""Setup the :mod:`multiprocessing` logger.
If `logfile` is not specified, then `sys.stderr` is used.
Returns logger object.
"""
loglevel = mlevel(loglevel or self.loglevel)
format = format or self.format
if colorize is None:
colorize = self.supports_color(logfile)
if not root or self.app.conf.CELERYD_HIJACK_ROOT_LOGGER:
return self._setup_logger(self.get_default_logger(loglevel, name),
logfile, format, colorize, **kwargs)
self.setup_logging_subsystem(loglevel, logfile,
format, colorize, **kwargs)
return self.get_default_logger(name=name)
def setup_task_logger(self, loglevel=None, logfile=None, format=None,
colorize=None, task_name=None, task_id=None, propagate=False,
app=None, **kwargs):
"""Setup the task logger.
If `logfile` is not specified, then `sys.stderr` is used.
Returns logger object.
"""
loglevel = mlevel(loglevel or self.loglevel)
format = format or self.task_format
if colorize is None:
colorize = self.supports_color(logfile)
logger = self._setup_logger(self.get_task_logger(loglevel, task_name),
logfile, format, colorize, **kwargs)
logger.propagate = int(propagate) # this is an int for some reason.
# better to not question why.
signals.after_setup_task_logger.send(sender=None, logger=logger,
loglevel=loglevel, logfile=logfile,
format=format, colorize=colorize)
return LoggerAdapter(logger, {"task_id": task_id,
"task_name": task_name})
def redirect_stdouts_to_logger(self, logger, loglevel=None,
stdout=True, stderr=True):
"""Redirect :class:`sys.stdout` and :class:`sys.stderr` to a
logging instance.
:param logger: The :class:`logging.Logger` instance to redirect to.
:param loglevel: The loglevel redirected messages will be logged as.
"""
proxy = LoggingProxy(logger, loglevel)
if stdout:
sys.stdout = proxy
if stderr:
sys.stderr = proxy
return proxy
def _is_configured(self, logger):
return logger.handlers and not getattr(
logger, "_rudimentary_setup", False)
def _setup_logger(self, logger, logfile, format, colorize,
formatter=ColorFormatter, **kwargs):
if self._is_configured(logger):
return logger
handler = self._detect_handler(logfile)
handler.setFormatter(formatter(format, use_color=colorize))
logger.addHandler(handler)
return logger
get_default_logger = Proxy(lambda: current_app.log.get_default_logger)
setup_logger = Proxy(lambda: current_app.log.setup_logger)
setup_task_logger = Proxy(lambda: current_app.log.setup_task_logger)
get_task_logger = Proxy(lambda: current_app.log.get_task_logger)
setup_logging_subsystem = Proxy(
lambda: current_app.log.setup_logging_subsystem)
redirect_stdouts_to_logger = Proxy(
lambda: current_app.log.redirect_stdouts_to_logger)
class LoggingProxy(object):
"""Forward file object to :class:`logging.Logger` instance.
:param logger: The :class:`logging.Logger` instance to forward to.
:param loglevel: Loglevel to use when writing messages.
"""
mode = "w"
name = None
closed = False
loglevel = logging.ERROR
_thread = threading.local()
def __init__(self, logger, loglevel=None):
self.logger = logger
self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
self._safewrap_handlers()
def _safewrap_handlers(self):
"""Make the logger handlers dump internal errors to
`sys.__stderr__` instead of `sys.stderr` to circumvent
infinite loops."""
def wrap_handler(handler): # pragma: no cover
class WithSafeHandleError(logging.Handler):
def handleError(self, record):
exc_info = sys.exc_info()
try:
try:
traceback.print_exception(exc_info[0],
exc_info[1],
exc_info[2],
None, sys.__stderr__)
except IOError:
pass # see python issue 5971
finally:
del(exc_info)
handler.handleError = WithSafeHandleError().handleError
return map(wrap_handler, self.logger.handlers)
    def write(self, data):
        """Write message to logging object."""
        if getattr(self._thread, "recurse_protection", False):
            # Logger is logging back to this file, so stop recursing.
            return
data = data.strip()
if data and not self.closed:
self._thread.recurse_protection = True
try:
self.logger.log(self.loglevel, safe_str(data))
finally:
self._thread.recurse_protection = False
def writelines(self, sequence):
"""`writelines(sequence_of_strings) -> None`.
Write the strings to the file.
The sequence can be any iterable object producing strings.
This is equivalent to calling :meth:`write` for each string.
"""
for part in sequence:
self.write(part)
def flush(self):
"""This object is not buffered so any :meth:`flush` requests
are ignored."""
pass
def close(self):
"""When the object is closed, no write requests are forwarded to
the logging object anymore."""
self.closed = True
def isatty(self):
"""Always returns :const:`False`. Just here for file support."""
return False
def fileno(self):
pass
class SilenceRepeated(object):
"""Only log action every n iterations."""
def __init__(self, action, max_iterations=10):
self.action = action
self.max_iterations = max_iterations
self._iterations = 0
def __call__(self, *args, **kwargs):
if not self._iterations or self._iterations >= self.max_iterations:
self.action(*args, **kwargs)
self._iterations = 0
else:
self._iterations += 1
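
# A minimal usage sketch (separate from the module above): route writes to
# sys.stdout through a logger via LoggingProxy, which is the same mechanism
# redirect_stdouts_to_logger uses. The logger name and level are arbitrary
# choices for the example.
def _demo_redirect_stdout():
    logger = logging.getLogger("demo.redirect")
    logger.addHandler(logging.StreamHandler(sys.__stderr__))
    logger.setLevel(logging.INFO)
    proxy = LoggingProxy(logger, loglevel=logging.INFO)
    old_stdout, sys.stdout = sys.stdout, proxy
    try:
        print("redirected")  # emitted as an INFO record instead of plain stdout
    finally:
        sys.stdout = old_stdout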
|
|
import fnmatch
import glob
import os
import re
import sys
from itertools import dropwhile
from optparse import make_option
from subprocess import PIPE, Popen
import django
from django.core.management.base import CommandError, NoArgsCommand
from django.utils.text import get_text_list
from django.utils.jslex import prepare_js_for_gettext
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0
def handle_extensions(extensions=('html',), ignored=('py',)):
"""
Organizes multiple extensions that are separated with commas or passed by
using --extension/-e multiple times. Note that the .py extension is ignored
here because of the way non-*.py files are handled in make_messages() (they
are copied to file.ext.py files to trick xgettext to parse them as Python
files).
For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
set(['.html', '.js'])
>>> handle_extensions(['.html, txt,.tpl'])
set(['.html', '.tpl', '.txt'])
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set([x for x in ext_list if x.strip('.') not in ignored])
def _popen(cmd):
"""
Friendly wrapper around Popen for Windows
"""
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True)
output, errors = p.communicate()
return output, errors, p.returncode
def find_files(root, ignore_patterns, verbosity, stdout=sys.stdout, symlinks=False):
"""
Helper function to get all files in the given root.
"""
dir_suffix = '%s*' % os.sep
norm_patterns = [p[:-len(dir_suffix)] if p.endswith(dir_suffix) else p for p in ignore_patterns]
all_files = []
for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=symlinks):
for dirname in dirnames[:]:
if is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns):
dirnames.remove(dirname)
if verbosity > 1:
stdout.write('ignoring directory %s\n' % dirname)
for filename in filenames:
if is_ignored(os.path.normpath(os.path.join(dirpath, filename)), ignore_patterns):
if verbosity > 1:
stdout.write('ignoring file %s in %s\n' % (filename, dirpath))
else:
all_files.extend([(dirpath, filename)])
all_files.sort()
return all_files
def is_ignored(path, ignore_patterns):
"""
Helper function to check if the given path should be ignored or not.
"""
for pattern in ignore_patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
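
# Illustrative check (not invoked by the command): how the fnmatch-based ignore
# patterns behave with a couple of made-up paths.
def _demo_is_ignored():
    patterns = ['*/node_modules/*', '*.min.js']
    return [
        is_ignored('app/static/js/vendor.min.js', patterns),  # True, matches '*.min.js'
        is_ignored('app/models.py', patterns),                 # False, matches neither pattern
    ]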
def copy_plural_forms(msgs, locale, domain, verbosity, stdout=sys.stdout):
"""
Copies plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
if domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
with open(django_po, 'rU') as fp:
m = plural_forms_re.search(fp.read())
if m:
if verbosity > 1:
stdout.write("copying plural forms: %s\n" % m.group('value'))
lines = []
seen = False
for line in msgs.split('\n'):
if not line and not seen:
line = '%s\n' % m.group('value')
seen = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
def write_pot_file(potfile, msgs, file, work_file, is_templatized):
"""
Write the :param potfile: POT file with the :param msgs: contents,
previously making sure its format is valid.
"""
if is_templatized:
old = '#: ' + work_file[2:]
new = '#: ' + file[2:]
msgs = msgs.replace(old, new)
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
with open(potfile, 'a') as fp:
fp.write(msgs)
def process_file(file, dirpath, potfile, domain, verbosity,
extensions, wrap, location, stdout=sys.stdout):
"""
Extract translatable literals from :param file: for :param domain:
creating or updating the :param potfile: POT file.
Uses the xgettext GNU gettext utility.
"""
from django.utils.translation import templatize
if verbosity > 1:
stdout.write('processing file %s in %s\n' % (file, dirpath))
_, file_ext = os.path.splitext(file)
if domain == 'djangojs' and file_ext in extensions:
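        # JavaScript sources are rewritten into a C-like work file so xgettext can parse them with "-L C".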
is_templatized = True
orig_file = os.path.join(dirpath, file)
with open(orig_file) as fp:
src_data = fp.read()
src_data = prepare_js_for_gettext(src_data)
thefile = '%s.c' % file
work_file = os.path.join(dirpath, thefile)
with open(work_file, "w") as fp:
fp.write(src_data)
cmd = (
'xgettext -d %s -L C %s %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=pgettext:1c,2 --keyword=npgettext:1c,2,3 '
'--from-code UTF-8 --add-comments=Translators -o - "%s"' %
(domain, wrap, location, work_file))
elif domain == 'django' and (file_ext == '.py' or file_ext in extensions):
thefile = file
orig_file = os.path.join(dirpath, file)
is_templatized = file_ext in extensions
if is_templatized:
with open(orig_file, "rU") as fp:
src_data = fp.read()
thefile = '%s.py' % file
content = templatize(src_data, orig_file[2:])
with open(os.path.join(dirpath, thefile), "w") as fp:
fp.write(content)
work_file = os.path.join(dirpath, thefile)
cmd = (
'xgettext -d %s -L Python %s %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=ugettext_noop --keyword=ugettext_lazy '
'--keyword=ungettext_lazy:1,2 --keyword=pgettext:1c,2 '
'--keyword=npgettext:1c,2,3 --keyword=pgettext_lazy:1c,2 '
'--keyword=npgettext_lazy:1c,2,3 --from-code UTF-8 '
'--add-comments=Translators -o - "%s"' %
(domain, wrap, location, work_file))
else:
return
msgs, errors, status = _popen(cmd)
if errors:
if status != STATUS_OK:
if is_templatized:
os.unlink(work_file)
if os.path.exists(potfile):
os.unlink(potfile)
raise CommandError(
"errors happened while running xgettext on %s\n%s" %
(file, errors))
elif verbosity > 0:
# Print warnings
stdout.write(errors)
if msgs:
write_pot_file(potfile, msgs, orig_file, work_file, is_templatized)
if is_templatized:
os.unlink(work_file)
def write_po_file(pofile, potfile, domain, locale, verbosity, stdout,
copy_pforms, wrap, location, no_obsolete):
"""
    Creates or updates the :param pofile: PO file for :param domain: and :param
    locale:. Uses the contents of the existing :param potfile:.
    Uses msguniq, msgmerge, and msgattrib GNU gettext utilities.
"""
msgs, errors, status = _popen('msguniq %s %s --to-code=utf-8 "%s"' %
(wrap, location, potfile))
if errors:
if status != STATUS_OK:
os.unlink(potfile)
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
elif verbosity > 0:
stdout.write(errors)
if os.path.exists(pofile):
with open(potfile, 'w') as fp:
fp.write(msgs)
msgs, errors, status = _popen('msgmerge %s %s -q "%s" "%s"' %
(wrap, location, pofile, potfile))
if errors:
if status != STATUS_OK:
os.unlink(potfile)
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif verbosity > 0:
stdout.write(errors)
elif copy_pforms:
msgs = copy_plural_forms(msgs, locale, domain, verbosity, stdout)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % domain, "")
with open(pofile, 'w') as fp:
fp.write(msgs)
os.unlink(potfile)
if no_obsolete:
msgs, errors, status = _popen(
'msgattrib %s %s -o "%s" --no-obsolete "%s"' %
(wrap, location, pofile, pofile))
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
elif verbosity > 0:
stdout.write(errors)
def make_messages(locale=None, domain='django', verbosity=1, all=False,
extensions=None, symlinks=False, ignore_patterns=None, no_wrap=False,
no_location=False, no_obsolete=False, stdout=sys.stdout):
"""
Uses the ``locale/`` directory from the Django Git tree or an
application/project to process all files with translatable literals for
the :param domain: domain and :param locale: locale.
"""
# Need to ensure that the i18n framework is enabled
from django.conf import settings
if settings.configured:
settings.USE_I18N = True
else:
        settings.configure(USE_I18N=True)
if ignore_patterns is None:
ignore_patterns = []
invoked_for_django = False
if os.path.isdir(os.path.join('conf', 'locale')):
localedir = os.path.abspath(os.path.join('conf', 'locale'))
invoked_for_django = True
# Ignoring all contrib apps
ignore_patterns += ['contrib/*']
elif os.path.isdir('locale'):
localedir = os.path.abspath('locale')
else:
raise CommandError("This script should be run from the Django Git "
"tree or your project or app tree. If you did indeed run it "
"from the Git checkout or your project or application, "
"maybe you are just missing the conf/locale (in the django "
"tree) or locale (for project and application) directory? It "
"is not created automatically, you have to create it by hand "
"if you want to enable i18n for your project or application.")
if domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains 'django' and 'djangojs'")
if (locale is None and not all) or domain is None:
message = "Type '%s help %s' for usage information." % (os.path.basename(sys.argv[0]), sys.argv[1])
raise CommandError(message)
# We require gettext version 0.15 or newer.
output, errors, status = _popen('xgettext --version')
if status != STATUS_OK:
raise CommandError("Error running xgettext. Note that Django "
"internationalization requires GNU gettext 0.15 or newer.")
match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
if match:
xversion = (int(match.group('major')), int(match.group('minor')))
if xversion < (0, 15):
raise CommandError("Django internationalization requires GNU "
"gettext 0.15 or newer. You are using version %s, please "
"upgrade your gettext toolset." % match.group())
locales = []
if locale is not None:
locales.append(str(locale))
elif all:
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir))
locales = [os.path.basename(l) for l in locale_dirs]
wrap = '--no-wrap' if no_wrap else ''
location = '--no-location' if no_location else ''
for locale in locales:
if verbosity > 0:
stdout.write("processing language %s\n" % locale)
basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % str(domain))
potfile = os.path.join(basedir, '%s.pot' % str(domain))
if os.path.exists(potfile):
os.unlink(potfile)
for dirpath, file in find_files(".", ignore_patterns, verbosity,
stdout, symlinks=symlinks):
try:
process_file(file, dirpath, potfile, domain, verbosity, extensions,
wrap, location, stdout)
except UnicodeDecodeError:
stdout.write("UnicodeDecodeError: skipped file %s in %s" % (file, dirpath))
if os.path.exists(potfile):
write_po_file(pofile, potfile, domain, locale, verbosity, stdout,
not invoked_for_django, wrap, location, no_obsolete)
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--locale', '-l', default=None, dest='locale',
help='Creates or updates the message files for the given locale (e.g. pt_BR).'),
make_option('--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--all', '-a', action='store_true', dest='all',
default=False, help='Updates the message files for all existing locales.'),
make_option('--extension', '-e', dest='extensions',
help='The file extension(s) to examine (default: "html,txt", or "js" if the domain is "djangojs"). Separate multiple extensions with commas, or use -e multiple times.',
action='append'),
make_option('--symlinks', '-s', action='store_true', dest='symlinks',
default=False, help='Follows symlinks to directories when examining source code and templates for translation strings.'),
make_option('--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.'),
make_option('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*' and '*~'."),
make_option('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines"),
make_option('--no-location', action='store_true', dest='no_location',
default=False, help="Don't write '#: filename:line' lines"),
make_option('--no-obsolete', action='store_true', dest='no_obsolete',
default=False, help="Remove obsolete message strings"),
)
help = ("Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale or --all options.")
requires_model_validation = False
can_import_settings = False
def handle_noargs(self, *args, **options):
locale = options.get('locale')
domain = options.get('domain')
verbosity = int(options.get('verbosity'))
process_all = options.get('all')
extensions = options.get('extensions')
symlinks = options.get('symlinks')
ignore_patterns = options.get('ignore_patterns')
if options.get('use_default_ignore_patterns'):
ignore_patterns += ['CVS', '.*', '*~']
ignore_patterns = list(set(ignore_patterns))
no_wrap = options.get('no_wrap')
no_location = options.get('no_location')
no_obsolete = options.get('no_obsolete')
if domain == 'djangojs':
exts = extensions if extensions else ['js']
else:
exts = extensions if extensions else ['html', 'txt']
extensions = handle_extensions(exts)
if verbosity > 1:
self.stdout.write('examining files with the extensions: %s\n'
% get_text_list(list(extensions), 'and'))
make_messages(locale, domain, verbosity, process_all, extensions,
symlinks, ignore_patterns, no_wrap, no_location, no_obsolete, self.stdout)
|
|
#!/usr/bin/env python
"""
Read protein match output produced by alignment-panel-civ.py and group it by
pathogen (virus or bacterium).
This is currently only useful when you are matching against a subject protein
database whose titles have a pathogen accession number in the 5th |-separated
field, like this:
civ|GENBANK|YP_008686600.1|GENBANK|NC_022580.1|glycoprotein [pathogen name]
civ|GENBANK|YP_008686601.1|GENBANK|NC_022580.1|L protein [pathogen name]
civ|GENBANK|YP_008686602.1|GENBANK|NC_022581.1|nucleoprotein [pathogen name]
In this case, the first two matched subjects are from the same pathogen
(accession NC_022580.1). This script gathers those matches under their
common accession number and provides methods to print them.
Files with names in this format are generated by the make-protein-database.py
script in this directory and are used in databases such as RVDB.
Note that the *only* part of the title that is used in this script is the
pathogen accession number. The product (e.g., nucleoprotein) and pathogen name
are just there to ease human reading of the matches. The accession number is
looked up in a passed database (created with make-protein-database.py).
The script reads *file names* from standard input, and writes to standard
output. Alternatively, file names can be given on the command line.
Typical usage:
$ find . -name summary-proteins |
proteins-to-pathogens-civ.py --html [options] > index.html
Input files must contain lines in the following format:
0.01 58.9 58.9 1 1 1772 title
0.01 58.9 58.9 1 1 1772 title
0.09 51.8 57.4 2 2 3481 title
Fields must be whitespace separated. The seven fields are:
Coverage
Median bit score
Best bit score
Read count
HSP count
Protein length (in amino acids)
Title (in the above-mentioned "protein name [pathogen name]" format)
"""
from __future__ import print_function
import argparse
import sys
from os.path import exists
# It's not clear that the PDF backend is the right choice here, but it
# works (i.e., the generation of PNG images works fine).
import matplotlib
matplotlib.use('PDF')
# These imports are here because dark.civ.proteins imports
# matplotlib.pyplot and we need to set the matplotlib backend before the
# import. So please don't move this import higher in this file.
from dark.civ.proteins import ProteinGrouper, SqliteIndex
from dark.colors import ColorsForCounts
from dark.taxonomy import (
addTaxonomyDatabaseCommandLineOptions,
parseTaxonomyDatabaseCommandLineOptions)
def main(db, taxdb, args):
grouper = ProteinGrouper(db, taxdb,
assetDir=args.assetDir,
sampleName=args.sampleName,
sampleNameRegex=args.sampleNameRegex,
format_=args.format,
saveReadLengths=args.showReadLengths,
titleRegex=args.titleRegex,
negativeTitleRegex=args.negativeTitleRegex,
pathogenDataDir=args.pathogenDataDir)
if args.filenames:
filenames = args.filenames
else:
filenames = (line[:-1] for line in sys.stdin)
for filename in filenames:
with open(filename) as fp:
grouper.addFile(filename, fp)
preambleText = ''
if args.preamble:
for preamble in args.preamble:
if exists(preamble):
preambleText += open(preamble).read()
else:
preambleText += preamble
if args.html:
readCountColors = ColorsForCounts(args.readCountColor,
args.defaultReadCountColor)
print(grouper.toHTML(
args.pathogenPanelFilename,
readCountColors=readCountColors,
minProteinFraction=args.minProteinFraction,
minProteinCount=args.minProteinCount,
pathogenType=args.pathogenType,
title=args.title, preamble=preambleText,
sampleIndexFilename=args.sampleIndexFilename,
omitVirusLinks=args.omitVirusLinks,
bootstrapTreeviewDir=args.bootstrapTreeviewDir))
else:
print(grouper.toStr(
title=args.title, preamble=args.preamble,
pathogenType=args.pathogenType))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Group proteins by the pathogen they're from.")
parser.add_argument(
'filenames', nargs='*',
help=('File names to read input from. The input will typically be '
'generated by alignment-panel-civ.py'))
parser.add_argument(
'--proteinGenomeDatabase', required=True,
help=('The filename of an Sqlite3 database holding protein and '
'genome information, as built by make-protein-database.py'))
# A mutually exclusive group for either --sampleName or --sampleNameRegex
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--sampleName',
help=('An (optional) sample name. Use when all input files are for a '
'single sample. Cannot be used with --sampleNameRegex.'))
group.add_argument(
'--sampleNameRegex',
help=('An (optional) regular expression that can be used to extract a '
'short sample name from full sample file name. The regular '
'expression must have a matching group (delimited by '
'parentheses) that captures the part of the file name that '
'should be used as the sample name.'))
parser.add_argument(
'--pathogenPanelFilename', nargs='?', const=None,
help=('An (optional) filename to write a pathogen-sample panel PNG '
'image to.'))
parser.add_argument(
'--sampleIndexFilename',
help=('An (optional) filename to write a sample index file to. '
'Lines in the file will have an integer index, a space, and '
'then the sample name. Only produced if --html is used '
              '(because the pathogen-NNN-sample-MMM.fastq files are only written '
'in that case).'))
parser.add_argument(
'--html', default=False, action='store_true',
help='If specified, output HTML instead of plain text.')
parser.add_argument(
'--format', default='fasta', choices=('fasta', 'fastq'),
help=('Give the format of the sequence files written by '
'alignment-panel-civ.py.'))
parser.add_argument(
'--minProteinFraction', type=float, default=0.0,
help=('The minimum fraction of proteins in a pathogen that must be '
'matched by a particular sample in order for that pathogen to '
'be displayed for that sample.'))
parser.add_argument(
'--minProteinCount', type=int, default=0,
help=('The minimum number of proteins in a pathogen that must be '
'matched by a particular sample in order for that pathogen to '
'be displayed for that sample.'))
parser.add_argument(
'--pathogenType', default='viral', choices=('bacterial', 'viral'),
help=('Specify the pathogen type. This option only affects the '
'language used in HTML output.'))
parser.add_argument(
'--showReadLengths', default=False, action='store_true',
help=('If specified, the HTML output (use --html to get this) will '
'contain the lengths of all reads that match proteins for a '
'pathogen.'))
parser.add_argument(
'--assetDir', default='out',
help=('The output directory where noninteractive-alignment-panel.py '
'put its plots and FASTA/FASTQ files.'))
parser.add_argument(
'--pathogenDataDir', default='pathogen-data',
help=('The directory where per-pathogen information (e.g., collected '
'reads across all samples) should be written.'))
parser.add_argument(
'--title',
help='The title to show at the top of the output.')
parser.add_argument(
'--preamble', action='append',
help=('Optional preamble text to show after the title. The argument '
'value may also name a file, in which case the file contents '
'will be inserted into the output. May be repeated.'))
parser.add_argument(
'--titleRegex',
help='A regex that pathogen names must match.')
parser.add_argument(
'--negativeTitleRegex',
        help='A regex that pathogen names must not match.')
parser.add_argument(
'--omitVirusLinks', default=False, action='store_true',
help=('If specified, the HTML output (use --html to get this) for '
'viruses will not contain links to ICTV and ViralZone. '
'This should be used when working with viruses that do not yet '
'have names that can be looked up.'))
parser.add_argument(
'--defaultReadCountColor', default='black',
help=('The font color for read counts. This will be used for all '
'read counts that do not otherwise have a color due to use of '
'--readCountColor. Only valid if --html is used.'))
parser.add_argument(
'--readCountColor', action='append',
help=('Specify read count coloring. This option must be given as '
'a space separated "value color" pair. The value is an integer '
'read count and the color is any color specification that can '
'be given to CSS. This argument can be repeated. E.g., '
'--readCountColor "0.9 red" --readCountColor '
'"0.75 rgb(23, 190, 207)" --readCountColor "0.1 #CF3CF3". Read '
'counts will be colored using the color of the highest count '
'threshold they satisfy. The default is to color all read '
'counts with the --defaultReadCountColor color. Only valid if '
'--html is used.'))
parser.add_argument(
'--bootstrapTreeviewDir',
help=('The directory where the bootstrap treeview JS and CSS files '
'can be found. This can be a relative path from the location '
'where the output HTML will be served from or can be a URL '
'directory. In both cases, HTML will be emitted looking for '
'files with names bootstrap-treeview.min.css and '
'bootstrap-treeview.min.js in the directory (or URL directory). '
'Only valid if --html is used.'))
addTaxonomyDatabaseCommandLineOptions(parser)
args = parser.parse_args()
if not args.html:
if args.sampleIndexFilename:
print('It does not make sense to use --sampleIndexFilename '
'without also using --html', file=sys.stderr)
sys.exit(1)
if args.omitVirusLinks:
print('It does not make sense to use --omitVirusLinks '
'without also using --html', file=sys.stderr)
sys.exit(1)
if args.readCountColor:
print('It does not make sense to use --readCountColor '
'without also using --html', file=sys.stderr)
sys.exit(1)
if args.bootstrapTreeviewDir:
print('It does not make sense to use --bootstrapTreeviewDir '
'without also using --html', file=sys.stderr)
sys.exit(1)
if args.omitVirusLinks and args.pathogenType != 'viral':
print('The --omitVirusLinks option only makes sense with '
'--pathogenType viral', file=sys.stderr)
sys.exit(1)
with SqliteIndex(args.proteinGenomeDatabase) as db:
taxdb = parseTaxonomyDatabaseCommandLineOptions(args, parser)
main(db, taxdb, args)
|
|
import warnings
import pytest
import numpy as np
from numpy.lib.nanfunctions import _nan_mask, _replace_nan
from numpy.testing import (
assert_, assert_equal, assert_almost_equal, assert_no_warnings,
assert_raises, assert_array_equal, suppress_warnings
)
# Test data
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
[0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
[np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
[0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
# Rows of _ndat with nans removed
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
np.array([0.1042, -0.5954]),
np.array([0.1610, 0.1859, 0.3146])]
# Rows of _ndat with nans converted to ones
_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],
[0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],
[1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],
[0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])
# Rows of _ndat with nans converted to zeros
_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],
[0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
class TestNanFunctions_MinMax:
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
], ids=["0d", "2d"])
def test_allnans(self, axis, dtype, array):
if axis is not None and array.ndim == 0:
pytest.skip(f"`axis != None` not supported for 0d arrays")
array = array.astype(dtype)
match = "All-NaN slice encountered"
for func in self.nanfuncs:
with pytest.warns(RuntimeWarning, match=match):
out = func(array, axis=axis)
assert np.isnan(out).all()
assert out.dtype == array.dtype
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_subclass(self):
class MyNDArray(np.ndarray):
pass
# Check that it works and that type and
# shape are preserved
mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
res = f(mine, axis=0)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == (3,))
res = f(mine, axis=1)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == (3,))
res = f(mine)
assert_(res.shape == ())
# check that rows of nan are dealt with for subclasses (#4628)
mine[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mine, axis=0)
assert_(isinstance(res, MyNDArray))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mine, axis=1)
assert_(isinstance(res, MyNDArray))
assert_(np.isnan(res[1]) and not np.isnan(res[0])
and not np.isnan(res[2]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mine)
assert_(res.shape == ())
assert_(res != np.nan)
assert_(len(w) == 0)
def test_object_array(self):
arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object)
assert_equal(np.nanmin(arr), 1.0)
assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# assert_equal does not work on object arrays of nan
assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan])
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
class TestNanFunctions_ArgminArgmax:
nanfuncs = [np.nanargmin, np.nanargmax]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_result_values(self):
for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
for row in _ndat:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in")
ind = f(row)
val = row[ind]
# comparing with NaN is tricky as the result
# is always false except for NaN != NaN
assert_(not np.isnan(val))
assert_(not fcmp(val, row).any())
assert_(not np.equal(val, row[:ind]).any())
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
], ids=["0d", "2d"])
def test_allnans(self, axis, dtype, array):
if axis is not None and array.ndim == 0:
pytest.skip(f"`axis != None` not supported for 0d arrays")
array = array.astype(dtype)
for func in self.nanfuncs:
with pytest.raises(ValueError, match="All-NaN slice encountered"):
func(array, axis=axis)
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
assert_raises(ValueError, f, mat, axis=axis)
for axis in [1]:
res = f(mat, axis=axis)
assert_equal(res, np.zeros(0))
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_subclass(self):
class MyNDArray(np.ndarray):
pass
# Check that it works and that type and
# shape are preserved
mine = np.eye(3).view(MyNDArray)
for f in self.nanfuncs:
res = f(mine, axis=0)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == (3,))
res = f(mine, axis=1)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == (3,))
res = f(mine)
assert_(res.shape == ())
_TEST_ARRAYS = {
"0d": np.array(5),
"1d": np.array([127, 39, 93, 87, 46])
}
for _v in _TEST_ARRAYS.values():
_v.setflags(write=False)
@pytest.mark.parametrize(
"dtype",
np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O",
)
@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys())
class TestNanFunctions_NumberTypes:
nanfuncs = {
np.nanmin: np.min,
np.nanmax: np.max,
np.nanargmin: np.argmin,
np.nanargmax: np.argmax,
np.nansum: np.sum,
np.nanprod: np.prod,
np.nancumsum: np.cumsum,
np.nancumprod: np.cumprod,
np.nanmean: np.mean,
np.nanmedian: np.median,
np.nanvar: np.var,
np.nanstd: np.std,
}
nanfunc_ids = [i.__name__ for i in nanfuncs]
@pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids)
@np.errstate(over="ignore")
def test_nanfunc(self, mat, dtype, nanfunc, func):
mat = mat.astype(dtype)
tgt = func(mat)
out = nanfunc(mat)
assert_almost_equal(out, tgt)
if dtype == "O":
assert type(out) is type(tgt)
else:
assert out.dtype == tgt.dtype
@pytest.mark.parametrize(
"nanfunc,func",
[(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)],
ids=["nanquantile", "nanpercentile"],
)
def test_nanfunc_q(self, mat, dtype, nanfunc, func):
mat = mat.astype(dtype)
tgt = func(mat, q=1)
out = nanfunc(mat, q=1)
assert_almost_equal(out, tgt)
if dtype == "O":
assert type(out) is type(tgt)
else:
assert out.dtype == tgt.dtype
@pytest.mark.parametrize(
"nanfunc,func",
[(np.nanvar, np.var), (np.nanstd, np.std)],
ids=["nanvar", "nanstd"],
)
def test_nanfunc_ddof(self, mat, dtype, nanfunc, func):
mat = mat.astype(dtype)
tgt = func(mat, ddof=0.5)
out = nanfunc(mat, ddof=0.5)
assert_almost_equal(out, tgt)
if dtype == "O":
assert type(out) is type(tgt)
else:
assert out.dtype == tgt.dtype
class SharedNanFunctionsTestsMixin:
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_char(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=c, axis=1).dtype.type
res = nf(mat, dtype=c, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=c, axis=None).dtype.type
res = nf(mat, dtype=c, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_subclass(self):
class MyNDArray(np.ndarray):
pass
# Check that it works and that type and
# shape are preserved
array = np.eye(3)
mine = array.view(MyNDArray)
for f in self.nanfuncs:
expected_shape = f(array, axis=0).shape
res = f(mine, axis=0)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == expected_shape)
expected_shape = f(array, axis=1).shape
res = f(mine, axis=1)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == expected_shape)
expected_shape = f(array).shape
res = f(mine)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == expected_shape)
class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
], ids=["0d", "2d"])
def test_allnans(self, axis, dtype, array):
if axis is not None and array.ndim == 0:
pytest.skip(f"`axis != None` not supported for 0d arrays")
array = array.astype(dtype)
for func, identity in zip(self.nanfuncs, [0, 1]):
out = func(array, axis=axis)
assert np.all(out == identity)
assert out.dtype == array.dtype
def test_empty(self):
for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
mat = np.zeros((0, 3))
tgt = [tgt_value]*3
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = []
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = tgt_value
res = f(mat, axis=None)
assert_equal(res, tgt)
class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nancumsum, np.nancumprod]
stdfuncs = [np.cumsum, np.cumprod]
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan)
], ids=["0d", "2d"])
def test_allnans(self, axis, dtype, array):
if axis is not None and array.ndim == 0:
pytest.skip(f"`axis != None` not supported for 0d arrays")
array = array.astype(dtype)
for func, identity in zip(self.nanfuncs, [0, 1]):
out = func(array)
assert np.all(out == identity)
assert out.dtype == array.dtype
def test_empty(self):
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
mat = np.zeros((0, 3))
tgt = tgt_value*np.ones((0, 3))
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = mat
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = np.zeros((0))
res = f(mat, axis=None)
assert_equal(res, tgt)
def test_keepdims(self):
for f, g in zip(self.nanfuncs, self.stdfuncs):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = f(mat, axis=axis, out=None)
res = g(mat, axis=axis, out=None)
assert_(res.ndim == tgt.ndim)
for f in self.nanfuncs:
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
rs = np.random.RandomState(0)
d[rs.rand(*d.shape) < 0.5] = np.nan
res = f(d, axis=None)
assert_equal(res.shape, (1155,))
for axis in np.arange(4):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
res = np.nancumprod(_ndat, axis=axis)
assert_almost_equal(res, tgt)
tgt = np.cumsum(_ndat_zeros,axis=axis)
res = np.nancumsum(_ndat, axis=axis)
assert_almost_equal(res, tgt)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.eye(3)
for axis in (-2, -1, 0, 1):
tgt = rf(mat, axis=axis)
res = nf(mat, axis=axis, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
def test_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
def test_out_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
out = np.empty(_ndat.shape[0], dtype=dtype)
assert_raises(TypeError, f, _ndat, axis=1, out=out)
def test_ddof(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in [0, 1]:
tgt = [rf(d, ddof=ddof) for d in _rdat]
res = nf(_ndat, axis=1, ddof=ddof)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
dsize = [len(d) for d in _rdat]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in range(5):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
sup.filter(np.ComplexWarning)
tgt = [ddof >= d for d in dsize]
res = nf(_ndat, axis=1, ddof=ddof)
assert_equal(np.isnan(res), tgt)
if any(tgt):
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 0)
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
], ids=["0d", "2d"])
def test_allnans(self, axis, dtype, array):
if axis is not None and array.ndim == 0:
pytest.skip(f"`axis != None` not supported for 0d arrays")
array = array.astype(dtype)
match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)"
for func in self.nanfuncs:
with pytest.warns(RuntimeWarning, match=match):
out = func(array, axis=axis)
assert np.isnan(out).all()
# `nanvar` and `nanstd` convert complex inputs to their
# corresponding floating dtype
if func is np.nanmean:
assert out.dtype == array.dtype
else:
assert out.dtype == np.abs(array).dtype
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
_TIME_UNITS = (
"Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"
)
# All `inexact` + `timedelta64` type codes
_TYPE_CODES = list(np.typecodes["AllFloat"])
_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS]
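# e.g. _TYPE_CODES starts with the float codes 'e', 'f', 'd', 'g', 'F', 'D',
# 'G' and is followed by 'm8[Y]', 'm8[M]', ..., 'm8[as]' (one per unit above).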
class TestNanFunctions_Median:
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanmedian(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = np.nanmedian(d, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanmedian(d, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.median(mat, axis=1)
res = np.nanmedian(nan_mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.median(mat, axis=None)
res = np.nanmedian(nan_mat, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_small_large(self):
# test the small and large code paths, current cutoff 400 elements
for s in [5, 20, 51, 200, 1000]:
d = np.random.randn(4, s)
# Randomly set some elements to NaN:
w = np.random.randint(0, d.size, size=d.size // 5)
d.ravel()[w] = np.nan
d[:,0] = 1. # ensure at least one good value
# use normal median without nans to compare
tgt = []
for x in d:
nonan = np.compress(~np.isnan(x), x)
tgt.append(np.median(nonan, overwrite_input=True))
assert_array_equal(np.nanmedian(d, axis=-1), tgt)
def test_result_values(self):
tgt = [np.median(d) for d in _rdat]
res = np.nanmedian(_ndat, axis=1)
assert_almost_equal(res, tgt)
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("dtype", _TYPE_CODES)
def test_allnans(self, dtype, axis):
mat = np.full((3, 3), np.nan).astype(dtype)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
output = np.nanmedian(mat, axis=axis)
assert output.dtype == mat.dtype
assert np.isnan(output).all()
if axis is None:
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 3)
# Check scalar
scalar = np.array(np.nan).astype(dtype)[()]
output_scalar = np.nanmedian(scalar)
assert output_scalar.dtype == scalar.dtype
assert np.isnan(output_scalar)
if axis is None:
assert_(len(sup.log) == 2)
else:
assert_(len(sup.log) == 4)
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanmedian(0.) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(np.AxisError, np.nanmedian, d, axis=-5)
assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5))
assert_raises(np.AxisError, np.nanmedian, d, axis=4)
assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4))
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
def test_float_special(self):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
for inf in [np.inf, -np.inf]:
a = np.array([[inf, np.nan], [np.nan, np.nan]])
assert_equal(np.nanmedian(a, axis=0), [inf, np.nan])
assert_equal(np.nanmedian(a, axis=1), [inf, np.nan])
assert_equal(np.nanmedian(a), inf)
# minimum fill value check
a = np.array([[np.nan, np.nan, inf],
[np.nan, np.nan, inf]])
assert_equal(np.nanmedian(a), inf)
assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf])
assert_equal(np.nanmedian(a, axis=1), inf)
# no mask path
a = np.array([[inf, inf], [inf, inf]])
assert_equal(np.nanmedian(a, axis=1), inf)
a = np.array([[inf, 7, -inf, -9],
[-10, np.nan, np.nan, 5],
[4, np.nan, np.nan, inf]],
dtype=np.float32)
if inf > 0:
assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.])
assert_equal(np.nanmedian(a), 4.5)
else:
assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.])
assert_equal(np.nanmedian(a), -2.5)
assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf])
for i in range(0, 10):
for j in range(1, 10):
a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
assert_equal(np.nanmedian(a), inf)
assert_equal(np.nanmedian(a, axis=1), inf)
assert_equal(np.nanmedian(a, axis=0),
([np.nan] * i) + [inf] * j)
a = np.array([([np.nan] * i) + ([-inf] * j)] * 2)
assert_equal(np.nanmedian(a), -inf)
assert_equal(np.nanmedian(a, axis=1), -inf)
assert_equal(np.nanmedian(a, axis=0),
([np.nan] * i) + [-inf] * j)
class TestNanFunctions_Percentile:
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanpercentile(ndat, 30)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.percentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
res = np.nanpercentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = np.nanpercentile(d, 90, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.percentile(mat, 42, axis=1)
res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.percentile(mat, 42, axis=None)
res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_result_values(self):
tgt = [np.percentile(d, 28) for d in _rdat]
res = np.nanpercentile(_ndat, 28, axis=1)
assert_almost_equal(res, tgt)
# Transpose the array to fit the output convention of numpy.percentile
tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
res = np.nanpercentile(_ndat, (28, 98), axis=1)
assert_almost_equal(res, tgt)
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
], ids=["0d", "2d"])
def test_allnans(self, axis, dtype, array):
if axis is not None and array.ndim == 0:
pytest.skip(f"`axis != None` not supported for 0d arrays")
array = array.astype(dtype)
with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"):
out = np.nanpercentile(array, 60, axis=axis)
assert np.isnan(out).all()
assert out.dtype == array.dtype
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_equal(np.nanpercentile(0., 100), 0.)
a = np.arange(6)
r = np.nanpercentile(a, 50, axis=0)
assert_equal(r, 2.5)
assert_(np.isscalar(r))
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5)
assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5))
assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=4)
assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, 4))
assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))
def test_multiple_percentiles(self):
perc = [50, 100]
mat = np.ones((4, 3))
nan_mat = np.nan * mat
# For checking consistency in higher dimensional case
large_mat = np.ones((3, 4, 5))
large_mat[:, 0:2:4, :] = 0
large_mat[:, :, 3:] *= 2
for axis in [None, 0, 1]:
for keepdim in [False, True]:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "All-NaN slice encountered")
val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val.shape, val.shape)
val = np.percentile(large_mat, perc, axis=axis,
keepdims=keepdim)
nan_val = np.nanpercentile(large_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val, val)
megamat = np.ones((3, 4, 5, 6))
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
class TestNanFunctions_Quantile:
# most of this is already tested by TestPercentile
def test_regression(self):
ar = np.arange(24).reshape(2, 3, 4).astype(float)
ar[0][1] = np.nan
assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50))
assert_equal(np.nanquantile(ar, q=0.5, axis=0),
np.nanpercentile(ar, q=50, axis=0))
assert_equal(np.nanquantile(ar, q=0.5, axis=1),
np.nanpercentile(ar, q=50, axis=1))
assert_equal(np.nanquantile(ar, q=[0.5], axis=1),
np.nanpercentile(ar, q=[50], axis=1))
assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1),
np.nanpercentile(ar, q=[25, 50, 75], axis=1))
def test_basic(self):
x = np.arange(8) * 0.5
assert_equal(np.nanquantile(x, 0), 0.)
assert_equal(np.nanquantile(x, 1), 3.5)
assert_equal(np.nanquantile(x, 0.5), 1.75)
def test_no_p_overwrite(self):
# this is worth retesting, because quantile does not make a copy
p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
p = p0.copy()
np.nanquantile(np.arange(100.), p, interpolation="midpoint")
assert_array_equal(p, p0)
p0 = p0.tolist()
p = p.tolist()
np.nanquantile(np.arange(100.), p, interpolation="midpoint")
assert_array_equal(p, p0)
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
], ids=["0d", "2d"])
def test_allnans(self, axis, dtype, array):
if axis is not None and array.ndim == 0:
pytest.skip(f"`axis != None` not supported for 0d arrays")
array = array.astype(dtype)
with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"):
out = np.nanquantile(array, 1, axis=axis)
assert np.isnan(out).all()
assert out.dtype == array.dtype
@pytest.mark.parametrize("arr, expected", [
# array of floats with some nans
(np.array([np.nan, 5.0, np.nan, np.inf]),
np.array([False, True, False, True])),
# int64 array that can't possibly have nans
(np.array([1, 5, 7, 9], dtype=np.int64),
True),
# bool array that can't possibly have nans
(np.array([False, True, False, True]),
True),
# 2-D complex array with nans
(np.array([[np.nan, 5.0],
[np.nan, np.inf]], dtype=np.complex64),
np.array([[False, True],
[False, True]])),
])
def test__nan_mask(arr, expected):
for out in [None, np.empty(arr.shape, dtype=np.bool_)]:
actual = _nan_mask(arr, out=out)
assert_equal(actual, expected)
# the above won't distinguish between True proper
# and an array of True values; we want True proper
# for types that can't possibly contain NaN
if type(expected) is not np.ndarray:
assert actual is True
def test__replace_nan():
""" Test that _replace_nan returns the original array if there are no
NaNs, not a copy.
"""
for dtype in [np.bool_, np.int32, np.int64]:
arr = np.array([0, 1], dtype=dtype)
result, mask = _replace_nan(arr, 0)
assert mask is None
# do not make a copy if there are no nans
assert result is arr
for dtype in [np.float32, np.float64]:
arr = np.array([0, 1], dtype=dtype)
result, mask = _replace_nan(arr, 2)
assert (mask == False).all()
# mask is not None, so we make a copy
assert result is not arr
assert_equal(result, arr)
arr_nan = np.array([0, 1, np.nan], dtype=dtype)
result_nan, mask_nan = _replace_nan(arr_nan, 2)
assert_equal(mask_nan, np.array([False, False, True]))
assert result_nan is not arr_nan
assert_equal(result_nan, np.array([0, 1, 2]))
assert np.isnan(arr_nan[-1])
|
|
from copy import copy
from datetime import datetime, timedelta
import json
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.module_loading import import_by_path
from enum import Enum
from jsonfield import JSONField
from manager_utils import ManagerUtilsManager
from regex_field import RegexField
#######################################################
# Misc Utils
#######################################################
import six
def load_function(dotted_path):
return import_by_path(dotted_path)
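# e.g. (hypothetical dotted path; called the way ResponderAction.execute does):
#   notify = load_function('issue.actions.email_admins')
#   notify(issue, subject='Something broke')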
class ExtendedEnum(Enum):
@classmethod
def name_to_value(cls, name):
return getattr(cls, name).value
@classmethod
def choices(cls):
return [(c.value, c.name) for c in list(cls)]
#######################################################
# Core Issue models
#######################################################
class IssueStatus(ExtendedEnum):
"""
Enum listing possible Status values for an Issue.
"""
Open = 0
Resolved = 1
Wont_fix = 2
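# e.g. IssueStatus.choices() -> [(0, 'Open'), (1, 'Resolved'), (2, 'Wont_fix')]
#      IssueStatus.name_to_value('Wont_fix') -> 2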
class IssueManager(ManagerUtilsManager):
"""
Custom model manager for the Issue model.
"""
def get_open_issues(self):
"""
        Retrieve a queryset of all Open Issues.
"""
return self.filter(status=IssueStatus.Open.value)
def reopen_issue(self, name, **kwargs):
"""
Reopen the specified Issue.
"""
kwargs['status'] = IssueStatus.Open.value
self.filter(name=name).update(**kwargs)
def is_wont_fix(self, **kwargs):
"""
Does the specified issue exist with a status of Wont_fix?
"""
return self.filter(status=IssueStatus.Wont_fix.value, **kwargs).exists()
def resolve_issue(self, **kwargs):
"""
Resolve the specified issue.
"""
self.filter(**kwargs).update(status=IssueStatus.Resolved.value)
def maybe_open_issue(self, name, **kwargs):
"""
Create the specified Issue unless:
1) It is already open - if so, return it
2) It already exists and is marked as Wont_fix
        Returns a tuple (Issue or None, Boolean) of the Issue and whether it was created.
"""
if self.filter(name=name, status=IssueStatus.Wont_fix.value).exists():
# Exists but is Wont_fix
return None, False
if self.filter(name=name, status=IssueStatus.Open.value).exists():
# Exists and is Open
return self.filter(name=name, status=IssueStatus.Open.value).latest('creation_time'), False
# Either an Issue of this name doesn't exist or it is Resolved; either way, create one!
return self.create(name=name, **kwargs), True
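# Sketch of typical manager usage (issue names and details are illustrative):
#   issue, created = Issue.objects.maybe_open_issue('nightly-import-failed',
#                                                   details={'rows': 0})
#   Issue.objects.resolve_issue(name='nightly-import-failed')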
@six.python_2_unicode_compatible
class BaseIssue(models.Model):
name = models.TextField()
details = JSONField(null=True, blank=True)
creation_time = models.DateTimeField(auto_now_add=True)
status = models.IntegerField(choices=IssueStatus.choices(), default=IssueStatus.Open.value)
resolved_time = models.DateTimeField(null=True, blank=True)
objects = IssueManager()
class Meta:
abstract = True
def __str__(self):
return 'Issue: {name} - {status}'.format(name=self.name, status=IssueStatus(self.status))
@property
def is_open(self):
return self.status == IssueStatus.Open.value
@property
def is_resolved(self):
return self.status == IssueStatus.Resolved.value
@property
def is_wont_fix(self):
return self.status == IssueStatus.Wont_fix.value
class ModelIssueManager(IssueManager):
def _replace_record_with_content_type(self, kwargs):
kwargs = copy(kwargs)
record = kwargs.pop('record', None)
if record:
kwargs['record_id'], kwargs['record_type'] = (
record.id, ContentType.objects.get_for_model(record)
)
return kwargs
def reopen_issue(self, *args, **kwargs):
"""
Reopen the specified Issue.
"""
kwargs = self._replace_record_with_content_type(kwargs)
return super(ModelIssueManager, self).reopen_issue(*args, **kwargs)
def is_wont_fix(self, *args, **kwargs):
"""
Does the specified issue exist with a status of Wont_fix?
"""
kwargs = self._replace_record_with_content_type(kwargs)
return super(ModelIssueManager, self).is_wont_fix(*args, **kwargs)
def resolve_issue(self, *args, **kwargs):
"""
Resolve the specified issue.
"""
kwargs = self._replace_record_with_content_type(kwargs)
return super(ModelIssueManager, self).resolve_issue(*args, **kwargs)
class Issue(BaseIssue):
"""
    Particular problems or issues that the system should keep a record of.
"""
pass
class ModelIssue(BaseIssue):
"""
An issue involving a particular entry in the database.
"""
record_type = models.ForeignKey(ContentType, related_name='+', null=True)
record_id = models.PositiveIntegerField(default=0)
record = generic.GenericForeignKey('record_type', 'record_id')
objects = ModelIssueManager()
@six.python_2_unicode_compatible
class IssueAction(models.Model):
"""
A response that was taken to address a particular issue.
"""
issue = models.ForeignKey(Issue, related_name='executed_actions')
responder_action = models.ForeignKey('issue.ResponderAction')
execution_time = models.DateTimeField(auto_now_add=True)
success = models.BooleanField(default=True)
details = JSONField(null=True, blank=True)
def __str__(self):
return (
'IssueResponse: {self.issue.name} - {self.responder_action} - '
'{self.success} at {self.execution_time}'.format(self=self)
)
#######################################################
# Issue Response models
#######################################################
@six.python_2_unicode_compatible
class Responder(models.Model):
"""
When an Issue is created, there is often an appropriate response.
A Responder record encodes a particular type of Issue to watch for and what actions
to take when an Issue is opened.
Examples might be emailing an admin, opening a ticket in PagerDuty, or running a bit
of code to fix a problem.
The actions to be taken are implemented as ResponderActions that ForeignKey to a particular
Responder record.
"""
watch_pattern = RegexField(null=True, max_length=128)
def __str__(self):
return 'Responder: {watch_pattern.pattern}'.format(watch_pattern=self.watch_pattern)
def respond(self, issue):
"""
Check if the provided issue matches our watch pattern.
If it does, execute the associated ResponderActions.
"""
if self._match(issue.name):
self._execute(issue)
return True
else:
return False
def _match(self, issue_name):
return self.watch_pattern.match(issue_name)
def _execute(self, issue):
"""
        Execute, in delay order, the pending ResponderActions for this
        Responder whose delay has elapsed.
"""
IssueAction.objects.bulk_create(
[
a.execute(issue) for a in self._get_pending_actions_for_issue(issue)
if a.is_time_to_execute(issue)
])
def _get_pending_actions_for_issue(self, issue):
already_executed_action_pks = issue.executed_actions.values_list('responder_action__pk', flat=True).all()
return self.actions.exclude(pk__in=already_executed_action_pks).order_by('delay_sec')
@six.python_2_unicode_compatible
class ResponderAction(models.Model):
"""
A particular action to take in response to some Issue.
Any function can be specified in the target_function field, though some initial
helpers are defined in issue.actions
"""
responder = models.ForeignKey(Responder, related_name='actions')
    # Delay (seconds) after the Issue is created before this action may run.
delay_sec = models.IntegerField()
# What action do we want to occur
target_function = models.TextField()
function_kwargs = JSONField(default={})
def __str__(self):
return 'ResponderAction: {responder} - {target_function} - {function_kwargs}'.format(
responder=self.responder, target_function=self.target_function, function_kwargs=self.function_kwargs)
@property
def delay(self):
return timedelta(seconds=self.delay_sec)
def is_time_to_execute(self, issue):
"""
        A ResponderAction is only executable once its delay has elapsed since the Issue was created.
"""
return (issue.creation_time + self.delay) <= datetime.utcnow()
def execute(self, issue):
"""
Execute the configured action.
"""
try:
details = load_function(self.target_function)(issue, **self.function_kwargs)
kwargs = self.construct_issue_action_kwargs(True, details)
except Exception as e:
kwargs = self.construct_issue_action_kwargs(False, str(e))
return IssueAction(issue=issue, **kwargs)
def construct_issue_action_kwargs(self, success, failure_details=None):
"""
Construct a summary of this action execution.
"""
return {
'responder_action': self,
'execution_time': str(datetime.utcnow()),
'success': success,
'details': json.dumps(failure_details),
}
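# Illustrative wiring of a Responder to an action (all values are hypothetical):
#   responder = Responder.objects.create(watch_pattern=r'db\..*')
#   ResponderAction.objects.create(
#       responder=responder, delay_sec=300,
#       target_function='issue.actions.email_admins', function_kwargs={})
#   responder.respond(issue)  # runs matching, not-yet-executed actions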
#######################################################
# Assertion models
#######################################################
class BaseAssertion(models.Model):
"""
A class for tracking that certain properties of the web application are true.
When an Assertion fails, an Issue is created to note this.
Think of it as a cross between the classic 'assert' statement and a traditional software monitoring
solution, like 'Nagios'.
"""
    # What function do we call to verify everything is copacetic?
check_function = models.TextField()
# Assertion name; also the name of any Issue created
name = models.TextField()
class Meta:
abstract = True
@property
def issue_class(self):
return Issue
def check_assertion(self, *args, **kwargs):
"""
Run the configured check to detect problems and create or resolve issues as needed.
"""
(all_is_well, details) = load_function(self.check_function)(**kwargs)
if not all_is_well:
kwargs['details'] = details
self._open_or_update_issue(**kwargs)
else:
self._resolve_open_issue(**kwargs)
return all_is_well
def _open_or_update_issue(self, details, **kwargs):
"""
        Open (or re-open) an issue with this Assertion's name, unless one already exists with a status of Wont_fix.
"""
return self.issue_class.objects.maybe_open_issue(self.name, **kwargs)[0]
def _resolve_open_issue(self, **kwargs):
"""
Resolve any issues with this name.
"""
self.issue_class.objects.resolve_issue(name=self.name, **kwargs)
class Assertion(BaseAssertion):
pass
class ModelAssertion(BaseAssertion):
"""
A class for making assertions about models.
An Issue is created for any record for which the assertion fails.
"""
model_type = models.ForeignKey(ContentType, related_name='+')
@property
def issue_class(self):
return ModelIssue
@property
def queryset(self):
"""
Queryset of records to iterate over.
"""
return self.model_type.model_class().objects.all()
def check_assertion(self, **kwargs):
"""
Run the configured check against all records in the queryset to detect problems.
Returns True if the assertion holds true for all records of the configured model type.
"""
def check_record(record):
return super(ModelAssertion, self).check_assertion(record=record, **kwargs)
return all(map(check_record, self.queryset))
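# --- Illustrative sketch (not part of the original module) ---
# A periodic task (cron, celery beat, etc.) could sweep every stored assertion;
# failing checks open Issues and passing checks resolve them again. The helper
# name is hypothetical.
def _run_all_assertions():
    """Evaluate every Assertion and ModelAssertion; return True if all passed."""
    results = [assertion.check_assertion() for assertion in Assertion.objects.all()]
    results += [assertion.check_assertion() for assertion in ModelAssertion.objects.all()]
    return all(results)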
|
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaql.tests
class TestMath(yaql.tests.TestCase):
def test_binary_plus_int(self):
res = self.eval('2 + 3')
self.assertEqual(5, res)
self.assertIsInstance(res, int)
def test_binary_plus_float(self):
res = self.eval('2 + 3.0')
self.assertEqual(5, res)
self.assertIsInstance(res, float)
res = self.eval('2.3+3.5')
self.assertEqual(5.8, res)
self.assertIsInstance(res, float)
def test_binary_minus_int(self):
res = self.eval('12 -3')
self.assertEqual(9, res)
self.assertIsInstance(res, int)
def test_binary_minus_float(self):
res = self.eval('1 - 2.1')
self.assertEqual(-1.1, res)
self.assertIsInstance(res, float)
res = self.eval('123.321- 0.321')
self.assertEqual(123.0, res)
self.assertIsInstance(res, float)
def test_multiplication_int(self):
res = self.eval('3 * 2')
self.assertEqual(6, res)
self.assertIsInstance(res, int)
self.assertEqual(-6, self.eval('3 * -2'))
self.assertEqual(6, self.eval('-3 * -2'))
def test_multiplication_float(self):
res = self.eval('3.0 * 2.0')
self.assertEqual(6.0, res)
self.assertIsInstance(res, float)
self.assertAlmostEqual(-6.51, self.eval('3.1 * -2.1'))
self.assertAlmostEqual(6.51, self.eval('-3.1 * -2.1'))
def test_division(self):
self.assertEqual(3, self.eval('7 / 2'))
self.assertEqual(-4, self.eval('7 / -2'))
self.assertAlmostEqual(2.5, self.eval('5 / 2.0'))
self.assertAlmostEqual(2.5, self.eval('5.0 / 2'))
self.assertAlmostEqual(-2.5, self.eval('-5.0 / 2.0'))
self.assertRaises(ZeroDivisionError, self.eval, '7 / 0')
self.assertRaises(ZeroDivisionError, self.eval, '7 / -0.0')
def test_brackets(self):
self.assertEqual(-4, self.eval('1 - (2) - 3'))
self.assertEqual(2, self.eval('1 - (2 - 3)'))
def test_unary_minus(self):
self.assertEqual(-4, self.eval('-4'))
self.assertEqual(-12.0, self.eval('-12.0'))
self.assertEqual(4, self.eval('3--1'))
self.assertEqual(2, self.eval('3+-1'))
self.assertAlmostEqual(4.3, self.eval('3.2 - -1.1'))
self.assertEqual(2, self.eval('-(1-3)'))
def test_unary_plus(self):
self.assertEqual(4, self.eval('+4'))
self.assertEqual(12.0, self.eval('+12.0'))
self.assertEqual(2, self.eval('3-+1'))
self.assertEqual(4, self.eval('3++1'))
self.assertAlmostEqual(2.1, self.eval('3.2 - +1.1'))
def test_modulo_int(self):
res = self.eval('9 mod 5')
self.assertEqual(4, res)
self.assertIsInstance(res, int)
self.assertEqual(-1, self.eval('9 mod -5'))
def test_modulo_float(self):
res = self.eval('9.0 mod 5')
self.assertEqual(4.0, res)
self.assertIsInstance(res, float)
res = self.eval('9 mod 5.0')
self.assertEqual(4.0, res)
self.assertIsInstance(res, float)
res = self.eval('9.0 mod 5.0')
self.assertEqual(4.0, res)
self.assertIsInstance(res, float)
self.assertAlmostEqual(-1.1, self.eval('9.1 mod -5.1'))
def test_abs(self):
self.assertEqual(4, self.eval('abs(-4)'))
self.assertEqual(4, self.eval('abs(4)'))
self.assertEqual(4.4, self.eval('abs(-4.4)'))
def test_gt(self):
res = self.eval('5 > 3')
self.assertIsInstance(res, bool)
self.assertTrue(res)
self.assertFalse(self.eval('3 > 3'))
def test_lt(self):
res = self.eval('3 < 5')
self.assertIsInstance(res, bool)
self.assertTrue(res)
self.assertFalse(self.eval('3 < 3'))
self.assertTrue(self.eval('2.5 < 3'))
def test_gte(self):
res = self.eval('5 >= 3')
self.assertIsInstance(res, bool)
self.assertTrue(res)
self.assertTrue(self.eval('3 >= 3'))
self.assertTrue(self.eval('3.5 > 3'))
self.assertFalse(self.eval('2 >= 3'))
def test_lte(self):
res = self.eval('3 <= 5')
self.assertIsInstance(res, bool)
self.assertTrue(res)
self.assertTrue(self.eval('3 <= 3'))
self.assertFalse(self.eval('3 <= 2'))
def test_eq(self):
self.assertTrue(self.eval('5 = 5'))
self.assertTrue(self.eval('1.0 = 1'))
self.assertFalse(self.eval('5 = 6'))
def test_neq(self):
self.assertFalse(self.eval('5 != 5'))
self.assertFalse(self.eval('0 != 0.0'))
self.assertTrue(self.eval('5 != 6'))
def test_zero_division(self):
self.assertRaises(ZeroDivisionError, self.eval, '0/0')
def test_random(self):
self.assertTrue(self.eval('with(random()) -> $ >= 0 and $ < 1'))
self.assertTrue(self.eval('with(random(2, 5)) -> $ >= 2 and $ <= 5'))
def test_int(self):
self.assertEqual(5, self.eval("int('5')"))
self.assertEqual(5, self.eval('int(5.2)'))
self.assertEqual(0, self.eval('int(null)'))
def test_float(self):
self.assertAlmostEqual(-1.23, self.eval("float('-1.23')"))
self.assertEqual(0.0, self.eval('float(null)'))
def test_bitwise_or(self):
self.assertEqual(3, self.eval('bitwiseOr(1, 3)'))
self.assertEqual(3, self.eval('bitwiseOr(1, 2)'))
def test_bitwise_and(self):
self.assertEqual(1, self.eval('bitwiseAnd(1, 3)'))
self.assertEqual(0, self.eval('bitwiseAnd(1, 2)'))
def test_bitwise_xor(self):
self.assertEqual(2, self.eval('bitwiseXor(1, 3)'))
self.assertEqual(3, self.eval('bitwiseXor(1, 2)'))
def test_bitwise_not(self):
self.assertEqual(-2, self.eval('bitwiseNot(1)'))
def test_shift_bits_left(self):
self.assertEqual(32, self.eval('shiftBitsLeft(1, 5)'))
def test_shift_bits_right(self):
self.assertEqual(2, self.eval('shiftBitsRight(32, 4)'))
self.assertEqual(0, self.eval('shiftBitsRight(32, 6)'))
def test_pow(self):
self.assertEqual(32, self.eval('pow(2, 5)'))
self.assertEqual(4, self.eval('pow(2, 5, 7)'))
def test_sign(self):
self.assertEqual(1, self.eval('sign(123)'))
self.assertEqual(-1, self.eval('sign(-123)'))
self.assertEqual(0, self.eval('sign(0)'))
def test_round(self):
self.assertAlmostEqual(2.0, self.eval('round(2.3)'))
self.assertAlmostEqual(2.3, self.eval('round(2.345, 1)'))
def test_is_integer(self):
self.assertTrue(self.eval('isInteger(-2)'))
self.assertTrue(self.eval('isInteger(2)'))
self.assertFalse(self.eval('isInteger(2.3)'))
self.assertFalse(self.eval('isInteger(abc)'))
self.assertFalse(self.eval('isInteger(true)'))
def test_is_number(self):
self.assertTrue(self.eval('isNumber(-2)'))
self.assertTrue(self.eval('isNumber(2)'))
self.assertTrue(self.eval('isNumber(2.3)'))
self.assertFalse(self.eval('isNumber(abc)'))
self.assertFalse(self.eval('isNumber(true)'))
|
|
#! /usr/bin/env python
"""
Python API for KB SRU
"""
import sys
import urllib
import requests
from lxml import etree
SRU_BASEURL = 'http://jsru.kb.nl/sru/sru'
SRU_BASEURL += '?version=1.2&maximumRecords=%i'
SRU_BASEURL += '&operation=searchRetrieve'
SRU_BASEURL += '&startRecord=%i'
SRU_BASEURL += '&recordSchema=%s'
SRU_BASEURL += '&x-collection=%s&query=%s'
SETS = {'ANP': {'collection': 'ANP',
'description_en': 'Radio Bulletins ANP Press Agency',
'description_nl': 'ANP Radiobulletins Digitaal',
'metadataPrefix': 'didl',
'recordschema': 'dcx',
'setname': 'anp',
'time_period': [1937, 1989]},
'DPO': {'collection': 'DPO_boekdeel',
'description_en': 'Early Dutch Books Online',
'description_nl': 'Early Dutch Books Online',
'metadataPrefix': 'didl',
'recordschema': 'ddd',
'setname': 'DPO',
'time_period': [1781, 1800]},
'BYVANCK': {'description_en': 'Medieval Illuminated Manuscripts',
'description_nl': 'Middeleeuwse Verluchte Handschriften',
'metadataPrefix': 'dcx',
'setname': 'BYVANCK',
'time_period': [500, 1500]},
'SGD': {'description_en': 'States General Digital',
'description_nl': 'Staten-Generaal Digitaal',
'metadataPrefix': 'dcx',
'setname': 'sgd:register',
'time_period': [1962, 1994]},
'GGC': {'collection': 'GGC',
'description_en': 'General Catalogue KB',
'description_nl': 'Algemene Catalogus KB',
'metadataPrefix': 'dcx',
'recordschema': 'dcx',
'setname': 'ggc',
'time_period': [1937, 2021]}} # No idea what to use here?
# Name spaces in GGC records
srw_ns = 'http://www.loc.gov/zing/srw/'
tel_ns = 'http://krait.kb.nl/coop/tel/handbook/telterms.html'
xsi_ns = 'http://www.w3.org/2001/XMLSchema-instance'
dc_ns = 'http://purl.org/dc/elements/1.1/'
dcterms_ns = 'http://purl.org/dc/terms/'
dcx_ns = 'http://krait.kb.nl/coop/tel/handbook/telterms.html'
NSMAPGGC = {"srw": srw_ns,
"tel": tel_ns,
"xsi": xsi_ns,
"dc": dc_ns,
"dcterms": dcterms_ns,
"dcx": dcx_ns}
class response():
def __init__(self, record_data, sru):
self.record_data = record_data
self.sru = sru
def getElementText(self, tagName, attributeName, attributeValue):
# Returns text content of all elements for which tag matches tagName,
# and attribute value equals attributeValue. Set attributeName to empty
# string to get all tagName matches.
textFields = []
for r in self.record_data.iter():
if r.tag == tagName:
if attributeName != '':
try:
if r.attrib[attributeName] == attributeValue:
textFields.append(r.text)
except KeyError:
pass
else:
textFields.append(r.text)
return textFields
@property
def records(self):
if self.sru.nr_of_records == 0:
record_data = "<xml></xml>"
else:
ns = {'zs': 'http://www.loc.gov/zing/srw/'}
record_data = self.record_data.xpath("zs:records/zs:record",
namespaces=ns)[0]
return record(record_data, self.sru)
    # The property functions below each return a list of all instances that
    # satisfy the stated criteria.
@property
def typesDutch(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}type',
'{http://www.w3.org/XML/1998/namespace}lang',
'nl'))
@property
def typesDCMI(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}type',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'DCMIType'))
@property
def identifiersISBN(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:ISBN'))
@property
def identifiersBrinkman(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:Brinkman'))
@property
def identifiersURI(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:URI'))
@property
def identifiersOCLC(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'OCLC'))
@property
def languagesDutch(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'nl'))
@property
def languagesEnglish(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'en'))
@property
def languagesFrench(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'fr'))
@property
def languagesISO639(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:ISO639-2'))
@property
def dates(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}date',
'',
''))
@property
def extents(self):
return(self.getElementText('{http://purl.org/dc/terms/}extent',
'',
''))
@property
def creators(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}creator',
'',
''))
@property
def contributors(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}contributor',
'',
''))
@property
def titles(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'',
''))
@property
def titlesMain(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:maintitle'))
@property
def titlesIntermediate(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:intermediatetitle'))
@property
def publishers(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}publisher',
'',
''))
@property
def countries(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}country',
'',
''))
@property
def subjectsBrinkman(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:Brinkman'))
@property
def subjectsISO9707(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'ISO_9707_[Brinkman]'))
@property
def subjectsUNESCO(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'UNESCO'))
@property
def collectionIdentifiers(self):
return(self.getElementText('{http://purl.org/dc/terms/}isPartOf',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:collectionIdentifier'))
@property
def recordIdentifiersURI(self):
return(self.getElementText('{http://krait.kb.nl/coop/tel/handbook/telterms.html}recordIdentifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:URI'))
@property
def annotations(self):
        # Note that annotations sometimes contain language or itemID attributes;
# ignored for now (collect everything).
return(self.getElementText('{http://krait.kb.nl/coop/tel/handbook/telterms.html}annotation',
'',
''))
class record():
def __init__(self, record_data, sru):
self.record_data = record_data
self.sru = sru
def __iter__(self):
return self
# This works under Python 2.7
def next(self):
if self.sru.nr_of_records == 0:
raise StopIteration
if self.sru.startrecord < self.sru.nr_of_records + 1:
record_data = self.sru.run_query()
self.sru.startrecord += 1
return response(record_data, self.sru)
else:
raise StopIteration
# This works under Python 3
def __next__(self):
if self.sru.nr_of_records == 0:
raise StopIteration
if self.sru.startrecord < self.sru.nr_of_records + 1:
record_data = self.sru.run_query()
self.sru.startrecord += 1
return response(record_data, self.sru)
else:
raise StopIteration
class sru():
DEBUG = False
collection = False
maximumrecords = 50
nr_of_records = 0
query = ""
recordschema = False
sru_collections = SETS
startrecord = 0
def search(self, query, collection=False,
startrecord=1, maximumrecords=1, recordschema=False):
self.maximumrecords = maximumrecords
if sys.version.startswith('3'):
self.query = urllib.parse.quote_plus(query)
elif sys.version.startswith('2'):
self.query = urllib.quote_plus(query)
self.startrecord = startrecord
if collection not in self.sru_collections:
raise Exception('Unknown collection')
self.collection = self.sru_collections[collection]['collection']
if not self.collection:
raise Exception('Error, no collection specified')
if not recordschema:
self.recordschema = self.sru_collections[collection]['recordschema']
else:
self.recordschema = recordschema
record_data = self.run_query()
nr_of_records = [i.text for i in record_data.iter() if
i.tag.endswith('numberOfRecords')][0]
self.nr_of_records = int(nr_of_records)
if self.nr_of_records > 0:
return response(record_data, self)
return False
def run_query(self):
url = SRU_BASEURL % (self.maximumrecords, self.startrecord,
self.recordschema, self.collection, self.query)
if self.DEBUG:
sys.stdout.write(url)
r = requests.get(url)
if not r.status_code == 200:
raise Exception('Error while getting data from %s' % url)
record_data = etree.fromstring(r.content)
return record_data
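# --- Illustrative usage sketch (not part of the original module) ---
# Assumes network access to jsru.kb.nl and a syntactically valid CQL query;
# the helper name and example query are hypothetical.
def _example_titles(query='coffee', collection='GGC', maximum=10):
    """Search the GGC set and return the titles found in the first batch of records."""
    client = sru()
    result = client.search(query, collection=collection, maximumrecords=maximum)
    return result.titles if result else []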
|
|
import calendar
import datetime
import re
import sys
import urllib
import urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_unicode(urllib.unquote(smart_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, unicode)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_unicode(urllib.unquote_plus(smart_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, unicode)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib.urlencode(
[(smart_str(k),
[smart_str(i) for i in v] if isinstance(v, (list,tuple)) else smart_str(v))
for k, v in query],
doseq)
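# Illustrative sketch (not part of the original module): urlencode() accepts
# plain dicts, MultiValueDicts and sequence values; with doseq=1 a list value
# becomes repeated query parameters. The helper name is hypothetical.
def _urlencode_example():
    """Return something like 'q=caf%C3%A9&tags=a&tags=b' (key order may vary)."""
    return urlencode({'q': u'caf\xe9', 'tags': ['a', 'b']}, doseq=1)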
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
    Returns a floating point number expressed in seconds since the epoch, in
UTC.
"""
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
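# Illustrative sketch (not part of the original module): http_date() and
# parse_http_date() round-trip an epoch timestamp through the RFC1123 format.
# The helper name is hypothetical.
def _http_date_roundtrip_example(stamp=1234567890):
    """Format an epoch timestamp with http_date() and parse it back again."""
    return parse_http_date(http_date(stamp)) == stamp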
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int.
if value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if not 0 <= i <= sys.maxint:
raise ValueError("Base36 conversion input too large or incorrect type.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
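# Illustrative sketch (not part of the original module): the two base 36
# helpers are inverses of each other for non-negative integers within range.
# The helper name is hypothetical.
def _base36_roundtrip_example(value=1234567):
    """Encode ``value`` to base 36, decode it back, and return the encoded form."""
    encoded = int_to_base36(value)
    assert base36_to_int(encoded) == value
    return encoded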
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.decode('string_escape') for e in etags]
return etags
def quote_etag(etag):
"""
    Wraps a string in double quotes, escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
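# Illustrative sketch (not part of the original module): same_origin() compares
# only scheme, hostname and port, so differing paths do not matter. The helper
# name is hypothetical.
def _same_origin_example():
    """Returns True: both URLs share scheme, host and (implicit) port."""
    return same_origin('http://example.com/a/', 'http://example.com/b/')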
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AddressSpace
from ._models_py3 import ApplicationGateway
from ._models_py3 import ApplicationGatewayAuthenticationCertificate
from ._models_py3 import ApplicationGatewayAvailableSslOptions
from ._models_py3 import ApplicationGatewayAvailableSslPredefinedPolicies
from ._models_py3 import ApplicationGatewayAvailableWafRuleSetsResult
from ._models_py3 import ApplicationGatewayBackendAddress
from ._models_py3 import ApplicationGatewayBackendAddressPool
from ._models_py3 import ApplicationGatewayBackendHealth
from ._models_py3 import ApplicationGatewayBackendHealthHttpSettings
from ._models_py3 import ApplicationGatewayBackendHealthPool
from ._models_py3 import ApplicationGatewayBackendHealthServer
from ._models_py3 import ApplicationGatewayBackendHttpSettings
from ._models_py3 import ApplicationGatewayConnectionDraining
from ._models_py3 import ApplicationGatewayFirewallDisabledRuleGroup
from ._models_py3 import ApplicationGatewayFirewallRule
from ._models_py3 import ApplicationGatewayFirewallRuleGroup
from ._models_py3 import ApplicationGatewayFirewallRuleSet
from ._models_py3 import ApplicationGatewayFrontendIPConfiguration
from ._models_py3 import ApplicationGatewayFrontendPort
from ._models_py3 import ApplicationGatewayHttpListener
from ._models_py3 import ApplicationGatewayIPConfiguration
from ._models_py3 import ApplicationGatewayListResult
from ._models_py3 import ApplicationGatewayPathRule
from ._models_py3 import ApplicationGatewayProbe
from ._models_py3 import ApplicationGatewayProbeHealthResponseMatch
from ._models_py3 import ApplicationGatewayRedirectConfiguration
from ._models_py3 import ApplicationGatewayRequestRoutingRule
from ._models_py3 import ApplicationGatewaySku
from ._models_py3 import ApplicationGatewaySslCertificate
from ._models_py3 import ApplicationGatewaySslPolicy
from ._models_py3 import ApplicationGatewaySslPredefinedPolicy
from ._models_py3 import ApplicationGatewayUrlPathMap
from ._models_py3 import ApplicationGatewayWebApplicationFirewallConfiguration
from ._models_py3 import ApplicationSecurityGroup
from ._models_py3 import ApplicationSecurityGroupListResult
from ._models_py3 import AuthorizationListResult
from ._models_py3 import Availability
from ._models_py3 import AvailableProvidersList
from ._models_py3 import AvailableProvidersListCity
from ._models_py3 import AvailableProvidersListCountry
from ._models_py3 import AvailableProvidersListParameters
from ._models_py3 import AvailableProvidersListState
from ._models_py3 import AzureAsyncOperationResult
from ._models_py3 import AzureReachabilityReport
from ._models_py3 import AzureReachabilityReportItem
from ._models_py3 import AzureReachabilityReportLatencyInfo
from ._models_py3 import AzureReachabilityReportLocation
from ._models_py3 import AzureReachabilityReportParameters
from ._models_py3 import BGPCommunity
from ._models_py3 import BackendAddressPool
from ._models_py3 import BgpPeerStatus
from ._models_py3 import BgpPeerStatusListResult
from ._models_py3 import BgpServiceCommunity
from ._models_py3 import BgpServiceCommunityListResult
from ._models_py3 import BgpSettings
from ._models_py3 import ConnectionMonitor
from ._models_py3 import ConnectionMonitorDestination
from ._models_py3 import ConnectionMonitorListResult
from ._models_py3 import ConnectionMonitorParameters
from ._models_py3 import ConnectionMonitorQueryResult
from ._models_py3 import ConnectionMonitorResult
from ._models_py3 import ConnectionMonitorResultProperties
from ._models_py3 import ConnectionMonitorSource
from ._models_py3 import ConnectionResetSharedKey
from ._models_py3 import ConnectionSharedKey
from ._models_py3 import ConnectionStateSnapshot
from ._models_py3 import ConnectivityDestination
from ._models_py3 import ConnectivityHop
from ._models_py3 import ConnectivityInformation
from ._models_py3 import ConnectivityIssue
from ._models_py3 import ConnectivityParameters
from ._models_py3 import ConnectivitySource
from ._models_py3 import DhcpOptions
from ._models_py3 import Dimension
from ._models_py3 import DnsNameAvailabilityResult
from ._models_py3 import EffectiveNetworkSecurityGroup
from ._models_py3 import EffectiveNetworkSecurityGroupAssociation
from ._models_py3 import EffectiveNetworkSecurityGroupListResult
from ._models_py3 import EffectiveNetworkSecurityRule
from ._models_py3 import EffectiveRoute
from ._models_py3 import EffectiveRouteListResult
from ._models_py3 import EndpointServiceResult
from ._models_py3 import EndpointServicesListResult
from ._models_py3 import Error
from ._models_py3 import ErrorDetails
from ._models_py3 import ExpressRouteCircuit
from ._models_py3 import ExpressRouteCircuitArpTable
from ._models_py3 import ExpressRouteCircuitAuthorization
from ._models_py3 import ExpressRouteCircuitListResult
from ._models_py3 import ExpressRouteCircuitPeering
from ._models_py3 import ExpressRouteCircuitPeeringConfig
from ._models_py3 import ExpressRouteCircuitPeeringListResult
from ._models_py3 import ExpressRouteCircuitRoutesTable
from ._models_py3 import ExpressRouteCircuitRoutesTableSummary
from ._models_py3 import ExpressRouteCircuitServiceProviderProperties
from ._models_py3 import ExpressRouteCircuitSku
from ._models_py3 import ExpressRouteCircuitStats
from ._models_py3 import ExpressRouteCircuitsArpTableListResult
from ._models_py3 import ExpressRouteCircuitsRoutesTableListResult
from ._models_py3 import ExpressRouteCircuitsRoutesTableSummaryListResult
from ._models_py3 import ExpressRouteServiceProvider
from ._models_py3 import ExpressRouteServiceProviderBandwidthsOffered
from ._models_py3 import ExpressRouteServiceProviderListResult
from ._models_py3 import FlowLogInformation
from ._models_py3 import FlowLogStatusParameters
from ._models_py3 import FrontendIPConfiguration
from ._models_py3 import GatewayRoute
from ._models_py3 import GatewayRouteListResult
from ._models_py3 import IPAddressAvailabilityResult
from ._models_py3 import IPConfiguration
from ._models_py3 import InboundNatPool
from ._models_py3 import InboundNatRule
from ._models_py3 import InboundNatRuleListResult
from ._models_py3 import IpsecPolicy
from ._models_py3 import Ipv6ExpressRouteCircuitPeeringConfig
from ._models_py3 import LoadBalancer
from ._models_py3 import LoadBalancerBackendAddressPoolListResult
from ._models_py3 import LoadBalancerFrontendIPConfigurationListResult
from ._models_py3 import LoadBalancerListResult
from ._models_py3 import LoadBalancerLoadBalancingRuleListResult
from ._models_py3 import LoadBalancerProbeListResult
from ._models_py3 import LoadBalancerSku
from ._models_py3 import LoadBalancingRule
from ._models_py3 import LocalNetworkGateway
from ._models_py3 import LocalNetworkGatewayListResult
from ._models_py3 import LogSpecification
from ._models_py3 import MetricSpecification
from ._models_py3 import NetworkInterface
from ._models_py3 import NetworkInterfaceAssociation
from ._models_py3 import NetworkInterfaceDnsSettings
from ._models_py3 import NetworkInterfaceIPConfiguration
from ._models_py3 import NetworkInterfaceIPConfigurationListResult
from ._models_py3 import NetworkInterfaceListResult
from ._models_py3 import NetworkInterfaceLoadBalancerListResult
from ._models_py3 import NetworkSecurityGroup
from ._models_py3 import NetworkSecurityGroupListResult
from ._models_py3 import NetworkWatcher
from ._models_py3 import NetworkWatcherListResult
from ._models_py3 import NextHopParameters
from ._models_py3 import NextHopResult
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import OperationPropertiesFormatServiceSpecification
from ._models_py3 import OutboundNatRule
from ._models_py3 import PacketCapture
from ._models_py3 import PacketCaptureFilter
from ._models_py3 import PacketCaptureListResult
from ._models_py3 import PacketCaptureParameters
from ._models_py3 import PacketCaptureQueryStatusResult
from ._models_py3 import PacketCaptureResult
from ._models_py3 import PacketCaptureResultProperties
from ._models_py3 import PacketCaptureStorageLocation
from ._models_py3 import PatchRouteFilter
from ._models_py3 import PatchRouteFilterRule
from ._models_py3 import Probe
from ._models_py3 import PublicIPAddress
from ._models_py3 import PublicIPAddressDnsSettings
from ._models_py3 import PublicIPAddressListResult
from ._models_py3 import PublicIPAddressSku
from ._models_py3 import QueryTroubleshootingParameters
from ._models_py3 import Resource
from ._models_py3 import ResourceNavigationLink
from ._models_py3 import RetentionPolicyParameters
from ._models_py3 import Route
from ._models_py3 import RouteFilter
from ._models_py3 import RouteFilterListResult
from ._models_py3 import RouteFilterRule
from ._models_py3 import RouteFilterRuleListResult
from ._models_py3 import RouteListResult
from ._models_py3 import RouteTable
from ._models_py3 import RouteTableListResult
from ._models_py3 import SecurityGroupNetworkInterface
from ._models_py3 import SecurityGroupViewParameters
from ._models_py3 import SecurityGroupViewResult
from ._models_py3 import SecurityRule
from ._models_py3 import SecurityRuleAssociations
from ._models_py3 import SecurityRuleListResult
from ._models_py3 import ServiceEndpointPropertiesFormat
from ._models_py3 import SubResource
from ._models_py3 import Subnet
from ._models_py3 import SubnetAssociation
from ._models_py3 import SubnetListResult
from ._models_py3 import TagsObject
from ._models_py3 import Topology
from ._models_py3 import TopologyAssociation
from ._models_py3 import TopologyParameters
from ._models_py3 import TopologyResource
from ._models_py3 import TroubleshootingDetails
from ._models_py3 import TroubleshootingParameters
from ._models_py3 import TroubleshootingRecommendedActions
from ._models_py3 import TroubleshootingResult
from ._models_py3 import TunnelConnectionHealth
from ._models_py3 import Usage
from ._models_py3 import UsageName
from ._models_py3 import UsagesListResult
from ._models_py3 import VerificationIPFlowParameters
from ._models_py3 import VerificationIPFlowResult
from ._models_py3 import VirtualNetwork
from ._models_py3 import VirtualNetworkConnectionGatewayReference
from ._models_py3 import VirtualNetworkGateway
from ._models_py3 import VirtualNetworkGatewayConnection
from ._models_py3 import VirtualNetworkGatewayConnectionListEntity
from ._models_py3 import VirtualNetworkGatewayConnectionListResult
from ._models_py3 import VirtualNetworkGatewayIPConfiguration
from ._models_py3 import VirtualNetworkGatewayListConnectionsResult
from ._models_py3 import VirtualNetworkGatewayListResult
from ._models_py3 import VirtualNetworkGatewaySku
from ._models_py3 import VirtualNetworkListResult
from ._models_py3 import VirtualNetworkListUsageResult
from ._models_py3 import VirtualNetworkPeering
from ._models_py3 import VirtualNetworkPeeringListResult
from ._models_py3 import VirtualNetworkUsage
from ._models_py3 import VirtualNetworkUsageName
from ._models_py3 import VpnClientConfiguration
from ._models_py3 import VpnClientParameters
from ._models_py3 import VpnClientRevokedCertificate
from ._models_py3 import VpnClientRootCertificate
from ._models_py3 import VpnDeviceScriptParameters
except (SyntaxError, ImportError):
from ._models import AddressSpace # type: ignore
from ._models import ApplicationGateway # type: ignore
from ._models import ApplicationGatewayAuthenticationCertificate # type: ignore
from ._models import ApplicationGatewayAvailableSslOptions # type: ignore
from ._models import ApplicationGatewayAvailableSslPredefinedPolicies # type: ignore
from ._models import ApplicationGatewayAvailableWafRuleSetsResult # type: ignore
from ._models import ApplicationGatewayBackendAddress # type: ignore
from ._models import ApplicationGatewayBackendAddressPool # type: ignore
from ._models import ApplicationGatewayBackendHealth # type: ignore
from ._models import ApplicationGatewayBackendHealthHttpSettings # type: ignore
from ._models import ApplicationGatewayBackendHealthPool # type: ignore
from ._models import ApplicationGatewayBackendHealthServer # type: ignore
from ._models import ApplicationGatewayBackendHttpSettings # type: ignore
from ._models import ApplicationGatewayConnectionDraining # type: ignore
from ._models import ApplicationGatewayFirewallDisabledRuleGroup # type: ignore
from ._models import ApplicationGatewayFirewallRule # type: ignore
from ._models import ApplicationGatewayFirewallRuleGroup # type: ignore
from ._models import ApplicationGatewayFirewallRuleSet # type: ignore
from ._models import ApplicationGatewayFrontendIPConfiguration # type: ignore
from ._models import ApplicationGatewayFrontendPort # type: ignore
from ._models import ApplicationGatewayHttpListener # type: ignore
from ._models import ApplicationGatewayIPConfiguration # type: ignore
from ._models import ApplicationGatewayListResult # type: ignore
from ._models import ApplicationGatewayPathRule # type: ignore
from ._models import ApplicationGatewayProbe # type: ignore
from ._models import ApplicationGatewayProbeHealthResponseMatch # type: ignore
from ._models import ApplicationGatewayRedirectConfiguration # type: ignore
from ._models import ApplicationGatewayRequestRoutingRule # type: ignore
from ._models import ApplicationGatewaySku # type: ignore
from ._models import ApplicationGatewaySslCertificate # type: ignore
from ._models import ApplicationGatewaySslPolicy # type: ignore
from ._models import ApplicationGatewaySslPredefinedPolicy # type: ignore
from ._models import ApplicationGatewayUrlPathMap # type: ignore
from ._models import ApplicationGatewayWebApplicationFirewallConfiguration # type: ignore
from ._models import ApplicationSecurityGroup # type: ignore
from ._models import ApplicationSecurityGroupListResult # type: ignore
from ._models import AuthorizationListResult # type: ignore
from ._models import Availability # type: ignore
from ._models import AvailableProvidersList # type: ignore
from ._models import AvailableProvidersListCity # type: ignore
from ._models import AvailableProvidersListCountry # type: ignore
from ._models import AvailableProvidersListParameters # type: ignore
from ._models import AvailableProvidersListState # type: ignore
from ._models import AzureAsyncOperationResult # type: ignore
from ._models import AzureReachabilityReport # type: ignore
from ._models import AzureReachabilityReportItem # type: ignore
from ._models import AzureReachabilityReportLatencyInfo # type: ignore
from ._models import AzureReachabilityReportLocation # type: ignore
from ._models import AzureReachabilityReportParameters # type: ignore
from ._models import BGPCommunity # type: ignore
from ._models import BackendAddressPool # type: ignore
from ._models import BgpPeerStatus # type: ignore
from ._models import BgpPeerStatusListResult # type: ignore
from ._models import BgpServiceCommunity # type: ignore
from ._models import BgpServiceCommunityListResult # type: ignore
from ._models import BgpSettings # type: ignore
from ._models import ConnectionMonitor # type: ignore
from ._models import ConnectionMonitorDestination # type: ignore
from ._models import ConnectionMonitorListResult # type: ignore
from ._models import ConnectionMonitorParameters # type: ignore
from ._models import ConnectionMonitorQueryResult # type: ignore
from ._models import ConnectionMonitorResult # type: ignore
from ._models import ConnectionMonitorResultProperties # type: ignore
from ._models import ConnectionMonitorSource # type: ignore
from ._models import ConnectionResetSharedKey # type: ignore
from ._models import ConnectionSharedKey # type: ignore
from ._models import ConnectionStateSnapshot # type: ignore
from ._models import ConnectivityDestination # type: ignore
from ._models import ConnectivityHop # type: ignore
from ._models import ConnectivityInformation # type: ignore
from ._models import ConnectivityIssue # type: ignore
from ._models import ConnectivityParameters # type: ignore
from ._models import ConnectivitySource # type: ignore
from ._models import DhcpOptions # type: ignore
from ._models import Dimension # type: ignore
from ._models import DnsNameAvailabilityResult # type: ignore
from ._models import EffectiveNetworkSecurityGroup # type: ignore
from ._models import EffectiveNetworkSecurityGroupAssociation # type: ignore
from ._models import EffectiveNetworkSecurityGroupListResult # type: ignore
from ._models import EffectiveNetworkSecurityRule # type: ignore
from ._models import EffectiveRoute # type: ignore
from ._models import EffectiveRouteListResult # type: ignore
from ._models import EndpointServiceResult # type: ignore
from ._models import EndpointServicesListResult # type: ignore
from ._models import Error # type: ignore
from ._models import ErrorDetails # type: ignore
from ._models import ExpressRouteCircuit # type: ignore
from ._models import ExpressRouteCircuitArpTable # type: ignore
from ._models import ExpressRouteCircuitAuthorization # type: ignore
from ._models import ExpressRouteCircuitListResult # type: ignore
from ._models import ExpressRouteCircuitPeering # type: ignore
from ._models import ExpressRouteCircuitPeeringConfig # type: ignore
from ._models import ExpressRouteCircuitPeeringListResult # type: ignore
from ._models import ExpressRouteCircuitRoutesTable # type: ignore
from ._models import ExpressRouteCircuitRoutesTableSummary # type: ignore
from ._models import ExpressRouteCircuitServiceProviderProperties # type: ignore
from ._models import ExpressRouteCircuitSku # type: ignore
from ._models import ExpressRouteCircuitStats # type: ignore
from ._models import ExpressRouteCircuitsArpTableListResult # type: ignore
from ._models import ExpressRouteCircuitsRoutesTableListResult # type: ignore
from ._models import ExpressRouteCircuitsRoutesTableSummaryListResult # type: ignore
from ._models import ExpressRouteServiceProvider # type: ignore
from ._models import ExpressRouteServiceProviderBandwidthsOffered # type: ignore
from ._models import ExpressRouteServiceProviderListResult # type: ignore
from ._models import FlowLogInformation # type: ignore
from ._models import FlowLogStatusParameters # type: ignore
from ._models import FrontendIPConfiguration # type: ignore
from ._models import GatewayRoute # type: ignore
from ._models import GatewayRouteListResult # type: ignore
from ._models import IPAddressAvailabilityResult # type: ignore
from ._models import IPConfiguration # type: ignore
from ._models import InboundNatPool # type: ignore
from ._models import InboundNatRule # type: ignore
from ._models import InboundNatRuleListResult # type: ignore
from ._models import IpsecPolicy # type: ignore
from ._models import Ipv6ExpressRouteCircuitPeeringConfig # type: ignore
from ._models import LoadBalancer # type: ignore
from ._models import LoadBalancerBackendAddressPoolListResult # type: ignore
from ._models import LoadBalancerFrontendIPConfigurationListResult # type: ignore
from ._models import LoadBalancerListResult # type: ignore
from ._models import LoadBalancerLoadBalancingRuleListResult # type: ignore
from ._models import LoadBalancerProbeListResult # type: ignore
from ._models import LoadBalancerSku # type: ignore
from ._models import LoadBalancingRule # type: ignore
from ._models import LocalNetworkGateway # type: ignore
from ._models import LocalNetworkGatewayListResult # type: ignore
from ._models import LogSpecification # type: ignore
from ._models import MetricSpecification # type: ignore
from ._models import NetworkInterface # type: ignore
from ._models import NetworkInterfaceAssociation # type: ignore
from ._models import NetworkInterfaceDnsSettings # type: ignore
from ._models import NetworkInterfaceIPConfiguration # type: ignore
from ._models import NetworkInterfaceIPConfigurationListResult # type: ignore
from ._models import NetworkInterfaceListResult # type: ignore
from ._models import NetworkInterfaceLoadBalancerListResult # type: ignore
from ._models import NetworkSecurityGroup # type: ignore
from ._models import NetworkSecurityGroupListResult # type: ignore
from ._models import NetworkWatcher # type: ignore
from ._models import NetworkWatcherListResult # type: ignore
from ._models import NextHopParameters # type: ignore
from ._models import NextHopResult # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import OperationPropertiesFormatServiceSpecification # type: ignore
from ._models import OutboundNatRule # type: ignore
from ._models import PacketCapture # type: ignore
from ._models import PacketCaptureFilter # type: ignore
from ._models import PacketCaptureListResult # type: ignore
from ._models import PacketCaptureParameters # type: ignore
from ._models import PacketCaptureQueryStatusResult # type: ignore
from ._models import PacketCaptureResult # type: ignore
from ._models import PacketCaptureResultProperties # type: ignore
from ._models import PacketCaptureStorageLocation # type: ignore
from ._models import PatchRouteFilter # type: ignore
from ._models import PatchRouteFilterRule # type: ignore
from ._models import Probe # type: ignore
from ._models import PublicIPAddress # type: ignore
from ._models import PublicIPAddressDnsSettings # type: ignore
from ._models import PublicIPAddressListResult # type: ignore
from ._models import PublicIPAddressSku # type: ignore
from ._models import QueryTroubleshootingParameters # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceNavigationLink # type: ignore
from ._models import RetentionPolicyParameters # type: ignore
from ._models import Route # type: ignore
from ._models import RouteFilter # type: ignore
from ._models import RouteFilterListResult # type: ignore
from ._models import RouteFilterRule # type: ignore
from ._models import RouteFilterRuleListResult # type: ignore
from ._models import RouteListResult # type: ignore
from ._models import RouteTable # type: ignore
from ._models import RouteTableListResult # type: ignore
from ._models import SecurityGroupNetworkInterface # type: ignore
from ._models import SecurityGroupViewParameters # type: ignore
from ._models import SecurityGroupViewResult # type: ignore
from ._models import SecurityRule # type: ignore
from ._models import SecurityRuleAssociations # type: ignore
from ._models import SecurityRuleListResult # type: ignore
from ._models import ServiceEndpointPropertiesFormat # type: ignore
from ._models import SubResource # type: ignore
from ._models import Subnet # type: ignore
from ._models import SubnetAssociation # type: ignore
from ._models import SubnetListResult # type: ignore
from ._models import TagsObject # type: ignore
from ._models import Topology # type: ignore
from ._models import TopologyAssociation # type: ignore
from ._models import TopologyParameters # type: ignore
from ._models import TopologyResource # type: ignore
from ._models import TroubleshootingDetails # type: ignore
from ._models import TroubleshootingParameters # type: ignore
from ._models import TroubleshootingRecommendedActions # type: ignore
from ._models import TroubleshootingResult # type: ignore
from ._models import TunnelConnectionHealth # type: ignore
from ._models import Usage # type: ignore
from ._models import UsageName # type: ignore
from ._models import UsagesListResult # type: ignore
from ._models import VerificationIPFlowParameters # type: ignore
from ._models import VerificationIPFlowResult # type: ignore
from ._models import VirtualNetwork # type: ignore
from ._models import VirtualNetworkConnectionGatewayReference # type: ignore
from ._models import VirtualNetworkGateway # type: ignore
from ._models import VirtualNetworkGatewayConnection # type: ignore
from ._models import VirtualNetworkGatewayConnectionListEntity # type: ignore
from ._models import VirtualNetworkGatewayConnectionListResult # type: ignore
from ._models import VirtualNetworkGatewayIPConfiguration # type: ignore
from ._models import VirtualNetworkGatewayListConnectionsResult # type: ignore
from ._models import VirtualNetworkGatewayListResult # type: ignore
from ._models import VirtualNetworkGatewaySku # type: ignore
from ._models import VirtualNetworkListResult # type: ignore
from ._models import VirtualNetworkListUsageResult # type: ignore
from ._models import VirtualNetworkPeering # type: ignore
from ._models import VirtualNetworkPeeringListResult # type: ignore
from ._models import VirtualNetworkUsage # type: ignore
from ._models import VirtualNetworkUsageName # type: ignore
from ._models import VpnClientConfiguration # type: ignore
from ._models import VpnClientParameters # type: ignore
from ._models import VpnClientRevokedCertificate # type: ignore
from ._models import VpnClientRootCertificate # type: ignore
from ._models import VpnDeviceScriptParameters # type: ignore
from ._network_management_client_enums import (
Access,
ApplicationGatewayBackendHealthServerHealth,
ApplicationGatewayCookieBasedAffinity,
ApplicationGatewayFirewallMode,
ApplicationGatewayOperationalState,
ApplicationGatewayProtocol,
ApplicationGatewayRedirectType,
ApplicationGatewayRequestRoutingRuleType,
ApplicationGatewaySkuName,
ApplicationGatewaySslCipherSuite,
ApplicationGatewaySslPolicyName,
ApplicationGatewaySslPolicyType,
ApplicationGatewaySslProtocol,
ApplicationGatewayTier,
AssociationType,
AuthenticationMethod,
AuthorizationUseStatus,
BgpPeerState,
ConnectionState,
ConnectionStatus,
DhGroup,
Direction,
EffectiveRouteSource,
EffectiveRouteState,
EffectiveSecurityRuleProtocol,
EvaluationState,
ExpressRouteCircuitPeeringAdvertisedPublicPrefixState,
ExpressRouteCircuitPeeringState,
ExpressRouteCircuitPeeringType,
ExpressRouteCircuitSkuFamily,
ExpressRouteCircuitSkuTier,
IPAllocationMethod,
IPVersion,
IkeEncryption,
IkeIntegrity,
IpsecEncryption,
IpsecIntegrity,
IssueType,
LoadBalancerSkuName,
LoadDistribution,
NetworkOperationStatus,
NextHopType,
Origin,
PcError,
PcProtocol,
PcStatus,
PfsGroup,
ProbeProtocol,
ProcessorArchitecture,
Protocol,
ProvisioningState,
PublicIPAddressSkuName,
RouteFilterRuleType,
RouteNextHopType,
SecurityRuleAccess,
SecurityRuleDirection,
SecurityRuleProtocol,
ServiceProviderProvisioningState,
Severity,
TransportProtocol,
UsageUnit,
VirtualNetworkGatewayConnectionStatus,
VirtualNetworkGatewayConnectionType,
VirtualNetworkGatewaySkuName,
VirtualNetworkGatewaySkuTier,
VirtualNetworkGatewayType,
VirtualNetworkPeeringState,
VpnClientProtocol,
VpnType,
)
__all__ = [
'AddressSpace',
'ApplicationGateway',
'ApplicationGatewayAuthenticationCertificate',
'ApplicationGatewayAvailableSslOptions',
'ApplicationGatewayAvailableSslPredefinedPolicies',
'ApplicationGatewayAvailableWafRuleSetsResult',
'ApplicationGatewayBackendAddress',
'ApplicationGatewayBackendAddressPool',
'ApplicationGatewayBackendHealth',
'ApplicationGatewayBackendHealthHttpSettings',
'ApplicationGatewayBackendHealthPool',
'ApplicationGatewayBackendHealthServer',
'ApplicationGatewayBackendHttpSettings',
'ApplicationGatewayConnectionDraining',
'ApplicationGatewayFirewallDisabledRuleGroup',
'ApplicationGatewayFirewallRule',
'ApplicationGatewayFirewallRuleGroup',
'ApplicationGatewayFirewallRuleSet',
'ApplicationGatewayFrontendIPConfiguration',
'ApplicationGatewayFrontendPort',
'ApplicationGatewayHttpListener',
'ApplicationGatewayIPConfiguration',
'ApplicationGatewayListResult',
'ApplicationGatewayPathRule',
'ApplicationGatewayProbe',
'ApplicationGatewayProbeHealthResponseMatch',
'ApplicationGatewayRedirectConfiguration',
'ApplicationGatewayRequestRoutingRule',
'ApplicationGatewaySku',
'ApplicationGatewaySslCertificate',
'ApplicationGatewaySslPolicy',
'ApplicationGatewaySslPredefinedPolicy',
'ApplicationGatewayUrlPathMap',
'ApplicationGatewayWebApplicationFirewallConfiguration',
'ApplicationSecurityGroup',
'ApplicationSecurityGroupListResult',
'AuthorizationListResult',
'Availability',
'AvailableProvidersList',
'AvailableProvidersListCity',
'AvailableProvidersListCountry',
'AvailableProvidersListParameters',
'AvailableProvidersListState',
'AzureAsyncOperationResult',
'AzureReachabilityReport',
'AzureReachabilityReportItem',
'AzureReachabilityReportLatencyInfo',
'AzureReachabilityReportLocation',
'AzureReachabilityReportParameters',
'BGPCommunity',
'BackendAddressPool',
'BgpPeerStatus',
'BgpPeerStatusListResult',
'BgpServiceCommunity',
'BgpServiceCommunityListResult',
'BgpSettings',
'ConnectionMonitor',
'ConnectionMonitorDestination',
'ConnectionMonitorListResult',
'ConnectionMonitorParameters',
'ConnectionMonitorQueryResult',
'ConnectionMonitorResult',
'ConnectionMonitorResultProperties',
'ConnectionMonitorSource',
'ConnectionResetSharedKey',
'ConnectionSharedKey',
'ConnectionStateSnapshot',
'ConnectivityDestination',
'ConnectivityHop',
'ConnectivityInformation',
'ConnectivityIssue',
'ConnectivityParameters',
'ConnectivitySource',
'DhcpOptions',
'Dimension',
'DnsNameAvailabilityResult',
'EffectiveNetworkSecurityGroup',
'EffectiveNetworkSecurityGroupAssociation',
'EffectiveNetworkSecurityGroupListResult',
'EffectiveNetworkSecurityRule',
'EffectiveRoute',
'EffectiveRouteListResult',
'EndpointServiceResult',
'EndpointServicesListResult',
'Error',
'ErrorDetails',
'ExpressRouteCircuit',
'ExpressRouteCircuitArpTable',
'ExpressRouteCircuitAuthorization',
'ExpressRouteCircuitListResult',
'ExpressRouteCircuitPeering',
'ExpressRouteCircuitPeeringConfig',
'ExpressRouteCircuitPeeringListResult',
'ExpressRouteCircuitRoutesTable',
'ExpressRouteCircuitRoutesTableSummary',
'ExpressRouteCircuitServiceProviderProperties',
'ExpressRouteCircuitSku',
'ExpressRouteCircuitStats',
'ExpressRouteCircuitsArpTableListResult',
'ExpressRouteCircuitsRoutesTableListResult',
'ExpressRouteCircuitsRoutesTableSummaryListResult',
'ExpressRouteServiceProvider',
'ExpressRouteServiceProviderBandwidthsOffered',
'ExpressRouteServiceProviderListResult',
'FlowLogInformation',
'FlowLogStatusParameters',
'FrontendIPConfiguration',
'GatewayRoute',
'GatewayRouteListResult',
'IPAddressAvailabilityResult',
'IPConfiguration',
'InboundNatPool',
'InboundNatRule',
'InboundNatRuleListResult',
'IpsecPolicy',
'Ipv6ExpressRouteCircuitPeeringConfig',
'LoadBalancer',
'LoadBalancerBackendAddressPoolListResult',
'LoadBalancerFrontendIPConfigurationListResult',
'LoadBalancerListResult',
'LoadBalancerLoadBalancingRuleListResult',
'LoadBalancerProbeListResult',
'LoadBalancerSku',
'LoadBalancingRule',
'LocalNetworkGateway',
'LocalNetworkGatewayListResult',
'LogSpecification',
'MetricSpecification',
'NetworkInterface',
'NetworkInterfaceAssociation',
'NetworkInterfaceDnsSettings',
'NetworkInterfaceIPConfiguration',
'NetworkInterfaceIPConfigurationListResult',
'NetworkInterfaceListResult',
'NetworkInterfaceLoadBalancerListResult',
'NetworkSecurityGroup',
'NetworkSecurityGroupListResult',
'NetworkWatcher',
'NetworkWatcherListResult',
'NextHopParameters',
'NextHopResult',
'Operation',
'OperationDisplay',
'OperationListResult',
'OperationPropertiesFormatServiceSpecification',
'OutboundNatRule',
'PacketCapture',
'PacketCaptureFilter',
'PacketCaptureListResult',
'PacketCaptureParameters',
'PacketCaptureQueryStatusResult',
'PacketCaptureResult',
'PacketCaptureResultProperties',
'PacketCaptureStorageLocation',
'PatchRouteFilter',
'PatchRouteFilterRule',
'Probe',
'PublicIPAddress',
'PublicIPAddressDnsSettings',
'PublicIPAddressListResult',
'PublicIPAddressSku',
'QueryTroubleshootingParameters',
'Resource',
'ResourceNavigationLink',
'RetentionPolicyParameters',
'Route',
'RouteFilter',
'RouteFilterListResult',
'RouteFilterRule',
'RouteFilterRuleListResult',
'RouteListResult',
'RouteTable',
'RouteTableListResult',
'SecurityGroupNetworkInterface',
'SecurityGroupViewParameters',
'SecurityGroupViewResult',
'SecurityRule',
'SecurityRuleAssociations',
'SecurityRuleListResult',
'ServiceEndpointPropertiesFormat',
'SubResource',
'Subnet',
'SubnetAssociation',
'SubnetListResult',
'TagsObject',
'Topology',
'TopologyAssociation',
'TopologyParameters',
'TopologyResource',
'TroubleshootingDetails',
'TroubleshootingParameters',
'TroubleshootingRecommendedActions',
'TroubleshootingResult',
'TunnelConnectionHealth',
'Usage',
'UsageName',
'UsagesListResult',
'VerificationIPFlowParameters',
'VerificationIPFlowResult',
'VirtualNetwork',
'VirtualNetworkConnectionGatewayReference',
'VirtualNetworkGateway',
'VirtualNetworkGatewayConnection',
'VirtualNetworkGatewayConnectionListEntity',
'VirtualNetworkGatewayConnectionListResult',
'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewayListConnectionsResult',
'VirtualNetworkGatewayListResult',
'VirtualNetworkGatewaySku',
'VirtualNetworkListResult',
'VirtualNetworkListUsageResult',
'VirtualNetworkPeering',
'VirtualNetworkPeeringListResult',
'VirtualNetworkUsage',
'VirtualNetworkUsageName',
'VpnClientConfiguration',
'VpnClientParameters',
'VpnClientRevokedCertificate',
'VpnClientRootCertificate',
'VpnDeviceScriptParameters',
'Access',
'ApplicationGatewayBackendHealthServerHealth',
'ApplicationGatewayCookieBasedAffinity',
'ApplicationGatewayFirewallMode',
'ApplicationGatewayOperationalState',
'ApplicationGatewayProtocol',
'ApplicationGatewayRedirectType',
'ApplicationGatewayRequestRoutingRuleType',
'ApplicationGatewaySkuName',
'ApplicationGatewaySslCipherSuite',
'ApplicationGatewaySslPolicyName',
'ApplicationGatewaySslPolicyType',
'ApplicationGatewaySslProtocol',
'ApplicationGatewayTier',
'AssociationType',
'AuthenticationMethod',
'AuthorizationUseStatus',
'BgpPeerState',
'ConnectionState',
'ConnectionStatus',
'DhGroup',
'Direction',
'EffectiveRouteSource',
'EffectiveRouteState',
'EffectiveSecurityRuleProtocol',
'EvaluationState',
'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState',
'ExpressRouteCircuitPeeringState',
'ExpressRouteCircuitPeeringType',
'ExpressRouteCircuitSkuFamily',
'ExpressRouteCircuitSkuTier',
'IPAllocationMethod',
'IPVersion',
'IkeEncryption',
'IkeIntegrity',
'IpsecEncryption',
'IpsecIntegrity',
'IssueType',
'LoadBalancerSkuName',
'LoadDistribution',
'NetworkOperationStatus',
'NextHopType',
'Origin',
'PcError',
'PcProtocol',
'PcStatus',
'PfsGroup',
'ProbeProtocol',
'ProcessorArchitecture',
'Protocol',
'ProvisioningState',
'PublicIPAddressSkuName',
'RouteFilterRuleType',
'RouteNextHopType',
'SecurityRuleAccess',
'SecurityRuleDirection',
'SecurityRuleProtocol',
'ServiceProviderProvisioningState',
'Severity',
'TransportProtocol',
'UsageUnit',
'VirtualNetworkGatewayConnectionStatus',
'VirtualNetworkGatewayConnectionType',
'VirtualNetworkGatewaySkuName',
'VirtualNetworkGatewaySkuTier',
'VirtualNetworkGatewayType',
'VirtualNetworkPeeringState',
'VpnClientProtocol',
'VpnType',
]
|
|
import mock
import functools
from modularodm import Q
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from api_tests import utils as test_utils
from framework.auth.core import Auth
from osf_tests.factories import PreprintFactory, AuthUserFactory, ProjectFactory, SubjectFactory, PreprintProviderFactory
from osf.models import PreprintService, NodeLicense
from website.project.licenses import ensure_licenses
from website.project.signals import contributor_added
from website.identifiers.utils import build_ezid_metadata
from tests.base import ApiTestCase, fake, capture_signals
ensure_licenses = functools.partial(ensure_licenses, warn=False)
def build_preprint_update_payload(node_id, attributes=None, relationships=None):
payload = {
"data": {
"id": node_id,
"attributes": attributes,
"relationships": relationships
}
}
return payload
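# A minimal, hypothetical sketch of the payload shape the helper above
# produces (illustrative id/attribute values; not used by the tests):
def _example_update_payload():  # pragma: no cover - illustration only
    payload = build_preprint_update_payload('abc12', attributes={'article_doi': '10.123/456'})
    assert payload == {
        'data': {
            'id': 'abc12',
            'attributes': {'article_doi': '10.123/456'},
            'relationships': None
        }
    }
    return payload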
class TestPreprintDetail(ApiTestCase):
def setUp(self):
super(TestPreprintDetail, self).setUp()
self.user = AuthUserFactory()
self.preprint = PreprintFactory(creator=self.user)
self.url = '/{}preprints/{}/'.format(API_BASE, self.preprint._id)
self.res = self.app.get(self.url)
self.data = self.res.json['data']
def test_preprint_detail_success(self):
assert_equal(self.res.status_code, 200)
assert_equal(self.res.content_type, 'application/vnd.api+json')
def test_preprint_top_level(self):
assert_equal(self.data['type'], 'preprints')
assert_equal(self.data['id'], self.preprint._id)
def test_preprint_node_deleted_detail_failure(self):
deleted_node = ProjectFactory(creator=self.user, is_deleted=True)
deleted_preprint = PreprintFactory(project=deleted_node, creator=self.user)
url = '/{}preprints/{}/'.format(API_BASE, deleted_preprint._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.content_type, 'application/vnd.api+json')
class TestPreprintDelete(ApiTestCase):
def setUp(self):
super(TestPreprintDelete, self).setUp()
self.user = AuthUserFactory()
self.unpublished_preprint = PreprintFactory(creator=self.user, is_published=False)
self.published_preprint = PreprintFactory(creator=self.user)
self.url = '/{}preprints/{{}}/'.format(API_BASE)
def test_can_delete_unpublished(self):
previous_ids = list(PreprintService.objects.all().values_list('pk', flat=True))
self.app.delete(self.url.format(self.unpublished_preprint._id), auth=self.user.auth)
remaining_ids = list(PreprintService.objects.all().values_list('pk', flat=True))
assert_in(self.unpublished_preprint.pk, previous_ids)
assert_not_in(self.unpublished_preprint.pk, remaining_ids)
def test_cannot_delete_published(self):
previous_ids = list(PreprintService.objects.all().values_list('pk', flat=True))
res = self.app.delete(self.url.format(self.published_preprint._id), auth=self.user.auth, expect_errors=True)
remaining_ids = list(PreprintService.objects.all().values_list('pk', flat=True))
assert_equal(res.status_code, 409)
assert_equal(previous_ids, remaining_ids)
assert_in(self.published_preprint.pk, remaining_ids)
def test_deletes_only_requested_document(self):
previous_ids = list(PreprintService.objects.all().values_list('pk', flat=True))
res = self.app.delete(self.url.format(self.unpublished_preprint._id), auth=self.user.auth)
remaining_ids = list(PreprintService.objects.all().values_list('pk', flat=True))
assert_in(self.unpublished_preprint.pk, previous_ids)
assert_in(self.published_preprint.pk, previous_ids)
assert_not_in(self.unpublished_preprint.pk, remaining_ids)
assert_in(self.published_preprint.pk, remaining_ids)
class TestPreprintUpdate(ApiTestCase):
def setUp(self):
super(TestPreprintUpdate, self).setUp()
self.user = AuthUserFactory()
self.preprint = PreprintFactory(creator=self.user)
self.url = '/{}preprints/{}/'.format(API_BASE, self.preprint._id)
self.subject = SubjectFactory()
def test_update_preprint_permission_denied(self):
update_doi_payload = build_preprint_update_payload(self.preprint._id, attributes={'article_doi': '10.123/456/789'})
noncontrib = AuthUserFactory()
res = self.app.patch_json_api(self.url, update_doi_payload, auth=noncontrib.auth, expect_errors=True)
assert_equal(res.status_code, 403)
res = self.app.patch_json_api(self.url, update_doi_payload, expect_errors=True)
assert_equal(res.status_code, 401)
def test_update_subjects(self):
assert_false(self.preprint.subjects.filter(_id=self.subject._id).exists())
update_subjects_payload = build_preprint_update_payload(self.preprint._id, attributes={"subjects": [[self.subject._id]]})
res = self.app.patch_json_api(self.url, update_subjects_payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.preprint.reload()
assert_true(self.preprint.subjects.filter(_id=self.subject._id).exists())
def test_update_invalid_subjects(self):
subjects = self.preprint.subjects
update_subjects_payload = build_preprint_update_payload(self.preprint._id, attributes={"subjects": [['wwe']]})
res = self.app.patch_json_api(self.url, update_subjects_payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
self.preprint.reload()
assert_equal(self.preprint.subjects, subjects)
def test_update_primary_file(self):
new_file = test_utils.create_test_file(self.preprint.node, self.user, filename='openupthatwindow.pdf')
relationships = {
"primary_file": {
"data": {
"type": "file",
"id": new_file._id
}
}
}
assert_not_equal(self.preprint.primary_file, new_file)
update_file_payload = build_preprint_update_payload(self.preprint._id, relationships=relationships)
res = self.app.patch_json_api(self.url, update_file_payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.preprint.node.reload()
assert_equal(self.preprint.primary_file, new_file)
# check logs
log = self.preprint.node.logs.latest()
assert_equal(log.action, 'preprint_file_updated')
assert_equal(log.params.get('preprint'), self.preprint._id)
def test_new_primary_not_in_node(self):
project = ProjectFactory()
file_for_project = test_utils.create_test_file(project, self.user, filename='letoutthatantidote.pdf')
relationships = {
"primary_file": {
"data": {
"type": "file",
"id": file_for_project._id
}
}
}
update_file_payload = build_preprint_update_payload(self.preprint._id, relationships=relationships)
res = self.app.patch_json_api(self.url, update_file_payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
self.preprint.reload()
assert_not_equal(self.preprint.primary_file, file_for_project)
def test_update_article_doi(self):
new_doi = '10.1234/ASDFASDF'
assert_not_equal(self.preprint.article_doi, new_doi)
update_doi_payload = build_preprint_update_payload(self.preprint._id, attributes={"doi": new_doi})
res = self.app.patch_json_api(self.url, update_doi_payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.preprint.node.reload()
assert_equal(self.preprint.article_doi, new_doi)
preprint_detail = self.app.get(self.url, auth=self.user.auth).json['data']
assert_equal(preprint_detail['links']['doi'], 'https://dx.doi.org/{}'.format(new_doi))
def test_write_contrib_cannot_set_primary_file(self):
user_two = AuthUserFactory()
self.preprint.node.add_contributor(user_two, permissions=['read', 'write'], auth=Auth(self.user), save=True)
new_file = test_utils.create_test_file(self.preprint.node, self.user, filename='openupthatwindow.pdf')
data = {
'data':{
'type': 'primary_file',
'id': self.preprint._id,
'attributes': {},
'relationships': {
'primary_file': {
'data': {
'type': 'file',
'id': new_file._id
}
}
}
}
}
res = self.app.patch_json_api(self.url, data, auth=user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_noncontrib_cannot_set_primary_file(self):
user_two = AuthUserFactory()
new_file = test_utils.create_test_file(self.preprint.node, self.user, filename='openupthatwindow.pdf')
data = {
'data':{
'type': 'primary_file',
'id': self.preprint._id,
'attributes': {},
'relationships': {
'primary_file': {
'data': {
'type': 'file',
'id': new_file._id
}
}
}
}
}
res = self.app.patch_json_api(self.url, data, auth=user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_write_contrib_cannot_set_subjects(self):
user_two = AuthUserFactory()
self.preprint.node.add_contributor(user_two, permissions=['read', 'write'], auth=Auth(self.user), save=True)
assert_false(self.preprint.subjects.filter(_id=self.subject._id).exists())
update_subjects_payload = build_preprint_update_payload(self.preprint._id, attributes={"subjects": [[self.subject._id]]})
res = self.app.patch_json_api(self.url, update_subjects_payload, auth=user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_false(self.preprint.subjects.filter(_id=self.subject._id).exists())
def test_noncontrib_cannot_set_subjects(self):
user_two = AuthUserFactory()
assert_false(self.preprint.subjects.filter(_id=self.subject._id).exists())
update_subjects_payload = build_preprint_update_payload(self.preprint._id, attributes={"subjects": [[self.subject._id]]})
res = self.app.patch_json_api(self.url, update_subjects_payload, auth=user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_false(self.preprint.subjects.filter(_id=self.subject._id).exists())
def test_update_published(self):
unpublished = PreprintFactory(creator=self.user, is_published=False)
url = '/{}preprints/{}/'.format(API_BASE, unpublished._id)
payload = build_preprint_update_payload(unpublished._id, attributes={'is_published': True})
res = self.app.patch_json_api(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
unpublished.reload()
assert_true(unpublished.is_published)
@mock.patch('website.preprints.tasks.on_preprint_updated.s')
def test_update_preprint_task_called_on_api_update(self, mock_on_preprint_updated):
update_doi_payload = build_preprint_update_payload(self.preprint._id, attributes={'doi': '10.1234/ASDFASDF'})
self.app.patch_json_api(self.url, update_doi_payload, auth=self.user.auth)
self.preprint.node.reload()
assert mock_on_preprint_updated.called
class TestPreprintUpdateLicense(ApiTestCase):
def setUp(self):
super(TestPreprintUpdateLicense, self).setUp()
ensure_licenses()
self.admin_contributor = AuthUserFactory()
self.rw_contributor = AuthUserFactory()
self.read_contributor = AuthUserFactory()
self.non_contributor = AuthUserFactory()
self.preprint_provider = PreprintProviderFactory()
self.preprint = PreprintFactory(creator=self.admin_contributor, provider=self.preprint_provider)
self.preprint.node.add_contributor(self.rw_contributor, auth=Auth(self.admin_contributor))
self.preprint.node.add_contributor(self.read_contributor, auth=Auth(self.admin_contributor), permissions=['read'])
self.preprint.node.save()
self.cc0_license = NodeLicense.find_one(Q('name', 'eq', 'CC0 1.0 Universal'))
self.mit_license = NodeLicense.find_one(Q('name', 'eq', 'MIT License'))
self.no_license = NodeLicense.find_one(Q('name', 'eq', 'No license'))
self.preprint_provider.licenses_acceptable = [self.cc0_license, self.no_license]
self.preprint_provider.save()
self.url = '/{}preprints/{}/'.format(API_BASE, self.preprint._id)
def make_payload(self, node_id, license_id=None, license_year=None, copyright_holders=None):
attributes = {}
if license_year and copyright_holders:
attributes = {
'license_record': {
'year': license_year,
'copyright_holders': copyright_holders
}
}
elif license_year:
attributes = {
'license_record': {
'year': license_year
}
}
elif copyright_holders:
attributes = {
'license_record': {
'copyright_holders': copyright_holders
}
}
return {
'data': {
'id': node_id,
'attributes': attributes,
'relationships': {
'license': {
'data': {
'type': 'licenses',
'id': license_id
}
}
}
}
} if license_id else {
'data': {
'id': node_id,
'attributes': attributes
}
}
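# Payload shapes produced by make_payload above (illustrative values only):
# with a license_id, both attributes and a license relationship are sent, e.g.
#   {'data': {'id': '<preprint id>',
#             'attributes': {'license_record': {'year': '2015',
#                                               'copyright_holders': ['...']}},
#             'relationships': {'license': {'data': {'type': 'licenses',
#                                                    'id': '<license id>'}}}}}
# without a license_id, only the 'attributes' key is included.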
def make_request(self, url, data, auth=None, expect_errors=False):
return self.app.patch_json_api(url, data, auth=auth, expect_errors=expect_errors)
def test_admin_update_license_with_invalid_id(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id='thisisafakelicenseid'
)
assert_equal(self.preprint.license, None)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'Unable to find specified license.')
self.preprint.reload()
assert_equal(self.preprint.license, None)
def test_admin_can_update_license(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.cc0_license._id
)
assert_equal(self.preprint.license, None)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
self.preprint.reload()
assert_equal(self.preprint.license.node_license, self.cc0_license)
assert_equal(self.preprint.license.year, None)
assert_equal(self.preprint.license.copyright_holders, [])
# check logs
log = self.preprint.node.logs.latest()
assert_equal(log.action, 'preprint_license_updated')
assert_equal(log.params.get('preprint'), self.preprint._id)
def test_admin_can_update_license_record(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.no_license._id,
license_year='2015',
copyright_holders=['Bojack Horseman, Princess Carolyn']
)
assert_equal(self.preprint.license, None)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
self.preprint.reload()
assert_equal(self.preprint.license.node_license, self.no_license)
assert_equal(self.preprint.license.year, '2015')
assert_equal(self.preprint.license.copyright_holders, ['Bojack Horseman, Princess Carolyn'])
def test_rw_contributor_cannot_update_license(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.cc0_license._id
)
res = self.make_request(self.url, data, auth=self.rw_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'User must be an admin to update a preprint.')
def test_read_contributor_cannot_update_license(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.cc0_license._id
)
res = self.make_request(self.url, data, auth=self.read_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_non_contributor_cannot_update_license(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.cc0_license._id
)
res = self.make_request(self.url, data, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_unauthenticated_user_cannot_update_license(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.cc0_license._id
)
res = self.make_request(self.url, data, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_update_preprint_with_invalid_license_for_provider(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.mit_license._id
)
assert_equal(self.preprint.license, None)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'Invalid license chosen for {}'.format(self.preprint_provider.name))
def test_update_preprint_with_existing_license_year_attribute_only(self):
self.preprint.set_preprint_license(
{
'id': self.no_license.license_id,
'year': '2014',
'copyrightHolders': ['Diane', 'Mr. Peanut Butter']
},
Auth(self.admin_contributor),
)
self.preprint.save()
assert_equal(self.preprint.license.node_license, self.no_license)
assert_equal(self.preprint.license.year, '2014')
assert_equal(self.preprint.license.copyright_holders, ['Diane', 'Mr. Peanut Butter'])
data = self.make_payload(
node_id=self.preprint._id,
license_year='2015'
)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
self.preprint.license.reload()
assert_equal(self.preprint.license.node_license, self.no_license)
assert_equal(self.preprint.license.year, '2015')
assert_equal(self.preprint.license.copyright_holders, ['Diane', 'Mr. Peanut Butter'])
def test_update_preprint_with_existing_license_copyright_holders_attribute_only(self):
self.preprint.set_preprint_license(
{
'id': self.no_license.license_id,
'year': '2014',
'copyrightHolders': ['Diane', 'Mr. Peanut Butter']
},
Auth(self.admin_contributor),
)
self.preprint.save()
assert_equal(self.preprint.license.node_license, self.no_license)
assert_equal(self.preprint.license.year, '2014')
assert_equal(self.preprint.license.copyright_holders, ['Diane', 'Mr. Peanut Butter'])
data = self.make_payload(
node_id=self.preprint._id,
copyright_holders=['Bojack Horseman', 'Princess Carolyn']
)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
self.preprint.license.reload()
assert_equal(self.preprint.license.node_license, self.no_license)
assert_equal(self.preprint.license.year, '2014')
assert_equal(self.preprint.license.copyright_holders, ['Bojack Horseman', 'Princess Carolyn'])
def test_update_preprint_with_existing_license_relationship_only(self):
self.preprint.set_preprint_license(
{
'id': self.no_license.license_id,
'year': '2014',
'copyrightHolders': ['Diane', 'Mr. Peanut Butter']
},
Auth(self.admin_contributor),
)
self.preprint.save()
assert_equal(self.preprint.license.node_license, self.no_license)
assert_equal(self.preprint.license.year, '2014')
assert_equal(self.preprint.license.copyright_holders, ['Diane', 'Mr. Peanut Butter'])
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.cc0_license._id
)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
self.preprint.license.reload()
assert_equal(self.preprint.license.node_license, self.cc0_license)
assert_equal(self.preprint.license.year, '2014')
assert_equal(self.preprint.license.copyright_holders, ['Diane', 'Mr. Peanut Butter'])
def test_update_preprint_with_existing_license_relationship_and_attributes(self):
self.preprint.set_preprint_license(
{
'id': self.no_license.license_id,
'year': '2014',
'copyrightHolders': ['Diane', 'Mr. Peanut Butter']
},
Auth(self.admin_contributor),
save=True
)
assert_equal(self.preprint.license.node_license, self.no_license)
assert_equal(self.preprint.license.year, '2014')
assert_equal(self.preprint.license.copyright_holders, ['Diane', 'Mr. Peanut Butter'])
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.cc0_license._id,
license_year='2015',
copyright_holders=['Bojack Horseman', 'Princess Carolyn']
)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
self.preprint.license.reload()
assert_equal(self.preprint.license.node_license, self.cc0_license)
assert_equal(self.preprint.license.year, '2015')
assert_equal(self.preprint.license.copyright_holders, ['Bojack Horseman', 'Princess Carolyn'])
def test_update_preprint_license_without_required_year_in_payload(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.no_license._id,
copyright_holders=['Rick', 'Morty']
)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'year must be specified for this license')
def test_update_preprint_license_without_required_copyright_holders_in_payload_(self):
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.no_license._id,
license_year='1994'
)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'copyrightHolders must be specified for this license')
def test_update_preprint_license_does_not_change_project_license(self):
self.preprint.node.set_node_license(
{
'id': self.no_license.license_id,
'year': '2015',
'copyrightHolders': ['Simba', 'Mufasa']
},
auth=Auth(self.admin_contributor)
)
self.preprint.node.save()
assert_equal(self.preprint.node.node_license.node_license, self.no_license)
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.cc0_license._id
)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth)
assert_equal(res.status_code, 200)
self.preprint.reload()
assert_equal(self.preprint.license.node_license, self.cc0_license)
assert_equal(self.preprint.node.node_license.node_license, self.no_license)
def test_update_preprint_license_without_change_does_not_add_log(self):
self.preprint.set_preprint_license(
{
'id': self.no_license.license_id,
'year': '2015',
'copyrightHolders': ['Kim', 'Kanye']
},
auth=Auth(self.admin_contributor),
save=True
)
before_num_logs = self.preprint.node.logs.count()
before_update_log = self.preprint.node.logs.latest()
data = self.make_payload(
node_id=self.preprint._id,
license_id=self.no_license._id,
license_year='2015',
copyright_holders=['Kanye', 'Kim']
)
res = self.make_request(self.url, data, auth=self.admin_contributor.auth)
self.preprint.node.reload()
after_num_logs = self.preprint.node.logs.count()
after_update_log = self.preprint.node.logs.latest()
assert_equal(res.status_code, 200)
assert_equal(before_num_logs, after_num_logs)
assert_equal(before_update_log._id, after_update_log._id)
class TestPreprintIsPublishedDetail(ApiTestCase):
def setUp(self):
super(TestPreprintIsPublishedDetail, self).setUp()
self.admin = AuthUserFactory()
self.write_contrib = AuthUserFactory()
self.non_contrib = AuthUserFactory()
self.public_project = ProjectFactory(creator=self.admin, is_public=True)
self.public_project.add_contributor(self.write_contrib, permissions=['read', 'write'], save=True)
self.subject = SubjectFactory()
self.provider = PreprintProviderFactory()
self.file_one_public_project = test_utils.create_test_file(self.public_project, self.admin, 'mgla.pdf')
self.unpublished_preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider, subjects=[[self.subject._id]], project=self.public_project, is_published=False)
self.url = '/{}preprints/{}/'.format(API_BASE, self.unpublished_preprint._id)
def test_unpublished_visible_to_admins(self):
res = self.app.get(self.url, auth=self.admin.auth)
assert res.json['data']['id'] == self.unpublished_preprint._id
def test_unpublished_invisible_to_write_contribs(self):
res = self.app.get(self.url, auth=self.write_contrib.auth, expect_errors=True)
assert res.status_code == 403
def test_unpublished_invisible_to_non_contribs(self):
res = self.app.get(self.url, auth=self.non_contrib.auth, expect_errors=True)
assert res.status_code == 403
def test_unpublished_invisible_to_public(self):
res = self.app.get(self.url, expect_errors=True)
assert res.status_code == 401
|
|
# -*- coding: utf-8 -*-
'''
This module allows you to manage extended attributes on files or directories
.. code-block:: bash
salt '*' xattr.list /path/to/file
'''
from __future__ import absolute_import
# Import Python Libs
import logging
# Import salt libs
import salt.utils.mac_utils
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = "xattr"
def __virtual__():
'''
Only work on Mac OS
'''
if __grains__['os'] in ['MacOS', 'Darwin']:
return __virtualname__
return False
def list(path, hex=False):
'''
List all of the extended attributes on the given file/directory
:param str path: The file(s) to get attributes from
:param bool hex: Return the values with forced hexadecimal values
:return: A dictionary containing extended attributes and values for the
given file
:rtype: dict
:raises: CommandExecutionError on file not found or any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.list /path/to/file
salt '*' xattr.list /path/to/file hex=True
'''
cmd = 'xattr "{0}"'.format(path)
try:
ret = salt.utils.mac_utils.execute_return_result(cmd)
except CommandExecutionError as exc:
if 'No such file' in exc.strerror:
raise CommandExecutionError('File not found: {0}'.format(path))
raise CommandExecutionError('Unknown Error: {0}'.format(exc.strerror))
if not ret:
return {}
attrs_ids = ret.split("\n")
attrs = {}
for attr_id in attrs_ids:
attrs[attr_id] = read(path, attr_id, hex)
return attrs
def read(path, attribute, hex=False):
'''
Read the given attributes on the given file/directory
:param str path: The file to get attributes from
:param str attribute: The attribute to read
:param bool hex: Return the values with forced hexadecimal values
:return: A string containing the value of the named attribute
:rtype: str
:raises: CommandExecutionError on file not found, attribute not found, and
any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.read /path/to/file com.test.attr
salt '*' xattr.read /path/to/file com.test.attr hex=True
'''
hex_flag = ""
if hex:
hex_flag = "-x"
cmd = 'xattr -p {0} "{1}" "{2}"'.format(hex_flag, attribute, path)
try:
ret = salt.utils.mac_utils.execute_return_result(cmd)
except CommandExecutionError as exc:
if 'No such file' in exc.strerror:
raise CommandExecutionError('File not found: {0}'.format(path))
if 'No such xattr' in exc.strerror:
raise CommandExecutionError('Attribute not found: {0}'.format(attribute))
raise CommandExecutionError('Unknown Error: {0}'.format(exc.strerror))
return ret
def write(path, attribute, value, hex=False):
'''
Causes the given attribute name to be assigned the given value
:param str path: The file(s) to get attributes from
:param str attribute: The attribute name to be written to the file/directory
:param str value: The value to assign to the given attribute
:param bool hex: Set the values with forced hexadecimal values
:return: True if successful, otherwise False
:rtype: bool
:raises: CommandExecutionError on file not found or any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.write /path/to/file "com.test.attr" "value"
'''
hex_flag = ""
if hex:
hex_flag = "-x"
cmd = 'xattr -w {0} "{1}" "{2}" "{3}"'.format(hex_flag, attribute, value, path)
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
if 'No such file' in exc.strerror:
raise CommandExecutionError('File not found: {0}'.format(path))
raise CommandExecutionError('Unknown Error: {0}'.format(exc.strerror))
return read(path, attribute, hex) == value
def delete(path, attribute):
'''
Removes the given attribute from the file
:param str path: The file(s) to get attributes from
:param str attribute: The attribute name to be deleted from the
file/directory
:return: True if successful, otherwise False
:rtype: bool
:raises: CommandExecutionError on file not found, attribute not found, and
any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.delete /path/to/file "com.test.attr"
'''
cmd = 'xattr -d "{0}" "{1}"'.format(attribute, path)
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
if 'No such file' in exc.strerror:
raise CommandExecutionError('File not found: {0}'.format(path))
if 'No such xattr' in exc.strerror:
raise CommandExecutionError('Attribute not found: {0}'.format(attribute))
raise CommandExecutionError('Unknown Error: {0}'.format(exc.strerror))
return attribute not in list(path)
def clear(path):
'''
Causes the all attributes on the file/directory to be removed
:param str path: The file(s) to get attributes from
:return: True if successful, otherwise False
:raises: CommandExecutionError on file not found or any other unknown error
CLI Example:
.. code-block:: bash
salt '*' xattr.clear /path/to/file
'''
cmd = 'xattr -c "{0}"'.format(path)
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
if 'No such file' in exc.strerror:
raise CommandExecutionError('File not found: {0}'.format(path))
raise CommandExecutionError('Unknown Error: {0}'.format(exc.strerror))
return list(path) == {}
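# A minimal sketch (hypothetical path and attribute names) of how the
# functions above chain together when called directly on a Mac minion:
def _example_usage(path='/tmp/example.txt'):  # pragma: no cover - illustration only
    write(path, 'com.example.attr', 'value')       # True on success
    assert read(path, 'com.example.attr') == 'value'
    assert 'com.example.attr' in list(path)        # list() maps name -> value
    delete(path, 'com.example.attr')
    return clear(path)                             # True once nothing remains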
|
|
import asyncio
from collections import deque, namedtuple
import itertools
import logging
import os
import threading
import weakref
import warnings
from tornado.concurrent import Future
from tornado.ioloop import IOLoop
from ..protocol import nested_deserialize
from ..utils import get_ip
from .registry import Backend, backends
from .core import Comm, Connector, Listener, CommClosedError
logger = logging.getLogger(__name__)
ConnectionRequest = namedtuple(
"ConnectionRequest", ("c2s_q", "s2c_q", "c_loop", "c_addr", "conn_event")
)
class Manager:
"""
An object coordinating listeners and their addresses.
"""
def __init__(self):
self.listeners = weakref.WeakValueDictionary()
self.addr_suffixes = itertools.count(1)
with warnings.catch_warnings():
# Avoid immediate warning for unreachable network
# (will still warn for other get_ip() calls when actually used)
warnings.simplefilter("ignore")
self.ip = get_ip()
self.lock = threading.Lock()
def add_listener(self, addr, listener):
with self.lock:
if addr in self.listeners:
raise RuntimeError("already listening on %r" % (addr,))
self.listeners[addr] = listener
def remove_listener(self, addr):
with self.lock:
try:
del self.listeners[addr]
except KeyError:
pass
def get_listener_for(self, addr):
with self.lock:
self.validate_address(addr)
return self.listeners.get(addr)
def new_address(self):
return "%s/%d/%s" % (self.ip, os.getpid(), next(self.addr_suffixes))
def validate_address(self, addr):
"""
Validate the address' IP and pid.
"""
ip, pid, suffix = addr.split("/")
if ip != self.ip or int(pid) != os.getpid():
raise ValueError(
"inproc address %r does not match host (%r) or pid (%r)"
% (addr, self.ip, os.getpid())
)
global_manager = Manager()
def new_address():
"""
Generate a new address.
"""
return "inproc://" + global_manager.new_address()
class QueueEmpty(Exception):
pass
class Queue:
"""
A single-reader, single-writer, non-threadsafe, peekable queue.
"""
def __init__(self):
self._q = deque()
self._read_future = None
def get_nowait(self):
q = self._q
if not q:
raise QueueEmpty
return q.popleft()
def get(self):
assert not self._read_future, "Only one reader allowed"
fut = Future()
q = self._q
if q:
fut.set_result(q.popleft())
else:
self._read_future = fut
return fut
def put_nowait(self, value):
q = self._q
fut = self._read_future
if fut is not None:
assert len(q) == 0
self._read_future = None
fut.set_result(value)
else:
q.append(value)
put = put_nowait
_omitted = object()
def peek(self, default=_omitted):
"""
Get the next object in the queue without removing it from the queue.
"""
q = self._q
if q:
return q[0]
elif default is not self._omitted:
return default
else:
raise QueueEmpty
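# A minimal sketch of the Queue semantics relied on below (illustration only):
# put_nowait() either fulfils a pending read Future or buffers the value, and
# peek() inspects the head without consuming it.
def _example_queue_usage():  # pragma: no cover - illustration only
    q = Queue()
    q.put_nowait("a")
    assert q.peek() == "a"         # non-destructive
    assert q.get_nowait() == "a"   # destructive
    fut = q.get()                  # queue empty: returns a pending Future
    q.put_nowait("b")              # fulfils the pending Future
    assert fut.result() == "b"
    return fut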
_EOF = object()
class InProc(Comm):
"""
An established communication based on a pair of in-process queues.
Reminder: a Comm must always be used from a single thread.
Its peer Comm can be running in any thread.
"""
_initialized = False
def __init__(
self, local_addr, peer_addr, read_q, write_q, write_loop, deserialize=True
):
Comm.__init__(self)
self._local_addr = local_addr
self._peer_addr = peer_addr
self.deserialize = deserialize
self._read_q = read_q
self._write_q = write_q
self._write_loop = write_loop
self._closed = False
self._finalizer = weakref.finalize(self, self._get_finalizer())
self._finalizer.atexit = False
self._initialized = True
def _get_finalizer(self):
def finalize(write_q=self._write_q, write_loop=self._write_loop, r=repr(self)):
logger.warning("Closing dangling queue in %s" % (r,))
write_loop.add_callback(write_q.put_nowait, _EOF)
return finalize
@property
def local_address(self):
return self._local_addr
@property
def peer_address(self):
return self._peer_addr
async def read(self, deserializers="ignored"):
if self._closed:
raise CommClosedError
msg = await self._read_q.get()
if msg is _EOF:
self._closed = True
self._finalizer.detach()
raise CommClosedError
if self.deserialize:
msg = nested_deserialize(msg)
return msg
async def write(self, msg, serializers=None, on_error=None):
if self.closed():
raise CommClosedError
# Ensure we feed the queue in the same thread it is read from.
self._write_loop.add_callback(self._write_q.put_nowait, msg)
return 1
async def close(self):
self.abort()
def abort(self):
if not self.closed():
# Putting EOF is cheap enough that we do it on abort() too
self._write_loop.add_callback(self._write_q.put_nowait, _EOF)
self._read_q.put_nowait(_EOF)
self._write_q = self._read_q = None
self._closed = True
self._finalizer.detach()
def closed(self):
"""
Whether this comm is closed. An InProc comm is closed if:
1) close() or abort() was called on this comm
2) close() or abort() was called on the other end and the
read queue is empty
"""
if self._closed:
return True
# NOTE: repr() is called by finalize() during __init__()...
if self._initialized and self._read_q.peek(None) is _EOF:
self._closed = True
self._finalizer.detach()
return True
else:
return False
class InProcListener(Listener):
prefix = "inproc"
def __init__(self, address, comm_handler, deserialize=True):
self.manager = global_manager
self.address = address or self.manager.new_address()
self.comm_handler = comm_handler
self.deserialize = deserialize
self.listen_q = Queue()
async def _listen(self):
while True:
conn_req = await self.listen_q.get()
if conn_req is None:
break
comm = InProc(
local_addr="inproc://" + self.address,
peer_addr="inproc://" + conn_req.c_addr,
read_q=conn_req.c2s_q,
write_q=conn_req.s2c_q,
write_loop=conn_req.c_loop,
deserialize=self.deserialize,
)
# Notify connector
conn_req.c_loop.add_callback(conn_req.conn_event.set)
IOLoop.current().add_callback(self.comm_handler, comm)
def connect_threadsafe(self, conn_req):
self.loop.add_callback(self.listen_q.put_nowait, conn_req)
async def start(self):
self.loop = IOLoop.current()
self._listen_future = asyncio.ensure_future(self._listen())
self.manager.add_listener(self.address, self)
def stop(self):
self.listen_q.put_nowait(None)
self.manager.remove_listener(self.address)
@property
def listen_address(self):
return "inproc://" + self.address
@property
def contact_address(self):
return "inproc://" + self.address
class InProcConnector(Connector):
def __init__(self, manager):
self.manager = manager
async def connect(self, address, deserialize=True, **connection_args):
listener = self.manager.get_listener_for(address)
if listener is None:
raise IOError("no endpoint for inproc address %r" % (address,))
conn_req = ConnectionRequest(
c2s_q=Queue(),
s2c_q=Queue(),
c_loop=IOLoop.current(),
c_addr=self.manager.new_address(),
conn_event=asyncio.Event(),
)
listener.connect_threadsafe(conn_req)
# Wait for connection acknowledgement
# (do not pretend we're connected if the other comm never gets
# created, for example if the listener was stopped in the meantime)
await conn_req.conn_event.wait()
comm = InProc(
local_addr="inproc://" + conn_req.c_addr,
peer_addr="inproc://" + address,
read_q=conn_req.s2c_q,
write_q=conn_req.c2s_q,
write_loop=listener.loop,
deserialize=deserialize,
)
return comm
class InProcBackend(Backend):
manager = global_manager
# I/O
def get_connector(self):
return InProcConnector(self.manager)
def get_listener(self, loc, handle_comm, deserialize, **connection_args):
return InProcListener(loc, handle_comm, deserialize)
# Address handling
def get_address_host(self, loc):
self.manager.validate_address(loc)
return self.manager.ip
def resolve_address(self, loc):
return loc
def get_local_address_for(self, loc):
self.manager.validate_address(loc)
return self.manager.new_address()
backends["inproc"] = InProcBackend()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the gridfs package.
"""
import datetime
import itertools
import threading
import time
from io import BytesIO
from test import IntegrationTest, client_context, unittest
from test.utils import joinall, one, rs_client, rs_or_single_client, single_client
import gridfs
from bson.binary import Binary
from bson.int64 import Int64
from bson.objectid import ObjectId
from bson.son import SON
from gridfs.errors import CorruptGridFile, NoFile
from pymongo.errors import (
ConfigurationError,
NotPrimaryError,
ServerSelectionTimeoutError,
)
from pymongo.mongo_client import MongoClient
from pymongo.read_preferences import ReadPreference
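# A minimal sketch (illustration only; the connection string and database name
# are assumptions) of the GridFSBucket round trip the tests below exercise:
def _example_gridfs_roundtrip():  # pragma: no cover - illustration only
    client = MongoClient("mongodb://localhost:27017")
    bucket = gridfs.GridFSBucket(client.example_db)
    file_id = bucket.upload_from_stream("example.txt", b"hello world")
    data = bucket.open_download_stream(file_id).read()
    bucket.delete(file_id)
    return data  # b"hello world"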
class JustWrite(threading.Thread):
def __init__(self, gfs, num):
threading.Thread.__init__(self)
self.gfs = gfs
self.num = num
self.daemon = True
def run(self):
for _ in range(self.num):
file = self.gfs.open_upload_stream("test")
file.write(b"hello")
file.close()
class JustRead(threading.Thread):
def __init__(self, gfs, num, results):
threading.Thread.__init__(self)
self.gfs = gfs
self.num = num
self.results = results
self.daemon = True
def run(self):
for _ in range(self.num):
file = self.gfs.open_download_stream_by_name("test")
data = file.read()
self.results.append(data)
assert data == b"hello"
class TestGridfs(IntegrationTest):
fs: gridfs.GridFSBucket
alt: gridfs.GridFSBucket
@classmethod
def setUpClass(cls):
super(TestGridfs, cls).setUpClass()
cls.fs = gridfs.GridFSBucket(cls.db)
cls.alt = gridfs.GridFSBucket(cls.db, bucket_name="alt")
def setUp(self):
self.cleanup_colls(
self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks
)
def test_basic(self):
oid = self.fs.upload_from_stream("test_filename", b"hello world")
self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read())
self.assertEqual(1, self.db.fs.files.count_documents({}))
self.assertEqual(1, self.db.fs.chunks.count_documents({}))
self.fs.delete(oid)
self.assertRaises(NoFile, self.fs.open_download_stream, oid)
self.assertEqual(0, self.db.fs.files.count_documents({}))
self.assertEqual(0, self.db.fs.chunks.count_documents({}))
def test_multi_chunk_delete(self):
self.assertEqual(0, self.db.fs.files.count_documents({}))
self.assertEqual(0, self.db.fs.chunks.count_documents({}))
gfs = gridfs.GridFSBucket(self.db)
oid = gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1)
self.assertEqual(1, self.db.fs.files.count_documents({}))
self.assertEqual(5, self.db.fs.chunks.count_documents({}))
gfs.delete(oid)
self.assertEqual(0, self.db.fs.files.count_documents({}))
self.assertEqual(0, self.db.fs.chunks.count_documents({}))
def test_empty_file(self):
oid = self.fs.upload_from_stream("test_filename", b"")
self.assertEqual(b"", self.fs.open_download_stream(oid).read())
self.assertEqual(1, self.db.fs.files.count_documents({}))
self.assertEqual(0, self.db.fs.chunks.count_documents({}))
raw = self.db.fs.files.find_one()
assert raw is not None
self.assertEqual(0, raw["length"])
self.assertEqual(oid, raw["_id"])
self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime))
self.assertEqual(255 * 1024, raw["chunkSize"])
self.assertNotIn("md5", raw)
def test_corrupt_chunk(self):
files_id = self.fs.upload_from_stream("test_filename", b"foobar")
self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}})
try:
out = self.fs.open_download_stream(files_id)
self.assertRaises(CorruptGridFile, out.read)
out = self.fs.open_download_stream(files_id)
self.assertRaises(CorruptGridFile, out.readline)
finally:
self.fs.delete(files_id)
def test_upload_ensures_index(self):
chunks = self.db.fs.chunks
files = self.db.fs.files
# Ensure the collections are removed.
chunks.drop()
files.drop()
self.fs.upload_from_stream("filename", b"junk")
self.assertTrue(
any(
info.get("key") == [("files_id", 1), ("n", 1)]
for info in chunks.index_information().values()
)
)
self.assertTrue(
any(
info.get("key") == [("filename", 1), ("uploadDate", 1)]
for info in files.index_information().values()
)
)
def test_ensure_index_shell_compat(self):
files = self.db.fs.files
for i, j in itertools.combinations_with_replacement([1, 1.0, Int64(1)], 2):
# Create the index with different numeric types (as might be done
# from the mongo shell).
shell_index = [("filename", i), ("uploadDate", j)]
self.db.command(
"createIndexes",
files.name,
indexes=[{"key": SON(shell_index), "name": "filename_1.0_uploadDate_1.0"}],
)
# No error.
self.fs.upload_from_stream("filename", b"data")
self.assertTrue(
any(
info.get("key") == [("filename", 1), ("uploadDate", 1)]
for info in files.index_information().values()
)
)
files.drop()
def test_alt_collection(self):
oid = self.alt.upload_from_stream("test_filename", b"hello world")
self.assertEqual(b"hello world", self.alt.open_download_stream(oid).read())
self.assertEqual(1, self.db.alt.files.count_documents({}))
self.assertEqual(1, self.db.alt.chunks.count_documents({}))
self.alt.delete(oid)
self.assertRaises(NoFile, self.alt.open_download_stream, oid)
self.assertEqual(0, self.db.alt.files.count_documents({}))
self.assertEqual(0, self.db.alt.chunks.count_documents({}))
self.assertRaises(NoFile, self.alt.open_download_stream, "foo")
self.alt.upload_from_stream("foo", b"hello world")
self.assertEqual(b"hello world", self.alt.open_download_stream_by_name("foo").read())
self.alt.upload_from_stream("mike", b"")
self.alt.upload_from_stream("test", b"foo")
self.alt.upload_from_stream("hello world", b"")
self.assertEqual(
set(["mike", "test", "hello world", "foo"]),
set(k["filename"] for k in list(self.db.alt.files.find())),
)
def test_threaded_reads(self):
self.fs.upload_from_stream("test", b"hello")
threads = []
results: list = []
for i in range(10):
threads.append(JustRead(self.fs, 10, results))
threads[i].start()
joinall(threads)
self.assertEqual(100 * [b"hello"], results)
def test_threaded_writes(self):
threads = []
for i in range(10):
threads.append(JustWrite(self.fs, 10))
threads[i].start()
joinall(threads)
fstr = self.fs.open_download_stream_by_name("test")
self.assertEqual(fstr.read(), b"hello")
# Should have created 100 versions of 'test' file
self.assertEqual(100, self.db.fs.files.count_documents({"filename": "test"}))
def test_get_last_version(self):
one = self.fs.upload_from_stream("test", b"foo")
time.sleep(0.01)
two = self.fs.open_upload_stream("test")
two.write(b"bar")
two.close()
time.sleep(0.01)
two = two._id
three = self.fs.upload_from_stream("test", b"baz")
self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test").read())
self.fs.delete(three)
self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test").read())
self.fs.delete(two)
self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test").read())
self.fs.delete(one)
self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test")
def test_get_version(self):
self.fs.upload_from_stream("test", b"foo")
time.sleep(0.01)
self.fs.upload_from_stream("test", b"bar")
time.sleep(0.01)
self.fs.upload_from_stream("test", b"baz")
time.sleep(0.01)
self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=0).read())
self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=1).read())
self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=2).read())
self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=-1).read())
self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=-2).read())
self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=-3).read())
self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=3)
self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=-4)
def test_upload_from_stream(self):
oid = self.fs.upload_from_stream("test_file", BytesIO(b"hello world"), chunk_size_bytes=1)
self.assertEqual(11, self.db.fs.chunks.count_documents({}))
self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read())
def test_upload_from_stream_with_id(self):
oid = ObjectId()
self.fs.upload_from_stream_with_id(
oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1
)
self.assertEqual(b"custom id", self.fs.open_download_stream(oid).read())
def test_open_upload_stream(self):
gin = self.fs.open_upload_stream("from_stream")
gin.write(b"from stream")
gin.close()
self.assertEqual(b"from stream", self.fs.open_download_stream(gin._id).read())
def test_open_upload_stream_with_id(self):
oid = ObjectId()
gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id")
gin.write(b"from stream with custom id")
gin.close()
self.assertEqual(b"from stream with custom id", self.fs.open_download_stream(oid).read())
def test_missing_length_iter(self):
# Test fix that guards against PHP-237
self.fs.upload_from_stream("empty", b"")
doc = self.db.fs.files.find_one({"filename": "empty"})
assert doc is not None
doc.pop("length")
self.db.fs.files.replace_one({"_id": doc["_id"]}, doc)
fstr = self.fs.open_download_stream_by_name("empty")
def iterate_file(grid_file):
for _ in grid_file:
pass
return True
self.assertTrue(iterate_file(fstr))
def test_gridfs_lazy_connect(self):
client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=0)
cdb = client.db
gfs = gridfs.GridFSBucket(cdb)
self.assertRaises(ServerSelectionTimeoutError, gfs.delete, 0)
gfs = gridfs.GridFSBucket(cdb)
self.assertRaises(
ServerSelectionTimeoutError, gfs.upload_from_stream, "test", b""
) # Still no connection.
def test_gridfs_find(self):
self.fs.upload_from_stream("two", b"test2")
time.sleep(0.01)
self.fs.upload_from_stream("two", b"test2+")
time.sleep(0.01)
self.fs.upload_from_stream("one", b"test1")
time.sleep(0.01)
self.fs.upload_from_stream("two", b"test2++")
files = self.db.fs.files
self.assertEqual(3, files.count_documents({"filename": "two"}))
self.assertEqual(4, files.count_documents({}))
cursor = self.fs.find(
{}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2
)
gout = next(cursor)
self.assertEqual(b"test1", gout.read())
cursor.rewind()
gout = next(cursor)
self.assertEqual(b"test1", gout.read())
gout = next(cursor)
self.assertEqual(b"test2+", gout.read())
self.assertRaises(StopIteration, cursor.__next__)
cursor.close()
self.assertRaises(TypeError, self.fs.find, {}, {"_id": True})
def test_grid_in_non_int_chunksize(self):
# Lua, and perhaps other buggy GridFS clients, store size as a float.
data = b"data"
self.fs.upload_from_stream("f", data)
self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}})
self.assertEqual(data, self.fs.open_download_stream_by_name("f").read())
def test_unacknowledged(self):
# w=0 is prohibited.
with self.assertRaises(ConfigurationError):
gridfs.GridFSBucket(rs_or_single_client(w=0).pymongo_test)
def test_rename(self):
_id = self.fs.upload_from_stream("first_name", b"testing")
self.assertEqual(b"testing", self.fs.open_download_stream_by_name("first_name").read())
self.fs.rename(_id, "second_name")
self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "first_name")
self.assertEqual(b"testing", self.fs.open_download_stream_by_name("second_name").read())
def test_abort(self):
gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5)
gin.write(b"test1")
gin.write(b"test2")
gin.write(b"test3")
self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id}))
gin.abort()
self.assertTrue(gin.closed)
self.assertRaises(ValueError, gin.write, b"test4")
self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id}))
def test_download_to_stream(self):
file1 = BytesIO(b"hello world")
# Test with one chunk.
oid = self.fs.upload_from_stream("one_chunk", file1)
self.assertEqual(1, self.db.fs.chunks.count_documents({}))
file2 = BytesIO()
self.fs.download_to_stream(oid, file2)
file1.seek(0)
file2.seek(0)
self.assertEqual(file1.read(), file2.read())
# Test with many chunks.
self.db.drop_collection("fs.files")
self.db.drop_collection("fs.chunks")
file1.seek(0)
oid = self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1)
self.assertEqual(11, self.db.fs.chunks.count_documents({}))
file2 = BytesIO()
self.fs.download_to_stream(oid, file2)
file1.seek(0)
file2.seek(0)
self.assertEqual(file1.read(), file2.read())
def test_download_to_stream_by_name(self):
file1 = BytesIO(b"hello world")
# Test with one chunk.
_ = self.fs.upload_from_stream("one_chunk", file1)
self.assertEqual(1, self.db.fs.chunks.count_documents({}))
file2 = BytesIO()
self.fs.download_to_stream_by_name("one_chunk", file2)
file1.seek(0)
file2.seek(0)
self.assertEqual(file1.read(), file2.read())
# Test with many chunks.
self.db.drop_collection("fs.files")
self.db.drop_collection("fs.chunks")
file1.seek(0)
self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1)
self.assertEqual(11, self.db.fs.chunks.count_documents({}))
file2 = BytesIO()
self.fs.download_to_stream_by_name("many_chunks", file2)
file1.seek(0)
file2.seek(0)
self.assertEqual(file1.read(), file2.read())
def test_md5(self):
gin = self.fs.open_upload_stream("no md5")
gin.write(b"no md5 sum")
gin.close()
self.assertIsNone(gin.md5)
gout = self.fs.open_download_stream(gin._id)
self.assertIsNone(gout.md5)
gin = self.fs.open_upload_stream_with_id(ObjectId(), "also no md5")
gin.write(b"also no md5 sum")
gin.close()
self.assertIsNone(gin.md5)
gout = self.fs.open_download_stream(gin._id)
self.assertIsNone(gout.md5)
class TestGridfsBucketReplicaSet(IntegrationTest):
@classmethod
@client_context.require_secondaries_count(1)
def setUpClass(cls):
super(TestGridfsBucketReplicaSet, cls).setUpClass()
@classmethod
def tearDownClass(cls):
client_context.client.drop_database("gfsbucketreplica")
def test_gridfs_replica_set(self):
rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY)
gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest")
oid = gfs.upload_from_stream("test_filename", b"foo")
content = gfs.open_download_stream(oid).read()
self.assertEqual(b"foo", content)
def test_gridfs_secondary(self):
secondary_host, secondary_port = one(self.client.secondaries)
secondary_connection = single_client(
secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY
)
# Should detect it's connected to secondary and not attempt to
# create index
gfs = gridfs.GridFSBucket(secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest")
# This won't detect secondary, raises error
self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"foo")
def test_gridfs_secondary_lazy(self):
# Should detect it's connected to secondary and not attempt to
# create index.
secondary_host, secondary_port = one(self.client.secondaries)
client = single_client(
secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False
)
# Still no connection.
gfs = gridfs.GridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest")
# Connects, doesn't create index.
self.assertRaises(NoFile, gfs.open_download_stream_by_name, "test_filename")
self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"data")
if __name__ == "__main__":
unittest.main()
|
|
# -----------------------------------------------------------------------------
import datetime
import os
import glob
from PySide import QtCore, QtGui
from dpa.config import Config
from dpa.action import ActionError
from dpa.action.registry import ActionRegistry
from dpa.notify import Notification, emails_from_unames
from dpa.ptask.area import PTaskArea, PTaskAreaError
from dpa.ptask import PTask
from dpa.queue import get_unique_id, create_queue_task
from dpa.ui.dk.base import BaseDarkKnightDialog, DarkKnightError
from dpa.user import current_username, User
# -----------------------------------------------------------------------------
DK_CONFIG_PATH = "config/notify/dk.cfg"
# -----------------------------------------------------------------------------
class HoudiniDarkKnightDialog(BaseDarkKnightDialog):
# XXX meh.
RENDER_QUEUES = ['cheezwhiz', 'cheddar', 'hold', 'nuke', 'velveeta',
'muenster']
# -------------------------------------------------------------------------
def __init__(self, parent=None):
super(HoudiniDarkKnightDialog, self).__init__(parent=parent)
# ---- controls
controls_widget = self._setup_controls()
scroll_area = QtGui.QScrollArea()
scroll_area.setFocusPolicy(QtCore.Qt.NoFocus)
scroll_area.setWidgetResizable(True)
scroll_area.setWidget(controls_widget)
self.main_layout.addWidget(scroll_area)
self.main_layout.setStretchFactor(scroll_area, 1000)
# ---- submit btn
cancel_btn = QtGui.QPushButton("Cancel")
cancel_btn.clicked.connect(self.close)
submit_btn = QtGui.QPushButton("Submit")
submit_btn.clicked.connect(self.accept)
btn_layout = QtGui.QHBoxLayout()
btn_layout.setContentsMargins(4, 4, 4, 4)
btn_layout.addStretch()
btn_layout.addWidget(cancel_btn)
btn_layout.addWidget(submit_btn)
btn_layout.addStretch()
self.main_layout.addLayout(btn_layout)
self.main_layout.setStretchFactor(btn_layout, 0)
self._version_note_edit.setFocus()
# -------------------------------------------------------------------------
def accept(self):
self.setEnabled(False)
# ---- get the values from the UI
self._frange = self._get_frange_from_controls()
if not self._frange:
self.setEnabled(True)
return
self._frame_list = self._frange.frames
self._render_queue = self._render_queues.currentText()
self._version_note = self._version_note_edit.text()
self._node_to_render = self._write_node_select.itemData(
self._write_node_select.currentIndex())
self._debug_mode = self._debug.isChecked()
if not self._version_note:
self._show_error("Please specify a description of " +
"what's changed in this version.")
self.setEnabled(True)
return
try:
self._render_to_product()
except Exception as e:
self.setEnabled(True)
raise
else:
super(HoudiniDarkKnightDialog, self).accept()
self.setEnabled(True)
# -------------------------------------------------------------------------
def _render_to_product(self):
# get render node reference
render_node = self.session.hou.node(self._node_to_render)
# ---- progress dialog
num_ops = 8
cur_op = 0
progress_dialog = QtGui.QProgressDialog(
"Product render...", "", cur_op, num_ops, self)
progress_dialog.setWindowTitle("Dark Knight is busy...")
progress_dialog.setAutoReset(False)
progress_dialog.setLabelText("Preparing nuke file for rendering...")
progress_dialog.show()
#########################################
# ensure the product has been created
#########################################
progress_dialog.setLabelText("Creating product...")
if render_node.type().name() != 'ifd':
raise Exception("The supplied node is not a mantra (ifd) render node.")
print "Creating product for node... " + str(render_node)
ptask_area = PTaskArea.current()
ptask = PTask.get(ptask_area.spec)
if ptask_area.version:
ptask_version = ptask.version(ptask_area.version)
else:
ptask_version = ptask.latest_version
category = 'imgseq'
file_type = 'exr'
product_name = render_node.name()
product_desc = render_node.name() + " mantra render"
product_ver_note = self._version_note
camera_node = self.session.hou.node(render_node.evalParm('camera'))
if not camera_node:
raise Exception("Camera specified is not valid.")
width = camera_node.evalParm("resx")
height = camera_node.evalParm("resy")
resolution = "%sx%s" % (width, height)
create_action_cls = ActionRegistry().get_action('create', 'product')
if not create_action_cls:
raise Exception("Unable to find product creation action.")
create_action = create_action_cls(
product=product_name,
ptask=ptask.spec,
version=ptask_version.number,
category=category,
description=product_desc,
file_type=file_type,
resolution=resolution,
note=product_ver_note,
)
try:
create_action()
except ActionError as e:
raise Exception("Unable to create product: " + str(e))
# provision the ifd directory
try:
create_action.product_repr.area.provision('ifd')
except Exception as e:
raise Exception(
"Unable to create ifd file directory: " + str(e))
ifd_dir = os.path.join(create_action.product_repr.area.path,
'ifd', product_name + '.$F4.ifd')
out_path = os.path.join(create_action.product_repr.area.path,
product_name + '.$F4.' + file_type)
# by default, the mantra frame range has an expression on frame numbers
render_node.parm('f1').deleteAllKeyframes()
render_node.parm('f2').deleteAllKeyframes()
# set frange
render_node.parm('trange').set(1)
render_node.parm('f1').set(self._frange.start)
render_node.parm('f2').set(self._frange.end)
render_node.parm('f3').set(self._frange.step)
# set output
render_node.parm('soho_outputmode').set(1)
render_node.parm('soho_diskfile').set(ifd_dir)
render_node.parm('soho_diskfile').disable(0)
render_node.parm('vm_picture').set(out_path)
render_node.parm('soho_mkpath').set(1)
product_repr = create_action.product_repr
product_repr_area = product_repr.area
cur_op += 1
progress_dialog.setValue(cur_op)
#########################################
# create ifd files
#########################################
progress_dialog.setLabelText("Generating ifd files...")
render_node.parm('execute').pressButton()
ifd_file_list = glob.glob(
os.path.join(
create_action.product_repr.area.path,
'ifd', '*.ifd')
)
for ifd_file in ifd_file_list:
os.chmod(ifd_file, 0770)
cur_op += 1
progress_dialog.setValue(cur_op)
#########################################
# sync current work area to version snapshot to render from
#########################################
progress_dialog.setLabelText("Sync'ing the latest work...")
try:
self.session.save()
self._sync_latest()
except Exception as e:
self._show_error("Unable to save & sync the latest work: " + str(e))
self.setEnabled(True)
progress_dialog.close()
return
cur_op += 1
progress_dialog.setValue(cur_op)
#########################################
# ensure queue directory exists
#########################################
progress_dialog.setLabelText("Provisioning the queue directory...")
try:
product_repr_area.provision('queue')
except Exception as e:
raise DarkKnightError(
"Unable to create queue scripts directory: " + str(e))
cur_op += 1
progress_dialog.setValue(cur_op)
out_dir = product_repr_area.path
ifd_dir = product_repr_area.dir(dir_name='ifd')
queue_dir = product_repr_area.dir(dir_name='queue')
tasks_info_file = os.path.join(queue_dir, 'tasks_info.cfg')
tasks_info_config = Config()
cur_op += 1
progress_dialog.setValue(cur_op)
#########################################
        # building queue scripts
#########################################
progress_dialog.setLabelText("Building the queue script...")
        # dpaset command to run
dpaset_cmd = 'eval "`dpa env ptask {pt}@{vn}`"'.format(
pt=ptask.spec, vn=ptask_version.number)
# write out queue shell scripts
frame_scripts = []
for frame in self._frame_list:
frame_padded = str(frame).zfill(4)
ifd_file = os.path.join(ifd_dir,
"{pn}.{fn}.ifd".format(pn=product_name, fn=frame_padded))
script_path = os.path.join(queue_dir,
"{pn}.{fn}.sh".format(pn=product_name, fn=frame_padded))
out_file = os.path.join(out_dir,
"{pn}.{fn}.{ft}".format(pn=product_name, fn=frame_padded, ft=file_type) )
render_cmd = "/opt/hfs14/bin/mantra -f {ifd} -V 2a".\
format(
ifd=ifd_file
)
with open(script_path, "w") as script_file:
script_file.write("#!/bin/bash\n\n")
# XXX these should happen automatically in the queue...
script_file.write("source /DPA/wookie/dpa/bash/startup.bash\n")
script_file.write("pipeup\n\n")
script_file.write("# set the ptask version to render\n")
script_file.write(dpaset_cmd + "\n\n")
script_file.write("# render!\n")
script_file.write(render_cmd + "\n\n")
frame_scripts.append((frame_padded, script_path, out_file))
os.chmod(script_path, 0770)
cur_op += 1
progress_dialog.setValue(cur_op)
################################################
# submit to the queue
################################################
now = datetime.datetime.now()
task_id_base = get_unique_id(product_repr_area.spec, dt=now)
frame_tasks = []
# create frame tasks
for (frame, frame_script, out_file) in frame_scripts:
progress_dialog.setLabelText(
"Submitting frame: " + frame_script)
task_id = task_id_base + "_" + frame
if not self._debug_mode:
# create tasks, don't actually submit yet
create_queue_task(self._render_queue, frame_script, task_id,
output_file=out_file, submit=False,
log_path=frame_script + '.log')
frame_tasks.append((frame, task_id))
#
# resubmit frame-by-frame because
# group submit seems to be occasionally
# having problems.
os.system("cqresubmittask {qn} {tid}".format(
qn=self._render_queue, tid=task_id))
cur_op += 1
progress_dialog.setValue(cur_op)
################################################
# task info stuff, allows task ids to
# be retrieved with product spec
################################################
progress_dialog.setLabelText("Creating task info file...")
tasks_info_file = os.path.join(queue_dir, 'tasks_info.cfg')
tasks_info_config = Config()
tasks_info_config.add('base_id', task_id_base)
frame_info = Config()
for (frame, task_id) in frame_tasks:
frame_info.add(str(frame), task_id)
tasks_info_config.add('frame_ids', frame_info)
tasks_info_config.write(tasks_info_file)
os.chmod(tasks_info_file, 0660)
cur_op += 1
progress_dialog.setValue(cur_op)
################################################
# email report
################################################
if not self._debug_mode:
# send msg...
msg_title = "Queue submission report: " + \
now.strftime("%Y/%m/%d %H:%M:%S")
msg_body = "Submitted the following tasks for " + \
ptask.spec + ":\n\n"
msg_body += " Description: " + self._version_note + "\n"
msg_body += " Resolution: " + resolution + "\n"
msg_body += " Render queue: " + self._render_queue + "\n"
msg_body += " Frames: " + str(self._frange) + "\n"
msg_body += " Ifd directory: " + ifd_dir + "\n"
msg_body += "\n"
msg_body += " Base task ID: " + task_id_base + "\n"
msg_body += " Product representation: " + \
product_repr.spec + "\n"
msg_body += " Scripts directory: " + queue_dir + "\n"
msg_body += "\n"
dk_config = ptask.area.config(DK_CONFIG_PATH,
composite_ancestors=True, composite_method="append")
recipients = dk_config.get('notify', [])
recipients.append(current_username())
recipients = emails_from_unames(recipients)
notification = Notification(msg_title, msg_body, recipients,
sender=User.current().email)
notification.send_email()
print recipients
cur_op += 1
progress_dialog.setValue(cur_op)
progress_dialog.close()
# -------------------------------------------------------------------------
def _setup_controls(self):
        # ---- version note
version_note_lbl = QtGui.QLabel("Version description:")
self._version_note_edit = QtGui.QLineEdit()
# ---- mantra nodes
write_nodes = [node
for node in self.session.hou.node("/").allSubChildren()
if node.type().name()=='ifd']
if not write_nodes:
raise DarkKnightError("No WriteProduct nodes to render.")
try:
default_node = self.session.hou.selectedNodes()[0]
        except IndexError:
default_node = write_nodes[0]
write_node_lbl = QtGui.QLabel('Rendering:')
self._write_node_select = QtGui.QComboBox()
default_index = 0
for (i, node) in enumerate(write_nodes):
node_name = node.name()
node_path = node.path()
node_disp = "{pn} ({nn})".format(
pn=node_name, nn=node_path)
self._write_node_select.addItem(node_disp, node_path)
if node_name == default_node.name():
default_index = i
self._write_node_select.setCurrentIndex(default_index)
# ---- frame range
# frange
render_node_path = self._write_node_select.itemData(
self._write_node_select.currentIndex())
render_node = self.session.hou.node(render_node_path)
min_time = render_node.evalParm('f1')
max_time = render_node.evalParm('f2')
start_time = min_time
end_time = max_time
frange_lbl = QtGui.QLabel("Frame range:")
self._make_frame_range_controls(
min_time, max_time, start_time, end_time)
self._frame_step.setValue(render_node.evalParm('f3'))
controls_layout = QtGui.QGridLayout()
# ---- queue
render_queue_lbl = QtGui.QLabel("Render queue:")
self._render_queues = QtGui.QComboBox()
self._render_queues.addItems(self.__class__.RENDER_QUEUES)
# ---- debug
debug_lbl = QtGui.QLabel("Debug mode:")
self._debug = QtGui.QCheckBox("")
#
self.connect(self._write_node_select, QtCore.SIGNAL("currentIndexChanged(const QString&)"), self._updateFrange)
# ---- layout the controls
controls_layout.addWidget(version_note_lbl, 0, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._version_note_edit, 0, 1)
controls_layout.addWidget(write_node_lbl, 1, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._write_node_select, 1, 1)
controls_layout.addWidget(frange_lbl, 2, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._frange_stack, 2, 1, QtCore.Qt.AlignLeft)
controls_layout.addWidget(self._frange_btn, 2, 2, QtCore.Qt.AlignLeft)
controls_layout.addWidget(render_queue_lbl, 3, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._render_queues, 3, 1, QtCore.Qt.AlignLeft)
controls_layout.addWidget(debug_lbl, 4, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._debug, 4, 1, QtCore.Qt.AlignLeft)
controls_layout.setColumnStretch(2, 1000)
controls_vbox = QtGui.QVBoxLayout()
controls_vbox.addLayout(controls_layout)
controls_vbox.addStretch()
controls_widget = QtGui.QWidget()
controls_widget.setLayout(controls_vbox)
return controls_widget
# -------------------------------------------------------------------------
def _updateFrange(self):
render_node_path = self._write_node_select.itemData(
self._write_node_select.currentIndex())
render_node = self.session.hou.node(render_node_path)
self._frame_start.setValue(render_node.evalParm('f1'))
self._frame_end.setValue(render_node.evalParm('f2'))
self._frame_step.setValue(render_node.evalParm('f3'))
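# -----------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical, not part of this module): from a
# Houdini session with the DPA pipeline loaded, the dialog would typically be
# launched along these lines:
#
#     dialog = HoudiniDarkKnightDialog()
#     dialog.show()
#
# accept() then drives the product creation and queue submission implemented
# above.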
|
|
# -*- coding: utf-8 -*-
"""
core
~~~~
Core functionality shared between the extension and the decorator.
:copyright: (c) 2022 by Ashley Sommer (based on flask-cors by Cory Dolphin).
:license: MIT, see LICENSE for more details.
"""
import re
import logging
import collections.abc
from datetime import timedelta
try:
# Sanic compat Header from Sanic v19.9.0 and above
from sanic.compat import Header as CIMultiDict
except ImportError:
try:
# Sanic server CIMultiDict from Sanic v0.8.0 and above
from sanic.server import CIMultiDict
except ImportError:
raise RuntimeError("Your version of sanic does not support "
"CIMultiDict")
LOG = logging.getLogger(__name__)
# Response Headers
ACL_ORIGIN = 'Access-Control-Allow-Origin'
ACL_METHODS = 'Access-Control-Allow-Methods'
ACL_ALLOW_HEADERS = 'Access-Control-Allow-Headers'
ACL_EXPOSE_HEADERS = 'Access-Control-Expose-Headers'
ACL_CREDENTIALS = 'Access-Control-Allow-Credentials'
ACL_MAX_AGE = 'Access-Control-Max-Age'
# Request Header
ACL_REQUEST_METHOD = 'Access-Control-Request-Method'
ACL_REQUEST_HEADERS = 'Access-Control-Request-Headers'
ALL_METHODS = ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE']
CONFIG_OPTIONS = ['CORS_ORIGINS', 'CORS_METHODS', 'CORS_ALLOW_HEADERS',
'CORS_EXPOSE_HEADERS', 'CORS_SUPPORTS_CREDENTIALS',
'CORS_MAX_AGE', 'CORS_SEND_WILDCARD',
'CORS_AUTOMATIC_OPTIONS', 'CORS_VARY_HEADER',
'CORS_RESOURCES', 'CORS_INTERCEPT_EXCEPTIONS',
'CORS_ALWAYS_SEND']
# Attribute added to request object by decorator to indicate that CORS
# was evaluated, in case the decorator and extension are both applied
# to a view.
# TODO: Refactor these two flags down into one flag.
SANIC_CORS_EVALUATED = '_sanic_cors_e'
SANIC_CORS_SKIP_RESPONSE_MIDDLEWARE = "_sanic_cors_srm"
# Strange, but this gets the type of a compiled regex, which is otherwise not
# exposed in a public API.
RegexObject = type(re.compile(''))
DEFAULT_OPTIONS = dict(origins='*',
methods=ALL_METHODS,
allow_headers='*',
expose_headers=None,
supports_credentials=False,
max_age=None,
send_wildcard=False,
automatic_options=True,
vary_header=True,
resources=r'/*',
intercept_exceptions=True,
always_send=True)
def parse_resources(resources):
if isinstance(resources, dict):
# To make the API more consistent with the decorator, allow a
# resource of '*', which is not actually a valid regexp.
resources = [(re_fix(k), v) for k, v in resources.items()]
# Sort by regex length to provide consistency of matching and
# to provide a proxy for specificity of match. E.G. longer
# regular expressions are tried first.
def pattern_length(pair):
maybe_regex, _ = pair
return len(get_regexp_pattern(maybe_regex))
return sorted(resources,
key=pattern_length,
reverse=True)
elif isinstance(resources, str):
return [(re_fix(resources), {})]
elif isinstance(resources, collections.abc.Iterable):
return [(re_fix(r), {}) for r in resources]
# Type of compiled regex is not part of the public API. Test for this
# at runtime.
elif isinstance(resources, RegexObject):
return [(re_fix(resources), {})]
else:
raise ValueError("Unexpected value for resources argument.")
def get_regexp_pattern(regexp):
"""
Helper that returns regexp pattern from given value.
:param regexp: regular expression to stringify
:type regexp: _sre.SRE_Pattern or str
:returns: string representation of given regexp pattern
:rtype: str
"""
try:
return regexp.pattern
except AttributeError:
return str(regexp)
def get_cors_origins(options, request_origin):
origins = options.get('origins')
wildcard = r'.*' in origins
# If the Origin header is not present terminate this set of steps.
# The request is outside the scope of this specification.-- W3Spec
if request_origin:
LOG.debug("CORS request received with 'Origin' %s", request_origin)
# If the allowed origins is an asterisk or 'wildcard', always match
if wildcard and options.get('send_wildcard'):
LOG.debug("Allowed origins are set to '*'. Sending wildcard CORS header.")
return ['*']
# If the value of the Origin header is a case-sensitive match
# for any of the values in list of origins
elif try_match_any(request_origin, origins):
LOG.debug("The request's Origin header matches. Sending CORS headers.", )
# Add a single Access-Control-Allow-Origin header, with either
# the value of the Origin header or the string "*" as value.
# -- W3Spec
return [request_origin]
else:
LOG.debug("The request's Origin header does not match any of allowed origins.")
return None
elif options.get('always_send'):
if wildcard:
            # If a wildcard is in the origins, even if 'send_wildcard' is
            # False, simply send the wildcard. It is the most likely to be
            # correct thing to do (the only other option is to return
            # nothing, which is probably not what you want if you specified
            # origins as '*').
return ['*']
else:
# Return all origins that are not regexes.
return sorted([o for o in origins if not probably_regex(o)])
# Terminate these steps, return the original request untouched.
else:
LOG.debug("The request did not contain an 'Origin' header. "
"This means the browser or client did not request CORS, ensure the Origin Header is set.")
return None
def get_allow_headers(options, acl_request_headers):
if acl_request_headers:
request_headers = [h.strip() for h in acl_request_headers.split(',')]
# any header that matches in the allow_headers
matching_headers = filter(
lambda h: try_match_any(h, options.get('allow_headers')),
request_headers
)
return ', '.join(sorted(matching_headers))
return None
def get_cors_headers(options, request_headers, request_method):
origins_to_set = get_cors_origins(options, request_headers.get('Origin'))
headers = CIMultiDict()
if not origins_to_set: # CORS is not enabled for this route
return headers
for origin in origins_to_set:
        # TODO: with CIDict, this will only allow one origin
# With CIMultiDict it should work with multiple
headers[ACL_ORIGIN] = origin
headers[ACL_EXPOSE_HEADERS] = options.get('expose_headers')
if options.get('supports_credentials'):
        headers[ACL_CREDENTIALS] = 'true'  # case sensitive
# This is a preflight request
# http://www.w3.org/TR/cors/#resource-preflight-requests
if request_method == 'OPTIONS':
acl_request_method = request_headers.get(ACL_REQUEST_METHOD, '').upper()
# If there is no Access-Control-Request-Method header or if parsing
# failed, do not set any additional headers
if acl_request_method and acl_request_method in options.get('methods'):
# If method is not a case-sensitive match for any of the values in
# list of methods do not set any additional headers and terminate
# this set of steps.
headers[ACL_ALLOW_HEADERS] = get_allow_headers(options, request_headers.get(ACL_REQUEST_HEADERS))
headers[ACL_MAX_AGE] = str(options.get('max_age')) # sanic cannot handle integers in header values.
headers[ACL_METHODS] = options.get('methods')
else:
LOG.info("The request's Access-Control-Request-Method header does not match allowed methods. "
"CORS headers will not be applied.")
# http://www.w3.org/TR/cors/#resource-implementation
if options.get('vary_header'):
# Only set header if the origin returned will vary dynamically,
# i.e. if we are not returning an asterisk, and there are multiple
# origins that can be matched.
if headers[ACL_ORIGIN] == '*':
pass
elif (len(options.get('origins')) > 1 or
len(origins_to_set) > 1 or
any(map(probably_regex, options.get('origins')))):
headers['Vary'] = 'Origin'
return CIMultiDict((k, v) for k, v in headers.items() if v)
def set_cors_headers(req, resp, req_context, options):
"""
Performs the actual evaluation of Sanic-CORS options and actually
modifies the response object.
This function is used in the decorator, the CORS exception wrapper,
and the after_request callback
:param sanic.request.Request req:
"""
# If CORS has already been evaluated via the decorator, skip
if req_context is not None:
evaluated = getattr(req_context, SANIC_CORS_EVALUATED, False)
if evaluated:
            LOG.debug('CORS has already been evaluated, skipping')
return resp
# `resp` can be None or [] in the case of using Websockets
# however this case should have been handled in the `extension` and `decorator` methods
# before getting here. This is a final failsafe check to prevent crashing
if not resp:
return None
if resp.headers is None:
resp.headers = CIMultiDict()
headers_to_set = get_cors_headers(options, req.headers, req.method)
    LOG.debug('Setting CORS headers: %s', str(headers_to_set))
for k, v in headers_to_set.items():
try:
resp.headers.add(k, v)
        except Exception:
resp.headers[k] = v
return resp
def probably_regex(maybe_regex):
if isinstance(maybe_regex, RegexObject):
return True
else:
        common_regex_chars = ['*', '\\', ']', '?']
# Use common characters used in regular expressions as a proxy
# for if this string is in fact a regex.
return any((c in maybe_regex for c in common_regex_chars))
def re_fix(reg):
"""
Replace the invalid regex r'*' with the valid, wildcard regex r'/.*' to
enable the CORS app extension to have a more user friendly api.
"""
return r'.*' if reg == r'*' else reg
def try_match_any(inst, patterns):
return any(try_match(inst, pattern) for pattern in patterns)
def try_match(request_origin, maybe_regex):
"""Safely attempts to match a pattern or string to a request origin."""
if isinstance(maybe_regex, RegexObject):
return re.match(maybe_regex, request_origin)
elif probably_regex(maybe_regex):
return re.match(maybe_regex, request_origin, flags=re.IGNORECASE)
else:
try:
return request_origin.lower() == maybe_regex.lower()
except AttributeError:
return request_origin == maybe_regex
def get_cors_options(appInstance, *dicts):
"""
Compute CORS options for an application by combining the DEFAULT_OPTIONS,
the app's configuration-specified options and any dictionaries passed. The
last specified option wins.
"""
options = DEFAULT_OPTIONS.copy()
options.update(get_app_kwarg_dict(appInstance))
if dicts:
for d in dicts:
options.update(d)
return serialize_options(options)
def get_app_kwarg_dict(appInstance):
"""Returns the dictionary of CORS specific app configurations."""
# In order to support blueprints which do not have a config attribute
app_config = getattr(appInstance, 'config', {})
return dict(
(k.lower().replace('cors_', ''), app_config.get(k))
for k in CONFIG_OPTIONS
if app_config.get(k) is not None
)
def flexible_str(obj):
"""
A more flexible str function which intelligently handles stringifying
    strings, lists and other iterables. The results are lexicographically sorted
to ensure generated responses are consistent when iterables such as Set
are used.
"""
if obj is None:
return None
    elif (not isinstance(obj, str)
and isinstance(obj, collections.abc.Iterable)):
return ', '.join(str(item) for item in sorted(obj))
else:
return str(obj)
def serialize_option(options_dict, key, upper=False):
if key in options_dict:
value = flexible_str(options_dict[key])
options_dict[key] = value.upper() if upper else value
def ensure_iterable(inst):
"""
Wraps scalars or string types as a list, or returns the iterable instance.
"""
if isinstance(inst, str):
return [inst]
elif not isinstance(inst, collections.abc.Iterable):
return [inst]
else:
return inst
def sanitize_regex_param(param):
return [re_fix(x) for x in ensure_iterable(param)]
def serialize_options(opts):
"""
    A helper method to serialize and process the options dictionary.
"""
options = (opts or {}).copy()
for key in opts.keys():
if key not in DEFAULT_OPTIONS:
LOG.warning("Unknown option passed to Sanic-CORS: %s", key)
# Ensure origins is a list of allowed origins with at least one entry.
options['origins'] = sanitize_regex_param(options.get('origins'))
options['allow_headers'] = sanitize_regex_param(options.get('allow_headers'))
# This is expressly forbidden by the spec. Raise a value error so people
# don't get burned in production.
if r'.*' in options['origins'] and options['supports_credentials'] and options['send_wildcard']:
raise ValueError("Cannot use supports_credentials in conjunction with"
"an origin string of '*'. See: "
"http://www.w3.org/TR/cors/#resource-requests")
serialize_option(options, 'expose_headers')
serialize_option(options, 'methods', upper=True)
if isinstance(options.get('max_age'), timedelta):
options['max_age'] = str(int(options['max_age'].total_seconds()))
return options
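# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of Sanic-CORS itself): a minimal
# demonstration of how the helpers above combine option processing with a
# simulated preflight request. The application object and header values are
# hypothetical stand-ins.
if __name__ == '__main__':
    class _FakeApp(object):
        # config keys mirror CONFIG_OPTIONS; the values here are made up
        config = {'CORS_ORIGINS': 'https://example.com', 'CORS_MAX_AGE': 600}

    _options = get_cors_options(_FakeApp())
    _request_headers = {
        'Origin': 'https://example.com',
        ACL_REQUEST_METHOD: 'GET',
        ACL_REQUEST_HEADERS: 'X-Custom-Header',
    }
    # Simulate a preflight (OPTIONS) request and print the CORS headers that
    # would be attached to the response.
    for _k, _v in get_cors_headers(_options, _request_headers, 'OPTIONS').items():
        print("%s: %s" % (_k, _v))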
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from functools import partial
from unittest.mock import MagicMock
import pytest
from indico.core import signals
from indico.core.db.sqlalchemy.principals import EmailPrincipal, PrincipalType
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.core.permissions import get_available_permissions
from indico.modules.events import Event
from indico.modules.events.models.principals import EventPrincipal
from indico.testing.util import bool_matrix
@pytest.fixture(autouse=True)
def _mock_available_permissions(mocker):
# The code we are testing only cares about the keys so we don't
# need to actually create permissions for now.
permissions = dict(get_available_permissions(Event), foo=None, bar=None, foobar=None)
mocker.patch('indico.core.db.sqlalchemy.protection.get_available_permissions', return_value=permissions)
mocker.patch('indico.core.db.sqlalchemy.principals.get_available_permissions', return_value=permissions)
@pytest.mark.usefixtures('request_context')
def test_remove_principal(db, create_event, create_user, dummy_user, dummy_group):
event = create_event()
event.update_principal(dummy_user, full_access=True)
event.update_principal(dummy_group, full_access=True)
db.session.flush()
assert {p.principal for p in event.acl_entries} == {dummy_user, dummy_group}
event.remove_principal(dummy_group)
assert {p.principal for p in event.acl_entries} == {dummy_user}
# removing some other user who's not in the acl doesn't do anything
event.remove_principal(create_user(123))
assert {p.principal for p in event.acl_entries} == {dummy_user}
@pytest.mark.usefixtures('request_context')
def test_update_principal(create_event, dummy_user):
event = create_event()
assert not event.acl_entries
# not changing anything -> shouldn't be added to acl
entry = event.update_principal(dummy_user)
assert entry is None
assert not event.acl_entries
# adding user with read access -> new acl entry since the user isn't in there yet
entry = initial_entry = event.update_principal(dummy_user, read_access=True)
assert not entry.permissions
assert not entry.full_access
assert entry.read_access
assert event.acl_entries == {entry}
# adding a permission
entry = event.update_principal(dummy_user, add_permissions={'foo'})
assert entry is initial_entry
assert set(entry.permissions) == {'foo'}
assert not entry.full_access
assert entry.read_access
# adding yet another permission
entry = event.update_principal(dummy_user, add_permissions={'foo', 'bar'})
assert entry is initial_entry
assert set(entry.permissions) == {'foo', 'bar'}
assert not entry.full_access
assert entry.read_access
# replacing the permissions
entry = event.update_principal(dummy_user, permissions={'bar', 'foobar'})
assert entry is initial_entry
assert set(entry.permissions) == {'bar', 'foobar'}
assert not entry.full_access
assert entry.read_access
# removing explicit read access but adding full manager access instead
entry = event.update_principal(dummy_user, read_access=False, full_access=True)
assert entry is initial_entry
assert set(entry.permissions) == {'bar', 'foobar'}
assert entry.full_access
assert not entry.read_access
# removing a permission
entry = event.update_principal(dummy_user, del_permissions={'foobar', 'foo'})
assert entry is initial_entry
assert set(entry.permissions) == {'bar'}
assert entry.full_access
assert not entry.read_access
# removing the remaining access
entry = event.update_principal(dummy_user, del_permissions={'bar'}, full_access=False)
assert entry is None
assert not event.acl_entries
@pytest.mark.usefixtures('request_context')
def test_update_principal_signal(create_event, dummy_user):
event = create_event()
calls = []
def _signal_fn(sender, **kwargs):
assert not calls
calls.append(kwargs)
with signals.acl.entry_changed.connected_to(_signal_fn, sender=Event):
# not in acl
event.remove_principal(dummy_user)
assert not calls
# not added
event.update_principal(dummy_user)
assert not calls
# adding new entry
entry = event.update_principal(dummy_user, full_access=True, permissions={'foo'})
call = calls.pop()
assert call['is_new']
assert call['obj'] is event
assert call['principal'] == dummy_user
assert call['entry'] == entry
assert call['old_data'] == {'read_access': False, 'full_access': False, 'permissions': set()}
assert not call['quiet']
# updating entry
event.update_principal(dummy_user, add_permissions={'bar'})
call = calls.pop()
assert not call['is_new']
assert call['obj'] is event
assert call['principal'] == dummy_user
assert call['entry'] == entry
assert call['old_data'] == {'read_access': False, 'full_access': True, 'permissions': {'foo'}}
assert not call['quiet']
# removing entry + quiet
event.update_principal(dummy_user, full_access=False, permissions=set(), quiet=True)
call = calls.pop()
assert not call['is_new']
assert call['obj'] is event
assert call['principal'] == dummy_user
assert call['entry'] is None
assert call['old_data'] == {'read_access': False, 'full_access': True, 'permissions': {'foo', 'bar'}}
assert call['quiet']
@pytest.mark.usefixtures('request_context')
def test_update_principal_resolve_email(create_event, create_user):
event = create_event()
user = create_user(123, email='user@example.com')
# add email that belongs to a user
entry = event.update_principal(EmailPrincipal('user@example.com'), full_access=True)
assert entry.principal == user
assert entry.type == PrincipalType.user
# add email that has no user associated
entry = event.update_principal(EmailPrincipal('unknown@example.com'), full_access=True)
assert entry.principal == EmailPrincipal('unknown@example.com')
assert entry.type == PrincipalType.email
@pytest.mark.usefixtures('request_context')
def test_convert_email_principals(db, create_event, create_user, dummy_user):
event = create_event()
user = create_user(123, email='user@example.com')
principal = EmailPrincipal('unknown@example.com')
other_entry = event.update_principal(dummy_user, full_access=True, permissions={'foo', 'foobar'})
entry = event.update_principal(principal, read_access=True, permissions={'foo', 'bar'})
other_entry_data = other_entry.current_data
entry_data = entry.current_data
# different emails for now -> nothing updated
assert not EventPrincipal.replace_email_with_user(user, 'event')
assert set(event.acl_entries) == {entry, other_entry}
user.secondary_emails.add(principal.email)
assert EventPrincipal.replace_email_with_user(user, 'event') == {event}
assert set(event.acl_entries) == {entry, other_entry}
assert all(x.type == PrincipalType.user for x in event.acl_entries)
db.session.expire(other_entry)
db.session.expire(entry)
assert entry.current_data == entry_data
assert other_entry.current_data == other_entry_data
@pytest.mark.usefixtures('request_context')
def test_convert_email_principals_merge(db, create_event, create_user):
event = create_event()
user = create_user(123, email='user@example.com')
principal = EmailPrincipal('unknown@example.com')
entry1 = event.update_principal(user, full_access=True, permissions={'foo', 'foobar'})
entry2 = event.update_principal(principal, read_access=True, permissions={'foo', 'bar'})
# different emails for now -> nothing updated
assert not EventPrincipal.replace_email_with_user(user, 'event')
assert set(event.acl_entries) == {entry1, entry2}
user.secondary_emails.add(principal.email)
assert EventPrincipal.replace_email_with_user(user, 'event') == {event}
assert len(event.acl_entries) == 1
entry = list(event.acl_entries)[0]
assert entry.full_access
assert entry.read_access
assert set(entry.permissions) == {'foo', 'bar', 'foobar'}
def test_update_principal_errors(create_event, dummy_user):
event = create_event()
with pytest.raises(ValueError):
event.update_principal(dummy_user, permissions={'foo'}, add_permissions={'bar'})
with pytest.raises(ValueError):
event.update_principal(dummy_user, permissions={'foo'}, del_permissions={'bar'})
with pytest.raises(ValueError):
event.update_principal(dummy_user, permissions={'invalid'})
def test_can_access_key_outside_context(create_event):
event = create_event(protection_mode=ProtectionMode.protected, access_key='12345')
assert not event.can_access(None)
def test_check_access_key(create_event):
event = create_event(protection_mode=ProtectionMode.protected, access_key='12345')
assert not event.check_access_key('foobar')
assert event.check_access_key('12345')
@pytest.mark.usefixtures('request_context')
def test_can_access_key(create_event):
event = create_event(protection_mode=ProtectionMode.protected, access_key='12345')
assert not event.can_access(None)
event.set_session_access_key('12345')
assert event.can_access(None)
event.set_session_access_key('foobar')
assert not event.can_access(None)
# make sure we never accept empty access keys
event.access_key = ''
event.set_session_access_key('')
assert not event.can_access(None)
@pytest.mark.usefixtures('request_context')
def test_can_manage_permissions(create_event, dummy_user):
event = create_event()
assert not event.can_manage(dummy_user, permission='ANY')
event.update_principal(dummy_user, permissions={'foo'})
assert not event.can_manage(dummy_user)
assert not event.can_manage(dummy_user, permission='bar')
assert event.can_manage(dummy_user, permission='foo')
assert event.can_manage(dummy_user, permission='ANY')
@pytest.mark.parametrize(('signal_rv_1', 'signal_rv_2', 'allowed'), (
(False, False, False),
(False, True, False),
(True, True, True)
))
def test_can_manage_signal_override(create_event, dummy_user, signal_rv_1, signal_rv_2, allowed):
event = create_event()
def _signal_fn(sender, obj, user, rv, **kwargs):
assert obj is event
assert user is dummy_user
return rv
with signals.acl.can_manage.connected_to(partial(_signal_fn, rv=signal_rv_1), sender=Event):
with signals.acl.can_manage.connected_to(partial(_signal_fn, rv=signal_rv_2), sender=Event):
assert event.can_manage(dummy_user) == allowed
@pytest.mark.parametrize(('signal_rv_1', 'signal_rv_2', 'allowed'), (
(False, False, False),
(False, True, False),
(True, True, True)
))
def test_can_access_signal_override(create_event, dummy_user, signal_rv_1, signal_rv_2, allowed):
event = create_event()
def _signal_fn(sender, obj, user, rv, **kwargs):
assert obj is event
assert user is dummy_user
return rv
with signals.acl.can_access.connected_to(partial(_signal_fn, rv=signal_rv_1), sender=Event):
with signals.acl.can_access.connected_to(partial(_signal_fn, rv=signal_rv_2), sender=Event):
assert event.can_access(dummy_user) == allowed
def test_can_access_signal_override_calls(create_event, dummy_user):
event = create_event()
def _signal_fn_noreturn(sender, authorized, **kwargs):
calls.append(authorized)
def _signal_fn_early(sender, authorized, **kwargs):
calls.append(authorized)
return True if authorized is None else None
def _signal_fn_late(sender, authorized, **kwargs):
calls.append(authorized)
return True if authorized is not None else None
# early check - signal only invoked once since we return something
calls = []
with signals.acl.can_access.connected_to(_signal_fn_early, sender=Event):
event.can_access(dummy_user)
assert calls == [None]
# signal invoked twice (nothing returned)
calls = []
with signals.acl.can_access.connected_to(_signal_fn_noreturn, sender=Event):
event.can_access(dummy_user)
assert calls == [None, True]
# late check - signal invoked twice, once with the regular access state
calls = []
with signals.acl.can_access.connected_to(_signal_fn_late, sender=Event):
event.can_access(dummy_user)
assert calls == [None, True]
# late check - signal invoked twice, once with the regular access state
calls = []
event.protection_mode = ProtectionMode.protected
with signals.acl.can_access.connected_to(_signal_fn_late, sender=Event):
event.can_access(dummy_user)
assert calls == [None, False]
@pytest.mark.parametrize(('is_admin', 'allow_admin', 'not_explicit', 'expected'), bool_matrix('...', expect=all))
def test_can_manage_admin(create_event, create_user, is_admin, allow_admin, not_explicit, expected):
event = create_event()
user = create_user(123, admin=is_admin)
assert event.can_manage(user, allow_admin=allow_admin, explicit_permission=not not_explicit) == expected
def test_can_manage_guest(create_event, dummy_category):
event = create_event()
# we grant explicit management access on the parent to ensure that
# we don't even check there but bail out early
event.category = dummy_category
event.category.can_manage = MagicMock(return_value=True)
assert not event.can_manage(None)
@pytest.mark.parametrize('can_manage_parent', (True, False))
def test_can_manage_parent(create_event, dummy_category, dummy_user, can_manage_parent):
event = create_event()
event.category = dummy_category
event.category.can_manage = MagicMock(return_value=can_manage_parent)
assert event.can_manage(dummy_user) == can_manage_parent
event.category.can_manage.assert_called_once_with(dummy_user, allow_admin=True)
def test_can_manage_parent_invalid(create_event, dummy_user):
event = create_event()
event.__dict__['category'] = MagicMock(spec=[])
with pytest.raises(TypeError):
event.can_manage(dummy_user)
def test_can_manage_permissions_invalid(create_event, dummy_user):
event = create_event()
with pytest.raises(ValueError):
event.can_manage(dummy_user, permission='invalid')
def test_merge_privs():
p = EventPrincipal(read_access=True, permissions={'foo', 'bar'})
p.merge_privs(EventPrincipal(permissions={'bar', 'foobar'}, full_access=True))
assert p.read_access
assert p.full_access
assert set(p.permissions) == {'foo', 'bar', 'foobar'}
def test_has_management_permission_full_access():
p = EventPrincipal(full_access=True, permissions=[])
assert p.has_management_permission()
assert p.has_management_permission('foo')
assert p.has_management_permission('ANY')
@pytest.mark.usefixtures('request_context')
def test_has_management_permission_full_access_db(create_event, dummy_user, create_user):
event = create_event()
event.update_principal(create_user(123), permissions={'bar'})
entry = event.update_principal(dummy_user, full_access=True)
def _query(*args):
return (EventPrincipal.query
.filter(EventPrincipal.event == event,
EventPrincipal.has_management_permission(*args)))
assert _query().one() == entry
assert _query('foo').one() == entry
assert _query('ANY').count() == 2
def test_has_management_permission_no_access():
p = EventPrincipal(read_access=True, permissions=[])
assert not p.has_management_permission()
assert not p.has_management_permission('foo')
assert not p.has_management_permission('ANY')
@pytest.mark.usefixtures('request_context')
def test_has_management_permission_no_access_db(create_event, dummy_user):
event = create_event()
event.update_principal(dummy_user, read_access=True)
def _query(*args):
return (EventPrincipal.query
.filter(EventPrincipal.event == event,
EventPrincipal.has_management_permission(*args)))
assert not _query().count()
assert not _query('foo').count()
assert not _query('ANY').count()
@pytest.mark.parametrize('explicit', (True, False))
def test_has_management_permission_explicit(explicit):
p = EventPrincipal(full_access=True, permissions=['foo'])
assert p.has_management_permission('foo', explicit=explicit)
assert p.has_management_permission('ANY', explicit=explicit)
assert p.has_management_permission('bar', explicit=explicit) == (not explicit)
assert (EventPrincipal(full_access=True, permissions=[]).has_management_permission('ANY', explicit=explicit) ==
(not explicit))
@pytest.mark.parametrize('explicit', (True, False))
@pytest.mark.usefixtures('request_context')
def test_has_management_permission_explicit_db(create_event, dummy_user, create_user, explicit):
event = create_event()
event.update_principal(create_user(123), full_access=True)
event.update_principal(dummy_user, full_access=True, permissions={'foo'})
def _query(permission):
return (EventPrincipal.query
.filter(EventPrincipal.event == event,
EventPrincipal.has_management_permission(permission, explicit=explicit)))
assert _query('foo').count() == (1 if explicit else 2)
assert _query('bar').count() == (0 if explicit else 2)
assert _query('ANY').count() == (1 if explicit else 2)
def test_has_management_permission_explicit_fail():
p = EventPrincipal(permissions=['foo'])
# no permission specified
with pytest.raises(ValueError):
p.has_management_permission(explicit=True)
with pytest.raises(ValueError):
EventPrincipal.has_management_permission(explicit=True)
def test_has_management_permission():
p = EventPrincipal(permissions=['foo'])
assert p.has_management_permission('ANY')
assert p.has_management_permission('foo')
assert not p.has_management_permission('bar')
@pytest.mark.usefixtures('request_context')
def test_has_management_permission_db(create_event, create_user, dummy_user):
event = create_event()
event.update_principal(create_user(123), permissions={'bar'})
entry = event.update_principal(dummy_user, permissions={'foo'})
def _query(*args):
return (EventPrincipal.query
.filter(EventPrincipal.event == event,
EventPrincipal.has_management_permission(*args)))
assert not _query().count()
assert _query('foo').one() == entry
assert _query('ANY').count() == 2
|
|
import os
from unittest import TestCase
from unittest.mock import Mock
import pandora.clientbuilder as cb
from pandora.client import APIClient
from pandora.transport import DEFAULT_API_HOST
class TestTranslatingDict(TestCase):
class TestDict(cb.TranslatingDict):
KEY_TRANSLATIONS = {"FOO": "BAR"}
VALUE_TRANSLATIONS = {"BAZ": lambda v: v + 1}
callback_value = None
def was_translated(self, from_key, to_key):
self.callback_value = (from_key, to_key)
def setUp(self):
self.dct = self.TestDict()
def test_construction_with_dict(self):
dct = self.TestDict({"BIZ": 1, "BUZ": 2})
self.assertEqual(1, dct["BIZ"])
self.assertEqual(2, dct["BUZ"])
def test_construction_with_list(self):
dct = self.TestDict([("key", "value")])
self.assertEqual("value", dct["KEY"])
def test_key_translation(self):
self.dct.put(" TEST ", "value")
self.dct.put("MoRe", 1)
self.dct.put("foo", True)
self.assertEqual("value", self.dct["TEST"])
self.assertEqual(1, self.dct["MORE"])
self.assertEqual(True, self.dct["BAR"])
def test_value_translation(self):
dct = self.TestDict({" Baz": 41})
self.assertEqual(42, dct["BAZ"])
def test_setitem(self):
self.dct["Foo"] = "bar"
self.assertEqual("bar", self.dct["BAR"])
def test_put(self):
self.dct.put("Foo", "bar")
self.assertEqual("bar", self.dct["BAR"])
def test_key_translation_hook(self):
self.dct.put("Foo", "bar")
self.assertEqual(("FOO", "BAR"), self.dct.callback_value)
class TestSettingsDictBuilder(TestCase):
@classmethod
def _build_minimal(self):
return cb.SettingsDictBuilder(
{
"DECRYPTION_KEY": "blowfishkey",
"ENCRYPTION_KEY": "blowfishkey",
"PARTNER_USER": "user",
"PARTNER_PASSWORD": "pass",
"DEVICE": "dev",
}
).build()
@classmethod
def _build_maximal(self):
return cb.SettingsDictBuilder(
{
"DECRYPTION_KEY": "blowfishkey",
"ENCRYPTION_KEY": "blowfishkey",
"PARTNER_USER": "user",
"PARTNER_PASSWORD": "pass",
"DEVICE": "dev",
"PROXY": "proxy.example.com",
"AUDIO_QUALITY": "high",
"API_HOST": "example.com",
}
).build()
def test_building(self):
client = TestSettingsDictBuilder._build_minimal()
self.assertTrue(isinstance(client, APIClient))
def test_default_values(self):
client = TestSettingsDictBuilder._build_minimal()
self.assertEqual({}, client.transport._http.proxies)
self.assertEqual(DEFAULT_API_HOST, client.transport.api_host)
self.assertEqual(
APIClient.MED_AUDIO_QUALITY, client.default_audio_quality
)
def test_validate_client(self):
client = TestSettingsDictBuilder._build_maximal()
expected_proxies = {
"http": "proxy.example.com",
"https": "proxy.example.com",
}
self.assertIsNotNone(client.transport.cryptor.bf_in)
self.assertIsNotNone(client.transport.cryptor.bf_out)
self.assertEqual("user", client.partner_user)
self.assertEqual("pass", client.partner_password)
self.assertEqual("dev", client.device)
self.assertEqual(expected_proxies, client.transport._http.proxies)
self.assertEqual("example.com", client.transport.api_host)
self.assertEqual("high", client.default_audio_quality)
class TestFileBasedBuilder(TestCase):
class StubBuilder(cb.FileBasedClientBuilder):
DEFAULT_CONFIG_FILE = "foo"
def parse_config(self):
return {"USER": {"USERNAME": "U", "PASSWORD": "P"}}
def build_from_settings_dict(self, config):
mock = Mock()
mock.login = Mock()
return mock
def test_default_config(self):
builder = self.StubBuilder()
self.assertEqual("foo", builder.path)
def test_setting_valid_path(self):
builder = cb.FileBasedClientBuilder(__file__)
self.assertTrue(builder.file_exists)
self.assertEqual(__file__, builder.path)
def test_setting_invalid_path(self):
builder = cb.FileBasedClientBuilder("nowhere")
with self.assertRaises(IOError):
builder.build()
self.assertFalse(builder.file_exists)
def test_setting_user_path(self):
builder = cb.FileBasedClientBuilder("~/")
self.assertEqual(os.path.expanduser("~/"), builder.path)
def test_logging_in(self):
client = self.StubBuilder(__file__, True).build()
client.login.assert_called_once_with("U", "P")
def test_not_logging_in(self):
client = self.StubBuilder(__file__, False).build()
self.assertFalse(client.login.called)
def test_abstract_class_does_not_parse_config(self):
with self.assertRaises(NotImplementedError):
cb.FileBasedClientBuilder().parse_config()
class TestPydoraConfigFileBuilder(TestCase):
def test_cfg_to_dict(self):
cfg = Mock()
cfg.items = Mock(return_value=[("a", "b"), ("c", "d")])
dct = cb.PydoraConfigFileBuilder.cfg_to_dict(cfg, "foo")
self.assertEqual("b", dct["A"])
self.assertEqual("d", dct["C"])
def test_integration(self):
path = os.path.join(os.path.dirname(__file__), "pydora.cfg")
cfg = cb.PydoraConfigFileBuilder(path).parse_config()
self.assertDictEqual(
cfg,
{
"AUDIO_QUALITY": "test_quality",
"DECRYPTION_KEY": "test_decryption_key",
"DEVICE": "test_device",
"ENCRYPTION_KEY": "test_encryption_key",
"PARTNER_PASSWORD": "test_partner_password",
"PARTNER_USER": "test_partner_username",
"API_HOST": "test_host",
"USER": {
"USERNAME": "test_username",
"PASSWORD": "test_password",
},
},
)
class TestPianobarConfigFileBuilder(TestCase):
def test_integration(self):
path = os.path.join(os.path.dirname(__file__), "pianobar.cfg")
cfg = cb.PianobarConfigFileBuilder(path).parse_config()
self.assertDictEqual(
cfg,
{
"AUDIO_QUALITY": "test_qualityQuality",
"DECRYPTION_KEY": "test_decryption_key",
"DEVICE": "test_device",
"ENCRYPTION_KEY": "test_encryption_key",
"PARTNER_PASSWORD": "test_partner_password",
"PARTNER_USER": "test_partner_username",
"API_HOST": "test_host/services/json/",
"PROXY": "test_proxy",
"USER": {
"USERNAME": "test_username",
"PASSWORD": "test_password",
},
},
)
|
|
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from warnings import catch_warnings
import unittest2 as unittest
from mock.tests.support import is_instance
from mock import MagicMock, Mock, patch, sentinel, mock_open, call
something = sentinel.Something
something_else = sentinel.SomethingElse
class WithTest(unittest.TestCase):
def test_with_statement(self):
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
self.assertEqual(something, sentinel.Something)
def test_with_statement_exception(self):
try:
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
raise Exception('pow')
except Exception:
pass
else:
self.fail("patch swallowed exception")
self.assertEqual(something, sentinel.Something)
def test_with_statement_as(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertTrue(is_instance(mock_something, MagicMock),
"patching wrong type")
self.assertEqual(something, sentinel.Something)
def test_patch_object_with_statement(self):
class Foo(object):
something = 'foo'
original = Foo.something
with patch.object(Foo, 'something'):
self.assertNotEqual(Foo.something, original, "unpatched")
self.assertEqual(Foo.something, original)
def test_with_statement_nested(self):
with catch_warnings(record=True):
with patch('%s.something' % __name__) as mock_something:
with patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_with_statement_specified(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(mock_something, sentinel.Patched, "wrong patch")
self.assertEqual(something, sentinel.Something)
def testContextManagerMocking(self):
mock = Mock()
mock.__enter__ = Mock()
mock.__exit__ = Mock()
mock.__exit__.return_value = False
with mock as m:
self.assertEqual(m, mock.__enter__.return_value)
mock.__enter__.assert_called_with()
mock.__exit__.assert_called_with(None, None, None)
def test_context_manager_with_magic_mock(self):
mock = MagicMock()
with self.assertRaises(TypeError):
with mock:
'foo' + 3
mock.__enter__.assert_called_with()
self.assertTrue(mock.__exit__.called)
def test_with_statement_same_attribute(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something' % __name__) as mock_again:
self.assertEqual(something, mock_again, "unpatched")
self.assertEqual(something, mock_something,
"restored with wrong instance")
self.assertEqual(something, sentinel.Something, "not restored")
def test_with_statement_imbricated(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_dict_context_manager(self):
foo = {}
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
self.assertEqual(foo, {})
with self.assertRaises(NameError):
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
raise NameError('Konrad')
self.assertEqual(foo, {})
class TestMockOpen(unittest.TestCase):
def test_mock_open(self):
mock = mock_open()
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_mock_open_context_manager(self):
mock = mock_open()
with patch('%s.open' % __name__, mock, create=True):
with open('foo') as f:
f.read()
expected_calls = [call('foo'), call().__enter__(), call().read(),
call().__exit__(None, None, None)]
self.assertEqual(mock.mock_calls, expected_calls)
# mock_open.return_value is no longer static, because
# readline support requires that it mutate state
def test_mock_open_context_manager_multiple_times(self):
mock = mock_open()
with patch('%s.open' % __name__, mock, create=True):
with open('foo') as f:
f.read()
with open('bar') as f:
f.read()
expected_calls = [
call('foo'), call().__enter__(), call().read(),
call().__exit__(None, None, None),
call('bar'), call().__enter__(), call().read(),
call().__exit__(None, None, None)]
self.assertEqual(mock.mock_calls, expected_calls)
def test_explicit_mock(self):
mock = MagicMock()
mock_open(mock)
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_read_data(self):
mock = mock_open(read_data='foo')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.read()
self.assertEqual(result, 'foo')
def test_readline_data(self):
# Check that readline will return all the lines from the fake file
mock = mock_open(read_data='foo\nbar\nbaz\n')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
line1 = h.readline()
line2 = h.readline()
line3 = h.readline()
self.assertEqual(line1, 'foo\n')
self.assertEqual(line2, 'bar\n')
self.assertEqual(line3, 'baz\n')
# Check that we properly emulate a file that doesn't end in a newline
mock = mock_open(read_data='foo')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.readline()
self.assertEqual(result, 'foo')
def test_readlines_data(self):
# Test that emulating a file that ends in a newline character works
mock = mock_open(read_data='foo\nbar\nbaz\n')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.readlines()
self.assertEqual(result, ['foo\n', 'bar\n', 'baz\n'])
# Test that files without a final newline will also be correctly
# emulated
mock = mock_open(read_data='foo\nbar\nbaz')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.readlines()
self.assertEqual(result, ['foo\n', 'bar\n', 'baz'])
def test_mock_open_read_with_argument(self):
# At one point calling read with an argument was broken
# for mocks returned by mock_open
some_data = 'foo\nbar\nbaz'
mock = mock_open(read_data=some_data)
self.assertEqual(mock().read(10), some_data)
def test_interleaved_reads(self):
# Test that calling read, readline, and readlines pulls data
# sequentially from the data we preload with
mock = mock_open(read_data='foo\nbar\nbaz\n')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
line1 = h.readline()
rest = h.readlines()
self.assertEqual(line1, 'foo\n')
self.assertEqual(rest, ['bar\n', 'baz\n'])
mock = mock_open(read_data='foo\nbar\nbaz\n')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
line1 = h.readline()
rest = h.read()
self.assertEqual(line1, 'foo\n')
self.assertEqual(rest, 'bar\nbaz\n')
def test_overriding_return_values(self):
mock = mock_open(read_data='foo')
handle = mock()
handle.read.return_value = 'bar'
handle.readline.return_value = 'bar'
handle.readlines.return_value = ['bar']
self.assertEqual(handle.read(), 'bar')
self.assertEqual(handle.readline(), 'bar')
self.assertEqual(handle.readlines(), ['bar'])
# call repeatedly to check that a StopIteration is not propagated
self.assertEqual(handle.readline(), 'bar')
self.assertEqual(handle.readline(), 'bar')
if __name__ == '__main__':
unittest.main()
|
|
# helper.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from __future__ import print_function
import os
import sys
from unittest import TestCase
import time
import tempfile
import shutil
import io
from git import Repo, Remote, GitCommandError, Git
from git.compat import string_types
GIT_REPO = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
__all__ = (
'fixture_path', 'fixture', 'absolute_project_path', 'StringProcessAdapter',
'with_rw_repo', 'with_rw_and_rw_remote_repo', 'TestBase', 'TestCase', 'GIT_REPO'
)
#{ Routines
def fixture_path(name):
test_dir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(test_dir, "fixtures", name)
def fixture(name):
    with open(fixture_path(name), 'rb') as fp:
        return fp.read()
def absolute_project_path():
return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
#} END routines
#{ Adapters
class StringProcessAdapter(object):
"""Allows to use strings as Process object as returned by SubProcess.Popen.
Its tailored to work with the test system only"""
def __init__(self, input_string):
self.stdout = io.BytesIO(input_string)
self.stderr = io.BytesIO()
def wait(self):
return 0
poll = wait
#} END adapters
#{ Decorators
def _mktemp(*args):
"""Wrapper around default tempfile.mktemp to fix an osx issue
:note: the OSX special case was removed as it was unclear why that was needed in the first place. It seems
to be just fine without it. However, if we leave this special case, and if TMPDIR is set to something custom,
prefixing /private/ will lead to incorrect paths on OSX."""
tdir = tempfile.mktemp(*args)
    # See :note: above to learn why this is commented out.
# if sys.platform == 'darwin':
# tdir = '/private' + tdir
return tdir
def _rmtree_onerror(osremove, fullpath, exec_info):
"""
Handle the case on windows that read-only files cannot be deleted by
os.remove by setting it to mode 777, then retry deletion.
"""
if os.name != 'nt' or osremove is not os.remove:
raise
os.chmod(fullpath, 0o777)
os.remove(fullpath)
def with_rw_repo(working_tree_ref, bare=False):
"""
Same as with_bare_repo, but clones the rorepo as non-bare repository, checking
out the working tree at the given working_tree_ref.
This repository type is more costly due to the working copy checkout.
To make working with relative paths easier, the cwd will be set to the working
dir of the repository.
"""
assert isinstance(working_tree_ref, string_types), "Decorator requires ref name for working tree checkout"
def argument_passer(func):
def repo_creator(self):
prefix = 'non_'
if bare:
prefix = ''
# END handle prefix
repo_dir = _mktemp("%sbare_%s" % (prefix, func.__name__))
rw_repo = self.rorepo.clone(repo_dir, shared=True, bare=bare, n=True)
rw_repo.head.commit = rw_repo.commit(working_tree_ref)
if not bare:
rw_repo.head.reference.checkout()
# END handle checkout
prev_cwd = os.getcwd()
os.chdir(rw_repo.working_dir)
try:
try:
return func(self, rw_repo)
except:
print("Keeping repo after failure: %s" % repo_dir, file=sys.stderr)
repo_dir = None
raise
finally:
os.chdir(prev_cwd)
rw_repo.git.clear_cache()
if repo_dir is not None:
shutil.rmtree(repo_dir, onerror=_rmtree_onerror)
# END rm test repo if possible
# END cleanup
# END rw repo creator
repo_creator.__name__ = func.__name__
return repo_creator
# END argument passer
return argument_passer
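# Hedged usage sketch (not part of the original module): a test method decorated
# with with_rw_repo receives the temporary writable clone as an extra argument.
# The ref name 'HEAD' and the method name are assumptions for illustration only.
#
#     @with_rw_repo('HEAD')
#     def test_something_writable(self, rw_repo):
#         # cwd is rw_repo.working_dir for the duration of the test
#         assert not rw_repo.bare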
def with_rw_and_rw_remote_repo(working_tree_ref):
"""
Same as with_rw_repo, but also provides a writable remote repository from which the
rw_repo has been forked as well as a handle for a git-daemon that may be started to
run the remote_repo.
The remote repository was cloned as a bare repository from the rorepo, whereas
the rw repo has a working tree and was cloned from the remote repository.
remote_repo has two remotes: origin and daemon_origin. One uses a local url,
the other uses a server url. The daemon setup must be done at the system level
and should be an inetd service that serves tempfile.gettempdir() and all
directories in it.
The following sketch demonstrates this::
rorepo ---<bare clone>---> rw_remote_repo ---<clone>---> rw_repo
The test case needs to support the following signature::
def case(self, rw_repo, rw_remote_repo)
This setup allows you to test push and pull scenarios and hooks nicely.
See working dir info in with_rw_repo
:note: We attempt to launch our own invocation of git-daemon, which will be shut down at the end of the test.
"""
assert isinstance(working_tree_ref, string_types), "Decorator requires ref name for working tree checkout"
def argument_passer(func):
def remote_repo_creator(self):
remote_repo_dir = _mktemp("remote_repo_%s" % func.__name__)
repo_dir = _mktemp("remote_clone_non_bare_repo")
rw_remote_repo = self.rorepo.clone(remote_repo_dir, shared=True, bare=True)
# recursive alternates info ?
rw_repo = rw_remote_repo.clone(repo_dir, shared=True, bare=False, n=True)
rw_repo.head.commit = working_tree_ref
rw_repo.head.reference.checkout()
# prepare for git-daemon
rw_remote_repo.daemon_export = True
# this thing is just annoying !
crw = rw_remote_repo.config_writer()
section = "daemon"
try:
crw.add_section(section)
except Exception:
pass
crw.set(section, "receivepack", True)
# release lock
crw.release()
del(crw)
# initialize the remote - first do it as local remote and pull, then
# we change the url to point to the daemon. The daemon should be started
# by the user, not by us
d_remote = Remote.create(rw_repo, "daemon_origin", remote_repo_dir)
d_remote.fetch()
remote_repo_url = "git://localhost%s" % remote_repo_dir
d_remote.config_writer.set('url', remote_repo_url)
temp_dir = os.path.dirname(_mktemp())
# On windows, this will fail ... we deal with failures anyway and default to telling the user to do it
try:
gd = Git().daemon(temp_dir, enable='receive-pack', as_process=True)
# yes, I know ... fortunately, this is always going to work if sleep time is just large enough
time.sleep(0.5)
except Exception:
gd = None
# end
# try to list remotes to diagnose whether the server is up
try:
rw_repo.git.ls_remote(d_remote)
except GitCommandError as e:
# We assume in good faith that we didn't start the daemon - but make sure we kill it anyway
# Of course we expect it to work here already, but maybe there are timing constraints
# on some platforms ?
if gd is not None:
os.kill(gd.proc.pid, 15)
print(str(e))
if os.name == 'nt':
msg = "git-daemon needs to run this test, but windows does not have one. "
msg += 'Otherwise, run: git-daemon "%s"' % temp_dir
raise AssertionError(msg)
else:
msg = 'Please start a git-daemon to run this test, execute: git daemon --enable=receive-pack "%s"'
msg %= temp_dir
raise AssertionError(msg)
# END make assertion
# END catch ls remote error
# adjust working dir
prev_cwd = os.getcwd()
os.chdir(rw_repo.working_dir)
try:
return func(self, rw_repo, rw_remote_repo)
finally:
# gd.proc.kill() ... no idea why that doesn't work
if gd is not None:
os.kill(gd.proc.pid, 15)
os.chdir(prev_cwd)
rw_repo.git.clear_cache()
rw_remote_repo.git.clear_cache()
shutil.rmtree(repo_dir, onerror=_rmtree_onerror)
shutil.rmtree(remote_repo_dir, onerror=_rmtree_onerror)
if gd is not None:
gd.proc.wait()
# END cleanup
# END bare repo creator
remote_repo_creator.__name__ = func.__name__
return remote_repo_creator
# END remote repo creator
# END argument passer
return argument_passer
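# Hedged usage sketch (illustrative only): a test decorated with
# with_rw_and_rw_remote_repo must accept both repositories, e.g.
#
#     @with_rw_and_rw_remote_repo('HEAD')
#     def test_push_and_fetch(self, rw_repo, rw_remote_repo):
#         rw_repo.remotes.origin.fetch()
#
# The ref name 'HEAD' and the test body are assumptions; a reachable git-daemon
# may additionally be required for the daemon_origin remote to work.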
#} END decorators
class TestBase(TestCase):
"""
Base Class providing default functionality to all tests such as:
- Utility functions provided by the unittest.TestCase base class, such as::
self.fail("todo")
self.failUnlessRaises(...)
- Class level repository which is considered read-only as it is shared among
all test cases in your type.
Access it using::
self.rorepo # 'ro' stands for read-only
The rorepo is in fact your current project's git repo. If you refer to specific
shas for your objects, be sure you choose some that are part of the immutable portion
of the project history (to ensure tests don't fail for others).
"""
def _small_repo_url(self):
""":return" a path to a small, clonable repository"""
return os.path.join(self.rorepo.working_tree_dir, 'git/ext/gitdb/gitdb/ext/smmap')
@classmethod
def setUpClass(cls):
"""
Dynamically add a read-only repository to our actual type. This way,
each test type has its own repository.
"""
cls.rorepo = Repo(GIT_REPO)
@classmethod
def tearDownClass(cls):
cls.rorepo.git.clear_cache()
cls.rorepo.git = None
def _make_file(self, rela_path, data, repo=None):
"""
Create a file at the given path relative to our repository, filled
with the given data. Returns absolute path to created file.
"""
repo = repo or self.rorepo
abs_path = os.path.join(repo.working_tree_dir, rela_path)
with open(abs_path, "w") as fp:
    fp.write(data)
return abs_path
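# A minimal, hedged sketch (not part of the original module) showing how TestBase,
# with_rw_repo and _make_file fit together; the ref 'HEAD', the class name and the
# file contents are assumptions chosen purely for illustration.
class _ExampleHelperUsage(TestBase):

    @with_rw_repo('HEAD')
    def test_make_file_in_writable_clone(self, rw_repo):
        # _make_file writes relative to the given repository's working tree
        path = self._make_file('example.txt', 'hello\n', repo=rw_repo)
        assert os.path.isfile(path)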
|
|
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import math
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class TransformTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
sphere = IECore.SpherePrimitive()
input = GafferSceneTest.CompoundObjectSource()
input["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"group" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"children" : {
"sphere" : {
"bound" : IECore.Box3fData( sphere.bound() ),
"object" : sphere,
},
},
},
},
} ),
)
self.assertSceneValid( input["out"] )
transform = GafferScene.Transform()
transform["in"].setInput( input["out"] )
# by default transform should do nothing
self.assertSceneValid( transform["out"] )
self.assertScenesEqual( transform["out"], input["out"] )
# even when setting a transform it should do nothing, as
# it requires a filter before operating (applying the same transform
# at every location is really not very useful).
transform["transform"]["translate"].setValue( IECore.V3f( 1, 2, 3 ) )
self.assertSceneValid( transform["out"] )
self.assertScenesEqual( transform["out"], input["out"] )
# applying a filter should cause things to happen
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
transform["filter"].setInput( filter["out"] )
self.assertSceneValid( transform["out"] )
self.assertEqual( transform["out"].transform( "/group/sphere" ), IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) ) )
self.assertEqual( transform["out"].transform( "/group" ), IECore.M44f() )
self.assertEqual( transform["out"].bound( "/group/sphere" ), IECore.Box3f( IECore.V3f( -1 ), IECore.V3f( 1 ) ) )
self.assertEqual( transform["out"].bound( "/group" ), IECore.Box3f( IECore.V3f( 0, 1, 2 ), IECore.V3f( 2, 3, 4 ) ) )
self.assertEqual( transform["out"].bound( "/" ), IECore.Box3f( IECore.V3f( 0, 1, 2 ), IECore.V3f( 2, 3, 4 ) ) )
def testEnableBehaviour( self ) :
t = GafferScene.Transform()
self.assertTrue( t.enabledPlug().isSame( t["enabled"] ) )
self.assertTrue( t.correspondingInput( t["out"] ).isSame( t["in"] ) )
self.assertEqual( t.correspondingInput( t["in"] ), None )
self.assertEqual( t.correspondingInput( t["enabled"] ), None )
def testSpace( self ) :
sphere = GafferScene.Sphere()
sphere["transform"]["translate"].setValue( IECore.V3f( 1, 0, 0 ) )
transform = GafferScene.Transform()
transform["in"].setInput( sphere["out"] )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
transform["filter"].setInput( filter["out"] )
self.assertEqual( transform["space"].getValue(), GafferScene.Transform.Space.Local )
transform["transform"]["rotate"]["y"].setValue( 90 )
self.assertSceneValid( transform["out"] )
self.assertTrue(
IECore.V3f( 1, 0, 0 ).equalWithAbsError(
IECore.V3f( 0 ) * transform["out"].fullTransform( "/sphere" ),
0.000001
)
)
transform["space"].setValue( GafferScene.Transform.Space.Parent )
self.assertTrue(
IECore.V3f( 0, 0, -1 ).equalWithAbsError(
IECore.V3f( 0 ) * transform["out"].fullTransform( "/sphere" ),
0.000001
)
)
transform["space"].setValue( GafferScene.Transform.Space.World )
self.assertTrue(
IECore.V3f( 0, 0, -1 ).equalWithAbsError(
IECore.V3f( 0 ) * transform["out"].fullTransform( "/sphere" ),
0.000001
)
)
def testSpaceWithNestedHierarchy( self ) :
sphere = GafferScene.Sphere()
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["transform"]["translate"].setValue( IECore.V3f( 1, 0, 0 ) )
transform = GafferScene.Transform()
transform["in"].setInput( group["out"] )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
transform["filter"].setInput( filter["out"] )
self.assertEqual( transform["space"].getValue(), GafferScene.Transform.Space.Local )
self.assertSceneValid( transform["out"] )
self.assertTrue(
IECore.V3f( 1, 0, 0 ).equalWithAbsError(
IECore.V3f( 0 ) * transform["out"].fullTransform( "/group/sphere" ),
0.000001
)
)
transform["space"].setValue( GafferScene.Transform.Space.Parent )
self.assertSceneValid( transform["out"] )
self.assertTrue(
IECore.V3f( 1, 0, 0 ).equalWithAbsError(
IECore.V3f( 0 ) * transform["out"].fullTransform( "/group/sphere" ),
0.000001
)
)
transform["space"].setValue( GafferScene.Transform.Space.World )
transform["transform"]["rotate"]["y"].setValue( 90 )
self.assertTrue(
IECore.V3f( 0, 0, -1 ).equalWithAbsError(
IECore.V3f( 0 ) * transform["out"].fullTransform( "/group/sphere" ),
0.000001
)
)
def testResetLocal( self ) :
sphere = GafferScene.Sphere()
sphere["transform"]["translate"].setValue( IECore.V3f( 1, 0, 0 ) )
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["transform"]["translate"].setValue( IECore.V3f( 1, 0, 0 ) )
transform = GafferScene.Transform()
transform["in"].setInput( group["out"] )
transform["transform"]["rotate"]["y"].setValue( 90 )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
transform["filter"].setInput( filter["out"] )
transform["space"].setValue( GafferScene.Transform.Space.ResetLocal )
self.assertSceneValid( transform["out"] )
self.assertTrue(
IECore.V3f( 1, 0, 0 ).equalWithAbsError(
IECore.V3f( 0 ) * transform["out"].fullTransform( "/group/sphere" ),
0.000001
)
)
self.assertTrue(
IECore.V3f( 2, 0, 0 ).equalWithAbsError(
IECore.V3f( 0, 0, 1 ) * transform["out"].fullTransform( "/group/sphere" ),
0.000001
)
)
self.assertEqual(
transform["out"].transform( "/group/sphere" ),
IECore.M44f.createRotated( IECore.V3f( 0, math.radians( 90 ), 0 ) )
)
def testResetWorld( self ) :
sphere = GafferScene.Sphere()
sphere["transform"]["translate"].setValue( IECore.V3f( 1, 0, 0 ) )
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["transform"]["translate"].setValue( IECore.V3f( 1, 0, 0 ) )
transform = GafferScene.Transform()
transform["in"].setInput( group["out"] )
transform["transform"]["rotate"]["y"].setValue( 90 )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
transform["filter"].setInput( filter["out"] )
transform["space"].setValue( GafferScene.Transform.Space.ResetWorld )
self.assertSceneValid( transform["out"] )
self.assertTrue(
IECore.V3f( 0, 0, 0 ).equalWithAbsError(
IECore.V3f( 0 ) * transform["out"].fullTransform( "/group/sphere" ),
0.000001
)
)
self.assertTrue(
IECore.V3f( 1, 0, 0 ).equalWithAbsError(
IECore.V3f( 0, 0, 1 ) * transform["out"].fullTransform( "/group/sphere" ),
0.000001
)
)
self.assertEqual(
transform["out"].fullTransform( "/group/sphere" ),
IECore.M44f.createRotated( IECore.V3f( 0, math.radians( 90 ), 0 ) )
)
def testWorldWithMatchingAncestors( self ) :
b = GafferScene.Sphere()
b["name"].setValue( "b" )
a = GafferScene.Group()
a["in"][0].setInput( b["out"] )
a["name"].setValue( "a" )
t = GafferScene.Transform()
t["in"].setInput( b["out"] )
t["transform"]["translate"].setValue( IECore.V3f( 1, 2, 3 ) )
t["space"].setValue( t.Space.World )
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/a", "/a/b" ] ) )
t["filter"].setInput( f["out"] )
self.assertSceneValid( t["out"] )
self.assertEqual(
t["out"].fullTransform( "/a" ),
IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) )
)
# We want it to be as if /a/b has been transformed
# independently in world space, and not inherit the
# additional transform also applied to /a.
self.assertEqual(
t["out"].fullTransform( "/a/b" ),
IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) )
)
b["transform"]["translate"].setValue( IECore.V3f( 4, 5, 6 ) )
self.assertSceneValid( t["out"] )
self.assertEqual(
t["out"].fullTransform( "/a/b" ),
IECore.M44f.createTranslated( IECore.V3f( 5, 7, 9 ) )
)
def testResetWorldWithMatchingAncestors( self ) :
c = GafferScene.Sphere()
c["name"].setValue( "c" )
b = GafferScene.Group()
b["in"][0].setInput( c["out"] )
b["name"].setValue( "b" )
a = GafferScene.Group()
a["in"][0].setInput( b["out"] )
a["name"].setValue( "a" )
t = GafferScene.Transform()
t["in"].setInput( a["out"] )
t["transform"]["translate"].setValue( IECore.V3f( 1, 2, 3 ) )
t["space"].setValue( t.Space.ResetWorld )
# Apply to /a and /a/b/c so that we must take into
# account the changing parent transform of /a/b/c
to get its absolute position in world space
# right.
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/a", "/a/b/c" ] ) )
t["filter"].setInput( f["out"] )
# Check that we're good.
self.assertSceneValid( t["out"] )
self.assertEqual(
t["out"].fullTransform( "/a" ),
IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) )
)
self.assertEqual(
t["out"].fullTransform( "/a/b" ),
IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) )
)
self.assertEqual(
t["out"].fullTransform( "/a/b/c" ),
IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) )
)
# Change the transform on /a/b, and check that it is
# retained, but that /a/b/c adjusts for it and maintains
# the required absolute transform.
b["transform"]["translate"].setValue( IECore.V3f( 9, 7, 5 ) )
self.assertSceneValid( t["out"] )
self.assertEqual(
t["out"].fullTransform( "/a" ),
IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) )
)
self.assertEqual(
t["out"].fullTransform( "/a/b" ),
IECore.M44f.createTranslated( IECore.V3f( 10, 9, 8 ) )
)
self.assertEqual(
t["out"].fullTransform( "/a/b/c" ),
IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) )
)
def testObjectBoundIncludedWhenDescendantsMatch( self ) :
s = GafferScene.Cube()
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/..." ] ) ) # the dread ellipsis!
t = GafferScene.Transform()
t["in"].setInput( s["out"] )
t["filter"].setInput( f["out"] )
t["transform"]["translate"].setValue( IECore.V3f( 1 ) )
self.assertSceneValid( t["out"] )
self.assertEqual( t["out"].bound( "/" ), IECore.Box3f( IECore.V3f( 0.5 ), IECore.V3f( 1.5 ) ) )
if __name__ == "__main__":
unittest.main()
|
|
import difflib
_file_template = """
<div id="diff_table_div">
%(summary)s
%(table)s
</div>
"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups">
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>
"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add">Extra</td></tr>
<tr><td class="diff_chg">Different</td> </tr>
<tr><td class="diff_sub">Missing</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
<tr><td>(s)how same region</td> </tr>
<tr><td>(h)ide same region</td> </tr>
</table></td> </tr>
</table>"""
MAX_NUM_REVEALS = 3
MAX_DIFF_LINES = 512
LINE_WRAP = 64
SOFT_MAX_LINE_LENGTH = 128
HARD_MAX_LINE_LENGTH = 1024
def limit_revealed_lines_to(diffs, limit, hide_expected):
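    """Yield mdiff-style (fromdata, todata, flag) tuples, truncating overly long
    "to" lines, cutting the diff off once MAX_DIFF_LINES differing lines or more
    than `limit` revealed expected-side changes have been seen, and replacing the
    expected output with a placeholder when hide_expected is set."""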
def truncate_line(todata, expected_length):
max_length = min(max(expected_length, SOFT_MAX_LINE_LENGTH),
HARD_MAX_LINE_LENGTH)
if len(todata[1]) > max_length:
new = todata[1][:max_length] + '...truncated'
if todata[1].endswith('\n\x01'):
new += '\n\x01'
elif todata[1].endswith('\x01'):
new += '\x01'
return todata[0], new
else:
return todata
num_reveals = 0
different_lines = 0
obscured = '<<Expected output obscured by instructor.>>'
for fromdata, todata, flag in diffs:
if flag:
different_lines += 1
if '\0-' in fromdata[1] or '\0^' in fromdata[1]:
num_reveals += 1
if different_lines > MAX_DIFF_LINES or limit and num_reveals > limit:
trun = '...', '<<Remaining diff not shown>>'
yield trun, trun, False
break
todata = truncate_line(todata, len(fromdata[1]))
if hide_expected:
fromdata = fromdata[0], obscured
obscured = ''
yield fromdata, todata, flag
def change_same_starting_points(flaglist):
"""Gets points at which changes begin"""
change_points = []
same_points = []
in_change = False
if flaglist and not flaglist[0]:
same_points.append(0)
for x, flag in enumerate(flaglist):
if flag and not in_change:
change_points.append(x)
in_change = True
elif not flag and in_change:
same_points.append(x)
in_change = False
return (change_points, same_points)
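# Worked example (illustrative, not part of the original module):
#   change_same_starting_points([False, True, True, False, False, True])
#   returns ([1, 5], [0, 3]): runs of changed lines begin at indices 1 and 5,
#   runs of unchanged lines begin at indices 0 and 3.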
class HTMLDiff(difflib.HtmlDiff):
FROM_DESC = 'Correct Output'
TO_DESC = 'Your Output'
TD_DIFF_HEADER = '<td class="diff_header"{0}>{1}</td>\
<td style="white-space:nowrap{2}">{3}</td>'
SHOW_HIDE_INSTRUMENTATION = "<p>" + \
"""\t<a href="javascript:void(0)" onclick="showAll(""" + \
"""'difflib_chg_{0}_top');">Show All</a>\n""" + \
"""\t<a href="javascript:void(0)" onclick="hideAll(""" + \
"""'difflib_chg_{0}_top');">Hide All</a>\n""" + \
"</p>"
FAILING_BLOCK = '\n'.join(['<div class="well well-small" id="{}">',
' <h4>{}: {}</h4>', ' {}', '</div>'])
NEXT_ID_CHANGE = ' id="difflib_chg_{0}_{1}"'
NEXT_HREF = '<a href="#difflib_chg_{0}_{1}">n</a>'
NEXT_HREF_TOP = '<a href="#difflib_chg_{0}_top">t</a>'
NEXT_ID_SAME = ' id="difflib_same_{0}{1}_{2}"'
SHOW_HIDE_ROWS = \
'<a href="javascript:void(0)" onclick="showHideRows(this);">h</a>'
NO_DIFFERENCES = '<td></td><td> No Differences Found </td>'
EMPTY_FILE = '<td></td><td> Empty File </td>'
MAX_SAME_LINES_BEFORE_SHOW_HIDE = 5 # must be >= 4
def __init__(self, points_possible=0, num_reveal_limit=MAX_NUM_REVEALS):
super(HTMLDiff, self).__init__(wrapcolumn=LINE_WRAP)
self._legend = _legend
self._table_template = _table_template
self._file_template = _file_template
self._last_collapsed = False
self._mapping = {} # maps a renderable to html
self._num_reveal_limit = num_reveal_limit
self._points_possible = points_possible
self._show_legend = False
def add_renderable(self, renderable):
value = renderable.custom_output
if renderable.show_diff_table():
self._show_legend = True
self._last_collapsed = False
table = self.make_table(renderable)
if self._last_collapsed:
show_hide = self.SHOW_HIDE_INSTRUMENTATION.format(
self._prefix[1])
table = '{0}{1}{0}'.format(show_hide, table)
value += table
name = renderable.name
issue = renderable.get_issue()
if issue:
name += ' -- {}'.format(issue)
self._mapping[renderable] = None if not (value or issue) else \
self.FAILING_BLOCK.format(renderable.id, renderable.group, name,
value)
def make_table(self, renderable):
"""Makes unique anchor prefixes so that multiple tables may exist
on the same page without conflict."""
self._make_prefix()
diffs = renderable.diff._diff
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs, renderable.diff.hide_expected)
# collect up from/to lines and flags into lists (also format the lines)
fromlist, tolist, flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist, tolist, flaglist, next_href, next_id = self._convert_flags(
fromlist, tolist, flaglist, False, 5)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
# mdiff yields None on separator lines; skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append(fmt % (next_id[i], next_href[i], fromlist[i],
next_href[i], tolist[i]))
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % self.FROM_DESC,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % self.TO_DESC)
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+', '<span class="diff_add">'). \
replace('\0-', '<span class="diff_sub">'). \
replace('\0^', '<span class="diff_chg">'). \
replace('\1', '</span>'). \
replace('\t', ' ')
def _format_line(self, side, flag, linenum, text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side], linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text = text.replace("&", "&"). \
replace(">", ">"). \
replace("<", "<")
# make spaces non-breakable so they don't get compressed or line-wrapped
text = text.replace(' ', ' ').rstrip()
color = ''
if '\0^' in text or '\0+' in text or '\0-' in text:
color = ';background-color:{0}'
if side == 0:
color = color.format('#ffe6e6')
else:
color = color.format('#e3ffe3')
return self.TD_DIFF_HEADER.format(id, linenum, color, text)
def _make_test_summary(self):
"""Return html tables for failed and passed tests."""
template = ('<div class="pull-left well well-small">'
'<h3 style="color:{2}">{0} Tests</h3>'
'<table border="1">\n <tr><th>Test Group</th>'
'<th>Test Name</th><th>Value</th></tr>{1}</table></div>')
failed = passed = ''
for diff, html in sorted(self._mapping.items()):
if html:
failed += diff.html_header_row()
else:
passed += diff.html_header_row()
output = ''
if passed:
output += template.format('Passed', passed, 'green')
if failed:
output += template.format('Failed', failed, 'red')
if self._show_legend:
output += ('<div class="pull-left well well-small">{}</div>'
.format(self._legend))
return '<div class="row-fluid">{}</div>'.format(output)
def make_whole_file(self):
tables = [x[1] for x in sorted(self._mapping.items()) if x[1]]
return self._file_template % {'summary': self._make_test_summary(),
'table': '\n'.join(tables)}
def _line_wrapper(self, diffs, hide_expected):
diffs = limit_revealed_lines_to(diffs, self._num_reveal_limit,
hide_expected)
return super(HTMLDiff, self)._line_wrapper(diffs)
def _make_prefix(self):
sameprefix = "same{0}_".format(HTMLDiff._default_prefix)
super(HTMLDiff, self)._make_prefix()
self._prefix.append(sameprefix)
def _convert_flags(self, fromlist, tolist, flaglist, context, numlines):
"""Handles making inline links in the document."""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
sameprefix = self._prefix[2]
# process change flags, generating middle column of next anchors/links
next_id = [''] * len(flaglist)
next_href = [''] * len(flaglist)
(change_positions, same_positions) = \
change_same_starting_points(flaglist)
change_positions_set = set(change_positions)
for numChange, changePos in enumerate(change_positions[: -1]):
next_id[changePos] = self.NEXT_ID_CHANGE.format(
toprefix, numChange)
next_href[changePos] = self.NEXT_HREF.format(
toprefix, numChange + 1)
for same_block, same_start_pos in enumerate(same_positions):
same_pos = same_start_pos
while same_pos < len(flaglist) and \
same_pos not in change_positions_set:
next_id[same_pos] = self.NEXT_ID_SAME.format(
sameprefix, same_block,
same_pos - same_start_pos + 1)
same_pos += 1
num_same_lines = same_pos - same_start_pos
if num_same_lines > self.MAX_SAME_LINES_BEFORE_SHOW_HIDE:
next_href[same_start_pos + 2] = self.SHOW_HIDE_ROWS
self._last_collapsed = True
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
if context:
fromlist = [self.NO_DIFFERENCES]
tolist = fromlist
else:
fromlist = tolist = [self.EMPTY_FILE]
# redo the last link to link to the top
if change_positions:
pos = change_positions[-1]
next_id[pos] = self.NEXT_ID_CHANGE.format(
toprefix, len(change_positions) - 1)
next_href[pos] = self.NEXT_HREF_TOP.format(toprefix)
return fromlist, tolist, flaglist, next_href, next_id
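if __name__ == '__main__':
    # Hedged demo (illustrative only, not part of the original module) of the two
    # module-level helpers above, using mdiff-style ((lineno, text), ...) tuples.
    # The sample data below is an assumption chosen purely for illustration.
    print(change_same_starting_points([False, True, True, False, False, True]))
    sample_diffs = [
        ((1, 'same line'), (1, 'same line'), False),
        ((2, '\0-expected\1'), (2, '\0+actual\1'), True),
    ]
    for row in limit_revealed_lines_to(sample_diffs, MAX_NUM_REVEALS, False):
        print(row)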
|