gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
#!/usr/bin/env python
"""
SALTBIAS corrects the bias in SALT CCD data
Author Version Date
-----------------------------------------------
Martin Still (SAAO) 0.1 05 Sep 2006
S. M Crawford 0.2 05 April 2011
Updates:
------------------------------------------------------------
12 Dec 2010 Updated the fitting code to use most recent version of interfit
5 Apr 2011 Updated to use saltsafekey and to use TIME-OBS instead of
using UTC-OBS, added a catch to using interfit
10 Sep 2011 Removed master bias from here and moved to saltclean
Updated the error handling to current form
"""
from __future__ import with_statement
import os, string, sys, glob, time
from pyraf import iraf
from pyraf.iraf import pysalt
import numpy as np
import saltstat, saltfit
import saltsafeio as saltio
import saltsafekey as saltkey
from salterror import SaltError, SaltIOError
from saltsafelog import logging, history
# Module-level debug flag passed to the saltsafelog.logging context manager.
debug=True

# Make sure the plotting functions work with an older version of matplotlib
try:
    #import matplotlib
    #matplotlib.use('Agg')
    import matplotlib.pyplot as plt
except ImportError:
    # Fall back to the legacy pylab interface for old matplotlib releases
    # that do not provide matplotlib.pyplot.
    #import matplotlib
    #matplotlib.use('Agg')
    import matplotlib.pylab as plt
    from matplotlib import font_manager
# -----------------------------------------------------------
# core routine
def saltbias(images,outimages,outpref,subover=True,trim=True,subbias=False,
             masterbias='bias.fits', median=False, function='polynomial',
             order=3, rej_lo=3, rej_hi=3, niter=10, plotover=False,
             turbo=False, clobber=False, logfile='salt.log', verbose=True):
    """Bias-correct a list of SALT CCD images.

    images--input image list specification
    outimages--output image list specification
    outpref--prefix used to build output names when outimages is empty
    subover--subtract the overscan region
    trim--trim the image to the data section
    subbias--subtract the master bias frame
    masterbias--name of the master bias FITS file
    median--use the median instead of the mean in overscan statistics
    function--functional form fit to the overscan region
    order--order of the fitting function
    rej_lo--sigma of low points to reject in the fit
    rej_hi--sigma of high points to reject in the fit
    niter--number of rejection iterations
    plotover--plot the overscan region and fit
    turbo--unused; kept for interface compatibility
    clobber--overwrite existing output files
    logfile--name of the log file
    verbose--print log messages to stdout

    Raises SaltError if the master bias is missing or an input image has
    already been de-biased.
    """

    with logging(logfile,debug) as log:

        # Check the input images
        infiles = saltio.argunpack('Input',images)

        # create list of output files
        outfiles=saltio.listparse('Outfile', outimages, outpref,infiles,'')

        # are input and output lists the same length?
        saltio.comparelists(infiles,outfiles,'Input','output')

        # Does master bias frame exist?
        if subbias:
            if os.path.isfile(masterbias):
                bstruct = saltio.openfits(masterbias)
            else:
                message = 'Master bias frame %s does not exist' % masterbias
                raise SaltError(message)
        else:
            bstruct=None

        # open each raw image file
        for img, oimg in zip(infiles, outfiles):

            #open the file
            struct = saltio.openfits(img)

            #check to see if it has already been bias subtracted
            instrume,keyprep,keygain,keybias,keyxtalk,keyslot = saltkey.instrumid(struct)

            # has file been de-biased already?  BUGFIX: the original raised
            # SaltError inside the same try-block whose bare 'except: pass'
            # swallowed it (so the check never fired) and referenced an
            # undefined name 'infile' instead of 'img'.
            try:
                struct[0].header[keybias]
            except KeyError:
                pass
            else:
                message = 'File %s has already been de-biased ' % img
                raise SaltError(message)

            #subtract the bias
            struct=bias(struct,subover=subover, trim=trim, subbias=subbias,
                        bstruct=bstruct, median=median, function=function,
                        order=order, rej_lo=rej_lo, rej_hi=rej_hi, niter=niter,
                        plotover=plotover, log=log, verbose=verbose)

            # housekeeping keywords
            fname, hist=history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref'])
            saltkey.housekeeping(struct[0],keybias, 'Images have been de-biased', hist)

            # write FITS file
            saltio.writefits(struct,oimg, clobber=clobber)
            saltio.closefits(struct)
def bias(struct,subover=True,trim=True, subbias=False, bstruct=None,
         median=False, function='polynomial',order=3,rej_lo=3,rej_hi=3,niter=10,
         plotover=False, log=None, verbose=True):
    """Bias subtracts the bias levels from a frame.  It will fit and subtract
    the overscan region, trim the images, and subtract a master bias if
    required.

    struct--image structure
    subover--subtract the overscan region
    trim--trim the image
    subbias--subtract master bias
    bstruct--master bias image structure
    median--use the median instead of mean in image statistics
    function--form to fit to the overscan region
    order--order for the function
    rej_lo--sigma of low points to reject in the fit
    rej_hi--sigma of high points to reject in the fit
    niter--number of iterations
    log--saltio log for recording information
    verbose--whether to print to stdout

    Returns the modified image structure.
    """
    infile=saltkey.getimagename(struct[0])

    # how many extensions?
    nsciext = saltkey.get('NSCIEXT',struct[0])
    nextend = saltkey.get('NEXTEND',struct[0])
    nccd = saltkey.get('NCCDS',struct[0])

    # how many amplifiers?--this is hard wired
    amplifiers = 2 * nccd

    #log the process
    if subover and log:
        message = '%28s %7s %5s %4s %6s' % \
            ('HDU','Overscan','Order','RMS','Niter')
        log.message('\n --------------------------------------------------',
                    with_header=False, with_stdout=verbose)
        log.message(message, with_header=False, with_stdout=verbose)
        log.message(' --------------------------------------------------',
                    with_header=False, with_stdout=verbose)

    # set up an interactive plot window for the overscan fits if requested
    if (plotover):
        plt.figure(1)
        plt.axes([0.1,0.1,0.8,0.8])
        plt.xlabel('CCD Column')
        plt.ylabel('Pixel Counts (e-)')
        plt.ion()

    #loop through the extensions and subtract the bias
    for i in range(1,nsciext+1):
        if struct[i].name=='SCI':

            #get the bias section
            biassec = saltkey.get('BIASSEC',struct[i])
            y1,y2,x1,x2 = saltio.getSection(biassec, iraf_format=True)

            #get the data section
            datasec = saltkey.get('DATASEC',struct[i])
            dy1,dy2, dx1, dx2 = saltio.getSection(datasec, iraf_format=True)

            #setup the overscan region
            if subover:
                yarr=np.arange(y1,y2, dtype=float)
                data=struct[i].data
                odata=struct[i].data[y1:y2,x1:x2]
                if median:
                    # collapse the overscan columns with the median
                    odata=np.median((struct[i].data[y1:y2,x1:x2]),axis=1)
                    olevel=np.median((struct[i].data[y1:y2,x1:x2]))
                    saltkey.new('OVERSCAN','%f' % (olevel),'Overscan median value', struct[i])
                else:
                    # collapse the overscan columns with the mean
                    odata=np.mean((struct[i].data[y1:y2,x1:x2]),axis=1)
                    olevel=np.mean((struct[i].data[y1:y2,x1:x2]))
                    saltkey.new('OVERSCAN','%f' % (olevel),'Overscan mean value', struct[i])

                #fit the overscan region
                ifit=saltfit.interfit(yarr, odata, function=function, \
                                      order=order, thresh=rej_hi, niter=niter)
                try:
                    ifit.interfit()
                    coeffs=ifit.coef
                    ofit=ifit(yarr)
                    omean, omed, osigma=saltstat.iterstat((odata-ofit), sig=3, niter=5)
                except ValueError:
                    #catch the error if it is a zero array
                    ofit=np.array(yarr)*0.0
                    osigma=0.0
                except TypeError:
                    #catch the error if it is a zero array
                    ofit=np.array(yarr)*0.0
                    osigma=0.0

                #if it hasn't been already, convert image to
                #double format
                struct[i].data = 1.0 * struct[i].data
                try:
                    # scaling keywords become invalid once the data are floats
                    struct[i].header.remove('BZERO')
                    struct[i].header.remove('BSCALE')
                except:
                    pass

                #subtract the overscan region
                for j in range(len(struct[i].data[0])):
                    struct[i].data[y1:y2,j] -= ofit

                #report the information
                if log:
                    message = '%25s[%1d] %8.2f %3d %7.2f %3d' % \
                        (infile, i, olevel, order, osigma, niter)
                    log.message(message, with_stdout=verbose, with_header=False)

                #add the statistics to the image header
                saltkey.new('OVERRMS','%f' % (osigma),'Overscan RMS value', struct[i])

                #update the variance frame
                if saltkey.found('VAREXT', struct[i]):
                    vhdu=saltkey.get('VAREXT', struct[i])
                    try:
                        vdata=struct[vhdu].data
                        #The bias level should not be included in the noise from the signal
                        for j in range(len(struct[i].data[0])):
                            vdata[y1:y2,j] -= ofit
                        #add a bit to make sure that the minimum error is the rednoise
                        rdnoise= saltkey.get('RDNOISE',struct[i])
                        vdata[vdata<rdnoise**2]=rdnoise**2
                        struct[vhdu].data=vdata+osigma**2
                    except Exception, e:
                        msg='Cannot update the variance frame in %s[%i] because %s' % (infile, vhdu, e)
                        raise SaltError(msg)

                #plot the overscan region
                if plotover:
                    plt.plot(yarr, odata)
                    plt.plot(yarr, ofit)

            #trim the data and update the headers
            if trim:
                struct[i].data=struct[i].data[dy1:dy2,dx1:dx2]
                datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
                saltkey.put('DATASEC',datasec,struct[i])

                #update the variance frame
                if saltkey.found('VAREXT', struct[i]):
                    vhdu=saltkey.get('VAREXT', struct[i])
                    struct[vhdu].data=struct[vhdu].data[dy1:dy2,dx1:dx2]
                    datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
                    saltkey.put('DATASEC',datasec,struct[vhdu])

                #update the BPM frame
                if saltkey.found('BPMEXT', struct[i]):
                    bhdu=saltkey.get('BPMEXT', struct[i])
                    struct[bhdu].data=struct[bhdu].data[dy1:dy2,dx1:dx2]
                    datasec = '[1:'+str(dx2-dx1)+',1:'+str(dy2-dy1)+']'
                    saltkey.put('DATASEC',datasec,struct[bhdu])

            #subtract the master bias if necessary
            if subbias and bstruct:
                struct[i].data -= bstruct[i].data

                #update the variance frame
                if saltkey.found('VAREXT', struct[i]):
                    vhdu=saltkey.get('VAREXT', struct[i])
                    try:
                        vdata=struct[vhdu].data
                        # variances add when one frame is subtracted from another
                        struct[vhdu].data=vdata+bstruct[vhdu].data
                    except Exception, e:
                        msg='Cannot update the variance frame in %s[%i] because %s' % (infile, vhdu, e)
                        raise SaltError(msg)

    # leave interactive plotting mode and display the accumulated plots
    if plotover:
        plt.ioff()
        plt.show()

    return struct
# -----------------------------------------------------------
# main code
# Register saltbias as a PyRAF task in the saltred package if it has not
# already been defined.
if not iraf.deftask('saltbias'):
    # parameter file shipped with the saltred package
    parfile = iraf.osfn("saltred$saltbias.par")
    t = iraf.IrafTaskFactory(taskname="saltbias",value=parfile,function=saltbias, pkgname='saltred')
|
|
# -*- coding: utf-8 -*-
from exceptions import BufferNotFound, BufferNotInSync
from liborgmode import Document, Heading, MultiPurposeList, DIRECTION_BACKWARD
import settings
import vim
from UserList import UserList
class VimBufferContent(MultiPurposeList):
    u""" Vim Buffer Content is a UTF-8 wrapper around a vim buffer. When
    retrieving or setting items in the buffer an automatic conversion is
    performed.

    This ensures UTF-8 usage on the side of liborgmode and the vim plugin
    vim-orgmode.

    Vim stores buffer lines as UTF-8 encoded ``str``; liborgmode works in
    ``unicode``.  Accessors decode ``str -> unicode``; mutators encode
    ``unicode -> str`` before delegating to MultiPurposeList.
    """

    def __init__(self, vimbuffer, on_change=None):
        MultiPurposeList.__init__(self, on_change=on_change)

        # replace data with vimbuffer to make operations change the actual
        # buffer
        self.data = vimbuffer

    def __contains__(self, item):
        i = item
        if type(i) is unicode:
            i = item.encode(u'utf-8')
        return MultiPurposeList.__contains__(self, i)

    def __getitem__(self, i):
        item = MultiPurposeList.__getitem__(self, i)
        if type(item) is str:
            return item.decode(u'utf-8')
        return item

    def __getslice__(self, i, j):
        return [item.decode(u'utf-8') if type(item) is str else item \
                for item in MultiPurposeList.__getslice__(self, i, j)]

    def __setitem__(self, i, item):
        _i = item
        if type(_i) is unicode:
            _i = item.encode(u'utf-8')
        MultiPurposeList.__setitem__(self, i, _i)

    def __setslice__(self, i, j, other):
        o = []
        o_tmp = other
        if type(o_tmp) not in (list, tuple) and not isinstance(o_tmp, UserList):
            o_tmp = list(o_tmp)
        for item in o_tmp:
            if type(item) == unicode:
                o.append(item.encode(u'utf-8'))
            else:
                o.append(item)
        MultiPurposeList.__setslice__(self, i, j, o)

    def __add__(self, other):
        raise NotImplementedError()
        # TODO: implement me
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + list(other))

    def __radd__(self, other):
        raise NotImplementedError()
        # TODO: implement me
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        else:
            return self.__class__(list(other) + self.data)

    def __iadd__(self, other):
        o = []
        o_tmp = other
        if type(o_tmp) not in (list, tuple) and not isinstance(o_tmp, UserList):
            o_tmp = list(o_tmp)
        for i in o_tmp:
            if type(i) is unicode:
                o.append(i.encode(u'utf-8'))
            else:
                o.append(i)
        return MultiPurposeList.__iadd__(self, o)

    def append(self, item):
        # BUGFIX: the original tested for str, which skipped the encoding of
        # unicode items and crashed on non-ASCII str input; mutators must
        # encode unicode, as in __setitem__/__setslice__.
        i = item
        if type(item) is unicode:
            i = item.encode(u'utf-8')
        MultiPurposeList.append(self, i)

    def insert(self, i, item):
        # BUGFIX: same unicode/str confusion as in append (see above).
        _i = item
        if type(_i) is unicode:
            _i = item.encode(u'utf-8')
        MultiPurposeList.insert(self, i, _i)

    def index(self, item, *args):
        i = item
        if type(i) is unicode:
            i = item.encode(u'utf-8')
        # BUGFIX: the original discarded the result and always returned None
        return MultiPurposeList.index(self, i, *args)

    def pop(self, i=-1):
        return MultiPurposeList.pop(self, i).decode(u'utf-8')

    def extend(self, other):
        o = []
        o_tmp = other
        if type(o_tmp) not in (list, tuple) and not isinstance(o_tmp, UserList):
            o_tmp = list(o_tmp)
        for i in o_tmp:
            if type(i) is unicode:
                o.append(i.encode(u'utf-8'))
            else:
                o.append(i)
        MultiPurposeList.extend(self, o)
class VimBuffer(Document):
    u""" A liborgmode Document backed by a live vim buffer.

    Reads and writes go straight through VimBufferContent to the underlying
    vim buffer; b:changedtick is tracked to detect edits made outside this
    object.
    """

    def __init__(self, bufnr=0):
        u"""
        :bufnr:		0: current buffer, every other number refers to another buffer

        :raises BufferNotFound: if no buffer with the given number exists
        """
        Document.__init__(self)
        self._bufnr = vim.current.buffer.number if bufnr == 0 else bufnr
        self._changedtick = -1
        if self._bufnr == vim.current.buffer.number:
            self._content = VimBufferContent(vim.current.buffer)
        else:
            # look the buffer object up by number
            _buffer = None
            for b in vim.buffers:
                if self._bufnr == b.number:
                    _buffer = b
                    break

            if not _buffer:
                raise BufferNotFound(u'Unable to locate buffer number #%d' % self._bufnr)
            self._content = VimBufferContent(_buffer)

        # remember the change count at load time so is_insync can detect
        # external modifications later
        self.update_changedtick()
        self._orig_changedtick = self._changedtick

    @property
    def tabstop(self):
        # vim's 'tabstop' option for this session
        return int(vim.eval(u'&ts'.encode(u'utf-8')))

    @property
    def tag_column(self):
        # column at which heading tags are aligned (org_tag_column setting)
        return int(settings.get('org_tag_column', '77'))

    @property
    def is_insync(self):
        # refresh the tick first so external buffer edits are noticed
        if self._changedtick == self._orig_changedtick:
            self.update_changedtick()
        return self._changedtick == self._orig_changedtick

    @property
    def bufnr(self):
        u"""
        :returns: The buffer's number for the current document
        """
        return self._bufnr

    def changedtick():
        u""" Number of changes in vimbuffer """
        def fget(self):
            return self._changedtick

        def fset(self, value):
            self._changedtick = value
        return locals()
    changedtick = property(**changedtick())

    def update_changedtick(self):
        # b:changedtick is only directly readable for the current buffer; for
        # other buffers we briefly switch to them (suppressing autocmds and
        # preserving 'lz'/'hidden') to fetch it.
        if self.bufnr == vim.current.buffer.number:
            self._changedtick = int(vim.eval(u'b:changedtick'.encode(u'utf-8')))
        else:
            vim.command(u'unlet! g:org_changedtick | let g:org_lz = &lz | let g:org_hidden = &hidden | set lz hidden'.encode(u'utf-8'))
            # TODO is this likely to fail? maybe some error hangling should be added
            vim.command((u'keepalt buffer %d | let g:org_changedtick = b:changedtick | buffer %d' % \
                (self.bufnr, vim.current.buffer.number)).encode(u'utf-8'))
            vim.command(u'let &lz = g:org_lz | let &hidden = g:org_hidden | unlet! g:org_lz g:org_hidden | redraw'.encode(u'utf-8'))
            self._changedtick = int(vim.eval(u'g:org_changedtick'.encode(u'utf-8')))

    def write(self):
        u""" write the changes to the vim buffer

        :returns: True if something was written, otherwise False

        :raises BufferNotInSync: if the buffer was modified outside this
                                 document since it was loaded
        """
        if not self.is_dirty:
            return False

        self.update_changedtick()
        if not self.is_insync:
            raise BufferNotInSync(u'Buffer is not in sync with vim!')

        # write meta information
        if self.is_dirty_meta_information:
            meta_end = 0 if self._orig_meta_information_len is None else self._orig_meta_information_len
            self._content[:meta_end] = self.meta_information
            self._orig_meta_information_len = len(self.meta_information)

        # remove deleted headings
        already_deleted = []
        # delete bottom-up so earlier offsets stay valid while deleting
        for h in sorted(self._deleted_headings, cmp=lambda x, y: cmp(x._orig_start, y._orig_start), reverse=True):
            if h._orig_start is not None and h._orig_start not in already_deleted:
                # this is a heading that actually exists on the buffer and it
                # needs to be removed
                del self._content[h._orig_start:h._orig_start + h._orig_len]
                already_deleted.append(h._orig_start)
        del self._deleted_headings[:]
        del already_deleted

        # update changed headings and add new headings
        for h in self.all_headings():
            if h.is_dirty:
                if h._orig_start is not None:
                    # this is a heading that existed before and was changed. It
                    # needs to be replaced
                    if h.is_dirty_heading:
                        self._content[h.start:h.start + 1] = [unicode(h)]
                    if h.is_dirty_body:
                        self._content[h.start + 1:h.start + h._orig_len] = h.body
                else:
                    # this is a new heading. It needs to be inserted
                    self._content[h.start:h.start] = [unicode(h)] + h.body
                h._dirty_heading = False
                h._dirty_body = False
            # for all headings the length and start offset needs to be updated
            h._orig_start = h.start
            h._orig_len = len(h)

        self._dirty_meta_information = False
        self._dirty_document = False

        # re-baseline the change tick so the buffer counts as in sync again
        self.update_changedtick()
        self._orig_changedtick = self._changedtick
        return True

    def write_heading(self, heading, including_children=True):
        u""" WARNING: use this function only when you know what you are doing!
        This function writes a heading to the vim buffer. It offers performance
        advantages over the regular write() function. This advantage is
        combined with no sanity checks! Whenever you use this function, make
        sure the heading you are writing contains the right offsets
        (Heading._orig_start, Heading._orig_len).

        Usage example:
        # Retrieve a potentially dirty document
        d = ORGMODE.get_document(allow_dirty=True)
        # Don't rely on the DOM, retrieve the heading afresh
        h = d.find_heading(direction=DIRECTION_FORWARD, position=100)
        # Update tags
        h.tags = ['tag1', 'tag2']
        # Write the heading
        d.write_heading(h)

        This function can't be used to delete a heading!

        :heading:		Write this heading with to the vim buffer
        :including_children:	Also include children in the update

        :returns	The written heading

        :raises ValueError: if the heading has no _orig_start offset
        """
        if including_children and heading.children:
            # write children bottom-up so parent offsets remain valid
            for child in heading.children[::-1]:
                self.write_heading(child, including_children)

        if heading.is_dirty:
            if heading._orig_start is not None:
                # this is a heading that existed before and was changed. It
                # needs to be replaced
                if heading.is_dirty_heading:
                    self._content[heading._orig_start:heading._orig_start + 1] = [unicode(heading)]
                if heading.is_dirty_body:
                    self._content[heading._orig_start + 1:heading._orig_start + heading._orig_len] = heading.body
            else:
                # this is a new heading. It needs to be inserted
                raise ValueError('Heading must contain the attribute _orig_start! %s' % heading)
            heading._dirty_heading = False
            heading._dirty_body = False
        # for all headings the length offset needs to be updated
        heading._orig_len = len(heading)

        return heading

    def previous_heading(self, position=None):
        u""" Find the previous heading (search backward) and return the related object

        :returns:	 Heading object or None
        """
        h = self.current_heading(position=position)
        if h:
            return h.previous_heading

    def current_heading(self, position=None):
        u""" Find the current heading (search backward) and return the related object

        :returns:	 Heading object or None
        """
        if position is None:
            position = vim.current.window.cursor[0] - 1

        for h in self.all_headings():
            if h.start <= position and h.end >= position:
                return h

    def next_heading(self, position=None):
        u""" Find the next heading (search forward) and return the related object

        :returns:	 Heading object or None
        """
        h = self.current_heading(position=position)
        if h:
            return h.next_heading

    def find_current_heading(self, position=None, heading=Heading):
        u""" Find the next heading backwards from the position of the cursor.
        The difference to the function current_heading is that the returned
        object is not built into the DOM. In case the DOM doesn't exist or is
        out of sync this function is much faster in fetching the current
        heading.

        :position:	The position to start the search from
        :heading:	The base class for the returned heading

        :returns:	Heading object or None
        """
        return self.find_heading(vim.current.window.cursor[0] - 1 \
            if position is None else position, \
            direction=DIRECTION_BACKWARD, heading=heading, \
            connect_with_document=False)
|
|
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import hashlib
import logging
import os
import re
from guild import click_util
from guild import cmd_impl_support
from guild import remote as remotelib
from guild import util
from guild import var
from guild.commands import runs_impl
log = logging.getLogger("guild")
class MetaSyncRemote(remotelib.Remote):
    """Base class for remotes that keep a locally synced copy of run metadata.

    Run operations (list/delete/restore/purge/info) work against the local
    runs directories after `_sync_runs_meta` has refreshed them.  Subclasses
    implement `_sync_runs_meta`, `_delete_runs`, `_restore_runs` and
    `_purge_runs`.
    """

    def __init__(self, runs_dir, deleted_runs_dir=None):
        # deleted_runs_dir is optional; when missing, non-permanent delete,
        # restore and purge are unsupported operations
        self._runs_dir = runs_dir
        self._deleted_runs_dir = deleted_runs_dir

    def list_runs(self, **opts):
        deleted = opts.pop("deleted", False)
        if deleted and not self._deleted_runs_dir:
            raise remotelib.OperationNotSupported(
                "remote '%s' does not support '--delete' option" % self.name
            )
        self._sync_runs_meta()
        runs_dir = self._deleted_runs_dir if deleted else self._runs_dir
        if not os.path.exists(runs_dir):
            # nothing synced yet -- nothing to list
            return
        assert not opts.get("archive"), opts
        assert not opts.get("remote"), opts
        # list from the synced local directory as if it were an archive
        args = click_util.Args(
            deleted=False, archive=runs_dir, remote=None, json=False, **opts
        )
        try:
            runs_impl.list_runs(args)
        except SystemExit as e:
            raise self._fix_system_exit_msg_for_remote(e, ["runs list", "runs"])

    def _sync_runs_meta(self, force=False):
        # subclass responsibility: refresh the local meta copy
        raise NotImplementedError()

    def filtered_runs(self, **filters):
        self._sync_runs_meta()
        args = click_util.Args(archive=self._runs_dir, remote=None, runs=[], **filters)
        return runs_impl.runs_for_args(args)

    def delete_runs(self, **opts):
        if not self._deleted_runs_dir and not opts.get("permanent"):
            raise remotelib.OperationNotSupported(
                "remote '%s' does not support non permanent deletes\n"
                "Use the '--permanent' with this command and try again." % self.name
            )
        args = click_util.Args(archive=self._runs_dir, remote=None, **opts)
        self._sync_runs_meta()
        if args.permanent:
            preview = cmd_impl_support.format_warn(
                "WARNING: You are about to permanently delete "
                "the following runs on %s:" % self.name
            )
            confirm = "Permanently delete these runs?"
        else:
            preview = "You are about to delete the following runs on %s:" % self.name
            confirm = "Delete these runs?"
        no_runs_help = "Nothing to delete."

        def delete_f(selected):
            # perform the delete remotely, then force a meta re-sync so the
            # local copy reflects the change
            self._delete_runs(selected, args.permanent)
            self._sync_runs_meta(force=True)

        try:
            runs_impl.runs_op(
                args,
                None,
                preview,
                confirm,
                no_runs_help,
                delete_f,
                confirm_default=not args.permanent,
            )
        except SystemExit as e:
            raise self._fix_system_exit_msg_for_remote(e, ["runs rm", "runs delete"])

    def _delete_runs(self, runs, permanent):
        # subclass responsibility
        raise NotImplementedError()

    def _fix_system_exit_msg_for_remote(self, e, cmds):
        # Rewrite 'guild <cmd>' hints in the exit message so they include the
        # '-r <remote>' option; always re-raises SystemExit.
        from guild import main

        assert isinstance(e, SystemExit), e
        msg, code = main.system_exit_params(e)
        if not msg:
            raise SystemExit(code)
        for cmd in cmds:
            maybe_changed = msg.replace(
                "guild %s" % self.name, "guild %s -r %s" % (self.name, cmd)
            )
            if maybe_changed != msg:
                msg = maybe_changed
                break
        raise SystemExit(msg, code)

    def restore_runs(self, **opts):
        if not self._deleted_runs_dir:
            raise remotelib.OperationNotSupported()
        self._sync_runs_meta()
        args = click_util.Args(archive=self._deleted_runs_dir, remote=None, **opts)
        preview = "You are about to restore the following runs on %s:" % self.name
        confirm = "Restore these runs?"
        no_runs_help = "Nothing to restore."

        def restore_f(selected):
            self._restore_runs(selected)
            self._sync_runs_meta(force=True)

        try:
            runs_impl.runs_op(
                args,
                None,
                preview,
                confirm,
                no_runs_help,
                restore_f,
                confirm_default=True,
            )
        except SystemExit as e:
            self._fix_system_exit_msg_for_remote(e, ["runs restore"])

    def _restore_runs(self, runs):
        # subclass responsibility
        raise NotImplementedError()

    def purge_runs(self, **opts):
        if not self._deleted_runs_dir:
            raise remotelib.OperationNotSupported()
        self._sync_runs_meta()
        args = click_util.Args(archive=self._deleted_runs_dir, remote=None, **opts)
        preview = (
            "WARNING: You are about to permanently delete "
            "the following runs on %s:" % self.name
        )
        confirm = "Permanently delete these runs?"
        no_runs_help = "Nothing to purge."

        def purge_f(selected):
            self._purge_runs(selected)
            self._sync_runs_meta(force=True)

        try:
            runs_impl.runs_op(
                args,
                None,
                preview,
                confirm,
                no_runs_help,
                purge_f,
                confirm_default=False,
            )
        except SystemExit as e:
            self._fix_system_exit_msg_for_remote(e, ["runs purge"])

    def _purge_runs(self, runs):
        # subclass responsibility
        raise NotImplementedError()

    def run_info(self, **opts):
        self._sync_runs_meta()
        args = click_util.Args(**opts)
        args.archive = self._runs_dir
        args.remote = None
        args.private_attrs = False
        runs_impl.run_info(args, None)
def local_meta_dir(remote_name, key):
    """Return the local directory caching meta data for a remote.

    The directory lives under the remote's var dir, keyed by the MD5 of
    *key* so distinct sync keys get distinct cache dirs.
    """
    digest = hashlib.md5(key.encode()).hexdigest()
    remote_base = var.remote_dir(_safe_filename(remote_name))
    return os.path.join(remote_base, "meta", digest)
def _safe_filename(s):
if not s:
return s
return re.sub(r"\W+", "-", s).strip("-") or "-"
def local_meta_id(local_sync_dir):
    """Return the last-synced meta id for *local_sync_dir*, or None.

    The id is stored in a 'meta-id' file; surrounding whitespace is
    stripped on read.
    """
    return util.try_read(os.path.join(local_sync_dir, "meta-id"), apply=str.strip)
def clear_local_meta_id(local_sync_dir):
    """Forget the cached meta id so the next sync check reports stale."""
    util.ensure_deleted(os.path.join(local_sync_dir, "meta-id"))
def write_local_meta_id(meta_id, local_sync_dir):
    """Record *meta_id* as the last-synced id for *local_sync_dir*."""
    assert meta_id is not None, "meta_id cannot be None"
    target = os.path.join(local_sync_dir, "meta-id")
    with open(target, "w") as out:
        out.write(meta_id)
def meta_current(local_sync_dir, remote_meta_id_cb):
    """Return True if the locally cached meta matches the remote.

    *remote_meta_id_cb* is only invoked when a local id exists, avoiding a
    remote round trip for an obviously stale cache.
    """
    cached_id = local_meta_id(local_sync_dir)
    if cached_id is None:
        log.debug("local meta-id not found, meta not current")
        return False
    current_id = remote_meta_id_cb()
    log.debug("local meta-id: %s", cached_id)
    log.debug("remote meta-id: %s", current_id)
    return cached_id == current_id
|
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test recovery from a crash during chainstate writing.
- 4 nodes
* node0, node1, and node2 will have different dbcrash ratios, and different
dbcache sizes
* node3 will be a regular node, with no crashing.
* The nodes will not connect to each other.
- use default test framework starting chain. initialize starting_tip_height to
tip height.
- Main loop:
* generate lots of transactions on node3, enough to fill up a block.
* uniformly randomly pick a tip height from starting_tip_height to
tip_height; with probability 1/(height_difference+4), invalidate this block.
* mine enough blocks to overtake tip_height at start of loop.
* for each node in [node0,node1,node2]:
- for each mined block:
* submit block to node
* if node crashed on/after submitting:
- restart until recovery succeeds
- check that utxo matches node3 using gettxoutsetinfo"""
import errno
import http.client
import random
import time
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
ToHex,
)
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
create_confirmed_utxos,
hex_str_to_bytes,
)
class ChainstateWriteCrashTest(SyscoinTestFramework):
    def set_test_params(self):
        # Three crashing nodes with different crash ratios / cache sizes plus
        # one control node (node3) that never crashes.
        self.num_nodes = 4
        self.setup_clean_chain = False
        self.rpc_timeout = 480
        self.supports_cli = False

        # Set -maxmempool=0 to turn off mempool memory sharing with dbcache
        # Set -rpcservertimeout=900 to reduce socket disconnects in this
        # long-running test
        self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"]

        # Set different crash ratios and cache sizes. Note that not all of
        # -dbcache goes to the in-memory coins cache.
        self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args
        self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args
        self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args

        # Node3 is a normal node with default args, except will mine full blocks
        # and non-standard txs (e.g. txs with "dust" outputs)
        self.node3_args = ["-blockmaxweight=4000000", "-acceptnonstdtxn"]
        self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
    def skip_test_if_missing_module(self):
        # A wallet is required to generate the spam transactions on node3.
        self.skip_if_no_wallet()
    def setup_network(self):
        self.add_nodes(self.num_nodes, extra_args=self.extra_args)
        self.start_nodes()
        self.import_deterministic_coinbase_privkeys()
        # Leave them unconnected, we'll use submitblock directly in this test
def restart_node(self, node_index, expected_tip):
"""Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
after 60 seconds. Returns the utxo hash of the given node."""
time_start = time.time()
while time.time() - time_start < 120:
try:
# Any of these RPC calls could throw due to node crash
self.start_node(node_index)
self.nodes[node_index].waitforblock(expected_tip)
utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
return utxo_hash
except:
# An exception here should mean the node is about to crash.
# If syscoind exits, then try again. wait_for_node_exit()
# should raise an exception if syscoind doesn't exit.
self.wait_for_node_exit(node_index, timeout=10)
self.crashed_on_restart += 1
time.sleep(1)
# If we got here, syscoind isn't coming back up on restart. Could be a
# bug in syscoind, or we've gotten unlucky with our dbcrash ratio --
# perhaps we generated a test case that blew up our cache?
# TODO: If this happens a lot, we should try to restart without -dbcrashratio
# and make sure that recovery happens.
raise AssertionError("Unable to successfully restart node %d in allotted time", node_index)
    def submit_block_catch_error(self, node_index, block):
        """Try submitting a block to the given node.

        Catch any exceptions that indicate the node has crashed.
        Returns true if the block was submitted successfully; false otherwise."""
        try:
            self.nodes[node_index].submitblock(block)
            return True
        except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e:
            # The HTTP connection dropped mid-request -- consistent with the
            # node crashing while handling the submit
            self.log.debug("node %d submitblock raised exception: %s", node_index, e)
            return False
        except OSError as e:
            self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
            if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
                # The node has likely crashed
                return False
            else:
                # Unexpected exception, raise
                raise
def sync_node3blocks(self, block_hashes):
"""Use submitblock to sync node3's chain with the other nodes
If submitblock fails, restart the node and get the new utxo hash.
If any nodes crash while updating, we'll compare utxo hashes to
ensure recovery was successful."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
# Retrieve all the blocks from node3
blocks = []
for block_hash in block_hashes:
blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])
# Deliver each block to each other node
for i in range(3):
nodei_utxo_hash = None
self.log.debug("Syncing blocks to node %d", i)
for (block_hash, block) in blocks:
# Get the block from node3, and submit to node_i
self.log.debug("submitting block %s", block_hash)
if not self.submit_block_catch_error(i, block):
# TODO: more carefully check that the crash is due to -dbcrashratio
# (change the exit code perhaps, and check that here?)
self.wait_for_node_exit(i, timeout=30)
self.log.debug("Restarting node %d after block hash %s", i, block_hash)
nodei_utxo_hash = self.restart_node(i, block_hash)
assert nodei_utxo_hash is not None
self.restart_counts[i] += 1
else:
# Clear it out after successful submitblock calls -- the cached
# utxo hash will no longer be correct
nodei_utxo_hash = None
# Check that the utxo hash matches node3's utxo set
# NOTE: we only check the utxo set if we had to restart the node
# after the last block submitted:
# - checking the utxo hash causes a cache flush, which we don't
# want to do every time; so
# - we only update the utxo cache after a node restart, since flushing
# the cache is a no-op at that point
if nodei_utxo_hash is not None:
self.log.debug("Checking txoutsetinfo matches for node %d", i)
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def verify_utxo_hash(self):
"""Verify that the utxo hash of each node matches node3.
Restart any nodes that crash while querying."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
self.log.info("Verifying utxo hash matches for all nodes")
for i in range(3):
try:
nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
except OSError:
# probably a crash on db flushing
nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def generate_small_transactions(self, node, count, utxo_list):
FEE = 1000 # TODO: replace this with node relay fee based calculation
num_transactions = 0
random.shuffle(utxo_list)
while len(utxo_list) >= 2 and num_transactions < count:
tx = CTransaction()
input_amount = 0
for i in range(2):
utxo = utxo_list.pop()
tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
input_amount += int(utxo['amount'] * COIN)
output_amount = (input_amount - FEE) // 3
if output_amount <= 0:
# Sanity check -- if we chose inputs that are too small, skip
continue
for i in range(3):
tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
tx_signed_hex = node.signrawtransactionwithwallet(ToHex(tx))['hex']
node.sendrawtransaction(tx_signed_hex)
num_transactions += 1
    def run_test(self):
        """Drive the crash/recovery test.

        Node3 acts as the block/transaction generator and is never crashed
        directly by this test; nodes 0-2 receive its blocks via
        sync_node3blocks() and may crash while flushing their utxo caches
        (see restart_node's note on -dbcrashratio). At the end, utxo hashes
        of all nodes are compared and coverage counters are checked.
        """
        # Track test coverage statistics
        self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
        self.crashed_on_restart = 0 # Track count of crashes during recovery
        # Start by creating a lot of utxos on node3
        initial_height = self.nodes[3].getblockcount()
        utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
        self.log.info("Prepped %d utxo entries", len(utxo_list))
        # Sync these blocks with the other nodes
        block_hashes_to_sync = []
        for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
            block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
        self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
        # Syncing the blocks could cause nodes to crash, so the test begins here.
        self.sync_node3blocks(block_hashes_to_sync)
        starting_tip_height = self.nodes[3].getblockcount()
        # Main test loop:
        # each time through the loop, generate a bunch of transactions,
        # and then either mine a single new block on the tip, or some-sized reorg.
        for i in range(40):
            self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
            # Generate a bunch of small-ish transactions
            self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
            # Pick a random block between current tip, and starting tip
            current_height = self.nodes[3].getblockcount()
            random_height = random.randint(starting_tip_height, current_height)
            self.log.debug("At height %d, considering height %d", current_height, random_height)
            if random_height > starting_tip_height:
                # Randomly reorg from this point with some probability (1/4 for
                # tip, 1/5 for tip-1, ...)
                if random.random() < 1.0 / (current_height + 4 - random_height):
                    self.log.debug("Invalidating block at height %d", random_height)
                    self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
            # Now generate new blocks until we pass the old tip height
            self.log.debug("Mining longer tip")
            block_hashes = []
            while current_height + 1 > self.nodes[3].getblockcount():
                block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount())))
            self.log.debug("Syncing %d new blocks...", len(block_hashes))
            self.sync_node3blocks(block_hashes)
            utxo_list = self.nodes[3].listunspent()
            self.log.debug("Node3 utxo count: %d", len(utxo_list))
        # Check that the utxo hashes agree with node3
        # Useful side effect: each utxo cache gets flushed here, so that we
        # won't get crashes on shutdown at the end of the test.
        self.verify_utxo_hash()
        # Check the test coverage
        self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
        # If no nodes were restarted, we didn't test anything.
        assert self.restart_counts != [0, 0, 0]
        # Make sure we tested the case of crash-during-recovery.
        assert self.crashed_on_restart > 0
        # Warn if any of the nodes escaped restart.
        for i in range(3):
            if self.restart_counts[i] == 0:
                self.log.warning("Node %d never crashed during utxo flush!", i)
# Script entry point: run the crash-recovery functional test.
if __name__ == "__main__":
    ChainstateWriteCrashTest().main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=g-classes-have-attributes
"""Constraints: functions that impose constraints on weight values."""
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.constraints.Constraint')
class Constraint:
  """Base class for weight constraints.

  A `Constraint` instance works like a stateless function.
  Users who subclass this
  class should override the `__call__` method, which takes a single
  weight parameter and return a projected version of that parameter
  (e.g. normalized or clipped). Constraints can be used with various Keras
  layers via the `kernel_constraint` or `bias_constraint` arguments.

  Here's a simple example of a non-negative weight constraint:

  >>> class NonNegative(tf.keras.constraints.Constraint):
  ...
  ...  def __call__(self, w):
  ...    return w * tf.cast(tf.math.greater_equal(w, 0.), w.dtype)

  >>> weight = tf.constant((-1.0, 1.0))
  >>> NonNegative()(weight)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 1.], dtype=float32)>

  >>> tf.keras.layers.Dense(4, kernel_constraint=NonNegative())
  """

  def __call__(self, w):
    """Applies the constraint to the input weight variable.

    By default, the inputs weight variable is not modified.
    Users should override this method to implement their own projection
    function.

    Args:
      w: Input weight variable.

    Returns:
      Projected variable (by default, returns unmodified inputs).
    """
    return w

  def get_config(self):
    """Returns a Python dict of the object config.

    A constraint config is a Python dictionary (JSON-serializable) that can
    be used to reinstantiate the same object (e.g. via
    `tf.keras.constraints.deserialize`).

    Returns:
      Python dict containing the configuration of the constraint object.
    """
    return {}
@keras_export('keras.constraints.MaxNorm', 'keras.constraints.max_norm')
class MaxNorm(Constraint):
  """MaxNorm weight constraint.

  Constrains the weights incident to each hidden unit so that their norm
  is at most a desired value.

  Also available via the shortcut function `tf.keras.constraints.max_norm`.

  Args:
    max_value: the maximum norm value for the incoming weights.
    axis: integer, axis along which to calculate weight norms.
      For instance, in a `Dense` layer the weight matrix has shape
      `(input_dim, output_dim)`; set `axis` to `0` to constrain each
      weight vector of length `(input_dim,)`.
      In a `Conv2D` layer with `data_format="channels_last"`, the weight
      tensor has shape `(rows, cols, input_depth, output_depth)`; set
      `axis` to `[0, 1, 2]` to constrain the weights of each filter
      tensor of size `(rows, cols, input_depth)`.
  """

  def __init__(self, max_value=2, axis=0):
    self.max_value = max_value
    self.axis = axis

  @doc_controls.do_not_generate_docs
  def __call__(self, w):
    # Per-unit L2 norm along the configured axis.
    squared_sum = math_ops.reduce_sum(
        math_ops.square(w), axis=self.axis, keepdims=True)
    norms = backend.sqrt(squared_sum)
    # Clip the norms to [0, max_value] and rescale the weights accordingly;
    # epsilon avoids division by zero for all-zero vectors.
    desired = backend.clip(norms, 0, self.max_value)
    return w * (desired / (backend.epsilon() + norms))

  @doc_controls.do_not_generate_docs
  def get_config(self):
    return {'max_value': self.max_value, 'axis': self.axis}
@keras_export('keras.constraints.NonNeg', 'keras.constraints.non_neg')
class NonNeg(Constraint):
  """Constrains the weights to be non-negative.

  Also available via the shortcut function `tf.keras.constraints.non_neg`.
  """

  def __call__(self, w):
    # Zero out negative entries: the boolean mask (w >= 0) is cast to the
    # Keras default float type before multiplying.
    keep_mask = math_ops.cast(math_ops.greater_equal(w, 0.), backend.floatx())
    return w * keep_mask
@keras_export('keras.constraints.UnitNorm', 'keras.constraints.unit_norm')
class UnitNorm(Constraint):
  """Constrains the weights incident to each hidden unit to have unit norm.

  Also available via the shortcut function `tf.keras.constraints.unit_norm`.

  Args:
    axis: integer, axis along which to calculate weight norms.
      For instance, in a `Dense` layer the weight matrix has shape
      `(input_dim, output_dim)`; set `axis` to `0` to constrain each
      weight vector of length `(input_dim,)`.
      In a `Conv2D` layer with `data_format="channels_last"`, the weight
      tensor has shape `(rows, cols, input_depth, output_depth)`; set
      `axis` to `[0, 1, 2]` to constrain the weights of each filter
      tensor of size `(rows, cols, input_depth)`.
  """

  def __init__(self, axis=0):
    self.axis = axis

  @doc_controls.do_not_generate_docs
  def __call__(self, w):
    # Divide by the per-unit L2 norm; epsilon guards against division by
    # zero for all-zero weight vectors.
    norms = backend.sqrt(
        math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
    return w / (backend.epsilon() + norms)

  @doc_controls.do_not_generate_docs
  def get_config(self):
    return {'axis': self.axis}
@keras_export('keras.constraints.MinMaxNorm', 'keras.constraints.min_max_norm')
class MinMaxNorm(Constraint):
  """MinMaxNorm weight constraint.

  Constrains the weights incident to each hidden unit so that their norm
  lies between a lower bound and an upper bound.

  Also available via the shortcut function `tf.keras.constraints.min_max_norm`.

  Args:
    min_value: the minimum norm for the incoming weights.
    max_value: the maximum norm for the incoming weights.
    rate: rate for enforcing the constraint: weights will be rescaled to
      yield `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
      Effectively, this means that rate=1.0 stands for strict enforcement
      of the constraint, while rate<1.0 means that weights will be
      rescaled at each step to slowly move towards a value inside the
      desired interval.
    axis: integer, axis along which to calculate weight norms.
      For instance, in a `Dense` layer the weight matrix has shape
      `(input_dim, output_dim)`; set `axis` to `0` to constrain each
      weight vector of length `(input_dim,)`.
      In a `Conv2D` layer with `data_format="channels_last"`, the weight
      tensor has shape `(rows, cols, input_depth, output_depth)`; set
      `axis` to `[0, 1, 2]` to constrain the weights of each filter
      tensor of size `(rows, cols, input_depth)`.
  """

  def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
    self.min_value = min_value
    self.max_value = max_value
    self.rate = rate
    self.axis = axis

  @doc_controls.do_not_generate_docs
  def __call__(self, w):
    # Per-unit L2 norm along the configured axis.
    norms = backend.sqrt(
        math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))
    # Blend the clipped norm with the raw norm according to `rate`, then
    # rescale; epsilon avoids division by zero for all-zero vectors.
    clipped = backend.clip(norms, self.min_value, self.max_value)
    desired = self.rate * clipped + (1 - self.rate) * norms
    return w * (desired / (backend.epsilon() + norms))

  @doc_controls.do_not_generate_docs
  def get_config(self):
    return {
        'min_value': self.min_value,
        'max_value': self.max_value,
        'rate': self.rate,
        'axis': self.axis
    }
@keras_export('keras.constraints.RadialConstraint',
              'keras.constraints.radial_constraint')
class RadialConstraint(Constraint):
  """Constrains `Conv2D` kernel weights to be the same for each radius.

  Also available via the shortcut function
  `tf.keras.constraints.radial_constraint`.

  For example, the desired output for the following 4-by-4 kernel:

  ```
      kernel = [[v_00, v_01, v_02, v_03],
                [v_10, v_11, v_12, v_13],
                [v_20, v_21, v_22, v_23],
                [v_30, v_31, v_32, v_33]]
  ```

  is this::

  ```
      kernel = [[v_11, v_11, v_11, v_11],
                [v_11, v_33, v_33, v_11],
                [v_11, v_33, v_33, v_11],
                [v_11, v_11, v_11, v_11]]
  ```

  This constraint can be applied to any `Conv2D` layer version, including
  `Conv2DTranspose` and `SeparableConv2D`, and with either `"channels_last"` or
  `"channels_first"` data format. The method assumes the weight tensor is of
  shape `(rows, cols, input_depth, output_depth)`.
  """

  @doc_controls.do_not_generate_docs
  def __call__(self, w):
    w_shape = w.shape
    if w_shape.rank is None or w_shape.rank != 4:
      raise ValueError(
          'The weight tensor must be of rank 4, but is of shape: %s' % w_shape)
    height, width, channels, kernels = w_shape
    # Collapse the channel/filter axes so each (height, width) kernel slice
    # can be constrained independently.
    w = backend.reshape(w, (height, width, channels * kernels))
    # TODO(cpeter): Switch map_fn for a faster tf.vectorized_map once
    # backend.switch is supported.
    w = backend.map_fn(
        self._kernel_constraint,
        backend.stack(array_ops.unstack(w, axis=-1), axis=0))
    # Restore the original (height, width, channels, kernels) layout.
    return backend.reshape(backend.stack(array_ops.unstack(w, axis=0), axis=-1),
                           (height, width, channels, kernels))

  def _kernel_constraint(self, kernel):
    """Radially constraints a kernel with shape (height, width, channels)."""
    padding = backend.constant([[1, 1], [1, 1]], dtype='int32')
    kernel_shape = backend.shape(kernel)[0]
    start = backend.cast(kernel_shape / 2, 'int32')
    # Seed the result with the center of the kernel: a 1x1 slice for
    # odd-sized kernels, broadcast against 2x2 zeros for even-sized ones.
    kernel_new = backend.switch(
        backend.cast(math_ops.floormod(kernel_shape, 2), 'bool'),
        lambda: kernel[start - 1:start, start - 1:start],
        lambda: kernel[start - 1:start, start - 1:start] + backend.zeros( # pylint: disable=g-long-lambda
            (2, 2), dtype=kernel.dtype))
    # Odd-sized kernels start padding at ring 0, even-sized ones at ring 1
    # (the 2x2 seed already covers the innermost ring).
    index = backend.switch(
        backend.cast(math_ops.floormod(kernel_shape, 2), 'bool'),
        lambda: backend.constant(0, dtype='int32'),
        lambda: backend.constant(1, dtype='int32'))
    while_condition = lambda index, *args: backend.less(index, start)
    def body_fn(i, array):
      # Wrap the current result in a one-pixel border filled with the value
      # at the (start + i, start + i) diagonal position of the original.
      return i + 1, array_ops.pad(
          array,
          padding,
          constant_values=kernel[start + i, start + i])
    # Grow the kernel ring by ring until it reaches the original size; the
    # spatial shape invariant is dynamic because each iteration pads by 1.
    _, kernel_new = control_flow_ops.while_loop(
        while_condition,
        body_fn,
        [index, kernel_new],
        shape_invariants=[index.get_shape(),
                          tensor_shape.TensorShape([None, None])])
    return kernel_new
# Aliases: lowercase shortcut names matching the exported
# `keras.constraints.*` shortcut paths above.
max_norm = MaxNorm
non_neg = NonNeg
unit_norm = UnitNorm
min_max_norm = MinMaxNorm
radial_constraint = RadialConstraint
# Legacy aliases (older spellings without underscores).
maxnorm = max_norm
nonneg = non_neg
unitnorm = unit_norm
@keras_export('keras.constraints.serialize')
def serialize(constraint):
  """Serializes a constraint instance via `serialize_keras_object`."""
  return serialize_keras_object(constraint)
@keras_export('keras.constraints.deserialize')
def deserialize(config, custom_objects=None):
  """Instantiates a constraint from its config via `deserialize_keras_object`.

  Args:
    config: constraint config dictionary.
    custom_objects: optional dict mapping names to custom classes or
      functions, consulted during deserialization.

  Returns:
    A constraint instance.
  """
  return deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='constraint')
@keras_export('keras.constraints.get')
def get(identifier):
  """Retrieves a constraint instance from an identifier.

  Accepts `None`, a config dict, a string class name, or an existing
  callable; raises `ValueError` for anything else.
  """
  if identifier is None:
    return None
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if isinstance(identifier, str):
    # A bare class name deserializes with an empty config.
    return deserialize({'class_name': str(identifier), 'config': {}})
  if callable(identifier):
    return identifier
  raise ValueError('Could not interpret constraint identifier: ' +
                   str(identifier))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# HTTPSClientAuthConnection code comes courtesy of ActiveState website:
# http://code.activestate.com/recipes/
# 577548-https-httplib-client-connection-with-certificate-v/
import collections
import functools
import httplib
import os
import urllib
import urlparse
try:
from eventlet.green import socket
from eventlet.green import ssl
except ImportError:
import socket
import ssl
from heat_cfnclient.common import auth
from heat_cfnclient.common import exception
from heat_cfnclient.common import utils
from heat_cfnclient.openstack.common.gettextutils import _
# common chunk size for get and put (64 KiB)
CHUNKSIZE = 65536
def handle_unauthorized(func):
    """
    Decorator: if the wrapped method raises exception.NotAuthorized,
    re-authenticate (bypassing any cached token) and retry it once.
    """
    @functools.wraps(func)
    def _with_reauth(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except exception.NotAuthorized:
            # The auth token has probably expired -- get a fresh one and
            # give the call one more chance.
            self._authenticate(force_reauth=True)
            return func(self, *args, **kwargs)
    return _with_reauth
def handle_redirects(func):
    """
    Decorator for _do_request: follow up to MAX_REDIRECTS HTTP redirects,
    then give up with exception.MaxRedirectsExceeded.
    """
    MAX_REDIRECTS = 5

    @functools.wraps(func)
    def _follow_redirects(self, method, url, body, headers):
        for _attempt in xrange(MAX_REDIRECTS):
            try:
                return func(self, method, url, body, headers)
            except exception.RedirectException as redirect:
                if redirect.url is None:
                    raise exception.InvalidRedirect()
                # Retry the request against the redirect target.
                url = redirect.url
        raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS)
    return _follow_redirects
class ImageBodyIterator(object):
    """
    Iterator over an image file's chunks of data. This is returned as part
    of the result tuple from `heat_cfnclient.client.Client.get_image`.
    """

    def __init__(self, source):
        """
        Constructs the object from a readable image source
        (such as an HTTPResponse or file-like object).
        """
        self.source = source

    def __iter__(self):
        """
        Yield successive CHUNKSIZE-byte reads from the source until it
        is exhausted.
        """
        while True:
            chunk = self.source.read(CHUNKSIZE)
            if not chunk:
                break
            yield chunk
class HTTPSClientAuthConnection(httplib.HTTPSConnection):
    """
    Class to make a HTTPS connection, with support for
    full client-based SSL Authentication
    :see http://code.activestate.com/recipes/
        577548-https-httplib-client-connection-with-certificate-v/
    """
    def __init__(self, host, port, key_file, cert_file,
                 ca_file, timeout=None, insecure=False):
        # key_file/cert_file provide the client-side identity; ca_file is
        # used to verify the server certificate unless insecure=True.
        httplib.HTTPSConnection.__init__(self, host, port, key_file=key_file,
                                         cert_file=cert_file)
        self.key_file = key_file
        self.cert_file = cert_file
        self.ca_file = ca_file
        self.timeout = timeout
        self.insecure = insecure
    def connect(self):
        """
        Connect to a host on a given (SSL) port.
        If ca_file is pointing somewhere, use it to check Server Certificate.
        Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
        This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to
        ssl.wrap_socket(), which forces SSL to check server certificate against
        our client certificate.
        """
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if self._tunnel_host:
            # Proxy (CONNECT) case: tunnel first, then wrap the socket.
            self.sock = sock
            self._tunnel()
        # Check CA file unless 'insecure' is specified
        if self.insecure is True:
            # CERT_NONE disables server certificate verification entirely.
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        cert_reqs=ssl.CERT_NONE)
        else:
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        ca_certs=self.ca_file,
                                        cert_reqs=ssl.CERT_REQUIRED)
class BaseClient(object):
    """A base client class."""
    DEFAULT_PORT = 80
    DEFAULT_DOC_ROOT = None
    # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
    # Suse, FreeBSD/OpenBSD
    DEFAULT_CA_FILE_PATH = '/etc/ssl/certs/ca-certificates.crt:'\
                           '/etc/pki/tls/certs/ca-bundle.crt:'\
                           '/etc/ssl/ca-bundle.pem:'\
                           '/etc/ssl/cert.pem'
    # HTTP status codes treated as success by _do_request().
    OK_RESPONSE_CODES = (
        httplib.OK,
        httplib.CREATED,
        httplib.ACCEPTED,
        httplib.NO_CONTENT,
    )
    # Status codes converted to RedirectException, which the
    # handle_redirects decorator then retries.
    REDIRECT_RESPONSE_CODES = (
        httplib.MOVED_PERMANENTLY,
        httplib.FOUND,
        httplib.SEE_OTHER,
        httplib.USE_PROXY,
        httplib.TEMPORARY_REDIRECT,
    )
    def __init__(self, host=None, port=None, use_ssl=False, auth_tok=None,
                 creds=None, doc_root=None, key_file=None,
                 cert_file=None, ca_file=None, insecure=False,
                 configure_via_auth=True, service_type=None):
        """
        Creates a new client to some service.
        :param host: The host where service resides
        :param port: The port where service resides
        :param use_ssl: Should we use HTTPS?
        :param auth_tok: The auth token to pass to the server
        :param creds: The credentials to pass to the auth plugin
        :param doc_root: Prefix for all URLs we request from host
        :param key_file: Optional PEM-formatted file that contains the private
                         key.
                         If use_ssl is True, and this param is None (the
                         default), then an environ variable
                         HEAT_CLIENT_KEY_FILE is looked for. If no such
                         environ variable is found, ClientConnectionError
                         will be raised.
        :param cert_file: Optional PEM-formatted certificate chain file.
                          If use_ssl is True, and this param is None (the
                          default), then an environ variable
                          HEAT_CLIENT_CERT_FILE is looked for. If no such
                          environ variable is found, ClientConnectionError
                          will be raised.
        :param ca_file: Optional CA cert file to use in SSL connections
                        If use_ssl is True, and this param is None (the
                        default), then an environ variable
                        HEAT_CLIENT_CA_FILE is looked for.
        :param insecure: Optional. If set then the server's certificate
                         will not be verified.
        :param configure_via_auth: Optional. If True (the default), the
                         host/port/doc_root are reconfigured from the
                         management URL returned by the auth plugin after
                         authentication.
        :param service_type: Optional service type handed to the auth
                         plugin when it is constructed.
        """
        self.host = host
        self.port = port or self.DEFAULT_PORT
        self.use_ssl = use_ssl
        self.auth_tok = auth_tok
        self.creds = creds or {}
        self.connection = None
        self.configure_via_auth = configure_via_auth
        self.service_type = service_type
        # doc_root can be a nullstring, which is valid, and why we
        # cannot simply do doc_root or self.DEFAULT_DOC_ROOT below.
        self.doc_root = (doc_root if doc_root is not None
                         else self.DEFAULT_DOC_ROOT)
        self.auth_plugin = self.make_auth_plugin(self.creds)
        self.key_file = key_file
        self.cert_file = cert_file
        self.ca_file = ca_file
        self.insecure = insecure
        self.connect_kwargs = self.get_connect_kwargs()
    def get_connect_kwargs(self):
        """
        Build the kwargs dict passed to the connection class.
        For SSL connections, resolves the key/cert/CA files from the
        constructor arguments or the HEAT_CLIENT_* environment variables,
        validates that the files exist, and falls back to well-known system
        CA bundle locations when no CA file is given.
        """
        connect_kwargs = {}
        if self.use_ssl:
            if self.key_file is None:
                self.key_file = os.environ.get('HEAT_CLIENT_KEY_FILE')
            if self.cert_file is None:
                self.cert_file = os.environ.get('HEAT_CLIENT_CERT_FILE')
            if self.ca_file is None:
                self.ca_file = os.environ.get('HEAT_CLIENT_CA_FILE')
            # Check that key_file/cert_file are either both set or both unset
            if self.cert_file is not None and self.key_file is None:
                msg = _("You have selected to use SSL in connecting, "
                        "and you have supplied a cert, "
                        "however you have failed to supply either a "
                        "key_file parameter or set the "
                        "HEAT_CLIENT_KEY_FILE environ variable")
                raise exception.ClientConnectionError(msg)
            if self.key_file is not None and self.cert_file is None:
                msg = _("You have selected to use SSL in connecting, "
                        "and you have supplied a key, "
                        "however you have failed to supply either a "
                        "cert_file parameter or set the "
                        "HEAT_CLIENT_CERT_FILE environ variable")
                raise exception.ClientConnectionError(msg)
            if (self.key_file is not None and
                    not os.path.exists(self.key_file)):
                msg = _("The key file you specified %s does not "
                        "exist") % self.key_file
                raise exception.ClientConnectionError(msg)
            connect_kwargs['key_file'] = self.key_file
            if (self.cert_file is not None and
                    not os.path.exists(self.cert_file)):
                msg = _("The cert file you specified %s does not "
                        "exist") % self.cert_file
                raise exception.ClientConnectionError(msg)
            connect_kwargs['cert_file'] = self.cert_file
            if (self.ca_file is not None and
                    not os.path.exists(self.ca_file)):
                msg = _("The CA file you specified %s does not "
                        "exist") % self.ca_file
                raise exception.ClientConnectionError(msg)
            if self.ca_file is None:
                # Fall back to the first existing well-known CA bundle.
                for ca in self.DEFAULT_CA_FILE_PATH.split(":"):
                    if os.path.exists(ca):
                        self.ca_file = ca
                        break
            connect_kwargs['ca_file'] = self.ca_file
            connect_kwargs['insecure'] = self.insecure
        return connect_kwargs
    def set_auth_token(self, auth_tok):
        """
        Updates the authentication token for this client connection.
        """
        # FIXME(sirp): Nova image/heat.py currently calls this. Since this
        # method isn't really doing anything useful[1], we should go ahead and
        # rip it out, first in Nova, then here. Steps:
        #
        #       1. Change auth_tok in heat to auth_token
        #       2. Change image/heat.py in Nova to use client.auth_token
        #       3. Remove this method
        #
        # [1] http://mail.python.org/pipermail/tutor/2003-October/025932.html
        self.auth_tok = auth_tok
    def configure_from_url(self, url):
        """
        Setups the connection based on the given url.
        The form is:
            <http|https>://<host>:port/doc_root
        """
        parsed = urlparse.urlparse(url)
        self.use_ssl = parsed.scheme == 'https'
        if self.host is None:
            # NOTE(review): port falls back to 80 even for https URLs --
            # should this be 443? Confirm before changing.
            self.host = parsed.hostname
            self.port = parsed.port or 80
            self.doc_root = parsed.path
        # ensure connection kwargs are re-evaluated after the service catalog
        # publicURL is parsed for potential SSL usage
        self.connect_kwargs = self.get_connect_kwargs()
    def make_auth_plugin(self, creds):
        """
        Returns an instantiated authentication plugin.
        """
        strategy = creds.get('strategy', 'noauth')
        plugin = auth.get_plugin_from_strategy(strategy,
                                               creds, self.service_type)
        return plugin
    def get_connection_type(self):
        """
        Returns the proper connection type
        """
        if self.use_ssl:
            return HTTPSClientAuthConnection
        else:
            return httplib.HTTPConnection
    def _authenticate(self, force_reauth=False):
        """
        Use the authentication plugin to authenticate and set the auth token.
        :param force_reauth: For re-authentication to bypass cache.
        """
        auth_plugin = self.auth_plugin
        if not auth_plugin.is_authenticated or force_reauth:
            auth_plugin.authenticate()
        self.auth_tok = auth_plugin.auth_token
        management_url = auth_plugin.management_url
        if management_url and self.configure_via_auth:
            # Point this client at the endpoint advertised by the catalog.
            self.configure_from_url(management_url)
    @handle_unauthorized
    def do_request(self, method, action, body=None, headers=None,
                   params=None):
        """
        Make a request, returning an HTTP response object.
        :param method: HTTP verb (GET, POST, PUT, etc.)
        :param action: Requested path to append to self.doc_root
        :param body: Data to send in the body of the request
        :param headers: Headers to send with the request
        :param params: Key/value pairs to use in query string
        :returns: HTTP response object
        """
        if not self.auth_tok:
            self._authenticate()
        url = self._construct_url(action, params)
        return self._do_request(method=method, url=url, body=body,
                                headers=headers)
    def _construct_url(self, action, params=None):
        """
        Create a URL object we can use to pass to _do_request().
        """
        path = '/'.join([self.doc_root or '', action.lstrip('/')])
        scheme = "https" if self.use_ssl else "http"
        netloc = "%s:%d" % (self.host, self.port)
        if isinstance(params, dict):
            # Drop params whose value is None. NOTE(review): deleting while
            # iterating is only safe because Python 2's dict.items()
            # returns a list copy -- this would break on Python 3.
            for (key, value) in params.items():
                if value is None:
                    del params[key]
            query = urllib.urlencode(params)
        else:
            query = None
        return urlparse.ParseResult(scheme, netloc, path, '', query, '')
    @handle_redirects
    def _do_request(self, method, url, body, headers):
        """
        Connects to the server and issues a request.  Handles converting
        any returned HTTP error status codes to OpenStack/heat exceptions
        and closing the server connection. Returns the result data, or
        raises an appropriate exception.
        :param method: HTTP method ("GET", "POST", "PUT", etc...)
        :param url: urlparse.ParsedResult object with URL information
        :param body: data to send (as string, filelike or iterable),
                     or None (default)
        :param headers: mapping of key/value pairs to add as headers
        :note
        If the body param has a read attribute, and method is either
        POST or PUT, this method will automatically conduct a chunked-transfer
        encoding and use the body as a file object or iterable, transferring
        chunks of data using the connection's send() method. This allows large
        objects to be transferred efficiently without buffering the entire
        body in memory.
        """
        if url.query:
            path = url.path + "?" + url.query
        else:
            path = url.path
        try:
            connection_type = self.get_connection_type()
            headers = headers or {}
            if 'x-auth-token' not in headers and self.auth_tok:
                headers['x-auth-token'] = self.auth_tok
            c = connection_type(url.hostname, url.port, **self.connect_kwargs)
            def _pushing(method):
                # True for verbs whose body we may need to stream.
                return method.lower() in ('post', 'put')
            def _simple(body):
                # Strings and None can be handed to httplib directly.
                return body is None or isinstance(body, basestring)
            def _filelike(body):
                return hasattr(body, 'read')
            def _sendbody(connection, iter):
                connection.endheaders()
                for sent in iter:
                    # iterator has done the heavy lifting
                    pass
            def _chunkbody(connection, iter):
                # Stream the body using HTTP chunked transfer encoding.
                connection.putheader('Transfer-Encoding', 'chunked')
                connection.endheaders()
                for chunk in iter:
                    connection.send('%x\r\n%s\r\n' % (len(chunk), chunk))
                connection.send('0\r\n\r\n')
            # Do a simple request or a chunked request, depending
            # on whether the body param is file-like or iterable and
            # the method is PUT or POST
            #
            if not _pushing(method) or _simple(body):
                # Simple request...
                c.request(method, path, body, headers)
            elif _filelike(body) or self._iterable(body):
                c.putrequest(method, path)
                for header, value in headers.items():
                    c.putheader(header, value)
                iter = self.image_iterator(c, headers, body)
                _chunkbody(c, iter)
            else:
                raise TypeError('Unsupported image type: %s' % body.__class__)
            res = c.getresponse()
            def _retry(res):
                return res.getheader('Retry-After')
            status_code = self.get_status_code(res)
            # Map the HTTP status to a return value or a typed exception.
            if status_code in self.OK_RESPONSE_CODES:
                return res
            elif status_code in self.REDIRECT_RESPONSE_CODES:
                raise exception.RedirectException(res.getheader('Location'))
            elif status_code == httplib.UNAUTHORIZED:
                raise exception.NotAuthorized()
            elif status_code == httplib.FORBIDDEN:
                raise exception.NotAuthorized()
            elif status_code == httplib.NOT_FOUND:
                raise exception.NotFound(res.read())
            elif status_code == httplib.CONFLICT:
                raise exception.Duplicate(res.read())
            elif status_code == httplib.BAD_REQUEST:
                raise exception.Invalid(reason=res.read())
            elif status_code == httplib.MULTIPLE_CHOICES:
                raise exception.MultipleChoices(body=res.read())
            elif status_code == httplib.REQUEST_ENTITY_TOO_LARGE:
                raise exception.LimitExceeded(retry=_retry(res),
                                              body=res.read())
            elif status_code == httplib.INTERNAL_SERVER_ERROR:
                raise Exception("Internal Server error: %s" % res.read())
            elif status_code == httplib.SERVICE_UNAVAILABLE:
                raise exception.ServiceUnavailable(retry=_retry(res))
            elif status_code == httplib.REQUEST_URI_TOO_LONG:
                raise exception.RequestUriTooLong(body=res.read())
            else:
                raise Exception("Unknown error occurred! %s" % res.read())
        except (socket.error, IOError) as e:
            raise exception.ClientConnectionError(e)
    def _iterable(self, body):
        # NOTE(review): collections.Iterable is Python 2 spelling; on
        # Python 3.10+ this lives in collections.abc.
        return isinstance(body, collections.Iterable)
    def image_iterator(self, connection, headers, body):
        """
        Wrap `body` in a chunk iterator: chunkreadable for iterables,
        ImageBodyIterator for file-like objects.
        """
        if self._iterable(body):
            return utils.chunkreadable(body)
        else:
            return ImageBodyIterator(body)
    def get_status_code(self, response):
        """
        Returns the integer status code from the response, which
        can be either a Webob.Response (used in testing) or httplib.Response
        """
        if hasattr(response, 'status_int'):
            return response.status_int
        else:
            return response.status
    def _extract_params(self, actual_params, allowed_params):
        """
        Extract a subset of keys from a dictionary. The filters key
        will also be extracted, and each of its values will be returned
        as an individual param.
        :param actual_params: dict of keys to filter
        :param allowed_params: list of keys that 'actual_params' will be
                               reduced to
        :retval subset of 'params' dict
        """
        result = {}
        for param in actual_params:
            if param in allowed_params:
                result[param] = actual_params[param]
            elif 'Parameters.member.' in param:
                # CFN template parameters are always passed through.
                result[param] = actual_params[param]
        return result
|
|
#!/usr/bin/env python
import click
import itertools
import json
import os
import sys
from pyspark import SparkConf
from pyspark.mllib.recommendation import ALS
from pyspark.sql.types import StructType
from pyspark.mllib.util import MLUtils
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.evaluation import MulticlassMetrics
from sklearn import datasets, svm
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import StratifiedShuffleSplit
sys.path.append("../algorithms")
import performance_metrics as pm
import content_based as cb
from singleton import SCSingleton
from timer import Timer
"""
This entire file is to provide a basic understanding of collaborative filtering
and its performance metrics.
* test_simple_rmse() tests RMSE with ALS model and a small dataset using sklearn (meaning it uses array).
* test_rmse() tests RMSE with ALS model and a large dataset using pyspark (meaning it uses RDD).
* test_simple_prfs() tests precision and recall with SVD model and a small dataset using sklearn (meaning it uses array).
* test_prfs() tests precision and recall with LogisticRegressionWithLBFGS model and a large dataset using pyspark (meaning it uses RDD).
This example assumes that you have installed
* pyspark
* psutil
* scala
* spark
* hadoop
"""
def test_simple_rmse():
""" Test RMSE as follows:
(1) train the ALS model with a subset of 15 values
(2) predict a subset of 15 values using the trained model
(3) calculate RMSE or how accurately the prediction is
in comparison to the known values
Values used to train the ALS model are based on a fictitious world where
5 users rate 4 items whether they like or dislike an item. If the user liked
the item, he will provide a rating of 1; otherwise, if the user disliked the
item, he will provide a rating of -1. No rating means that the user has not
rated the item. This data will be formatted in an RDD of [(userId, itemId, rating)].
Splitting these 15 values into training, validation, and test dataset is
randomly selected.
0 1 2 3 = itemID
userId = 0 1 -1 1 1
1 1 -1 -1
2 1 1 -1
3 -1 1
4 1 1 -1
0: (0, 0, 1)
1: (0, 1, -1)
2: (0, 2, 1)
3: (0, 3, 1)
4: (1, 1, 1)
5: (1, 2, -1)
6: (1, 3, -1)
7: (2, 0, 1)
8: (2, 1, 1)
9: (2, 1, -1)
10: (3, 0, -1)
11: (3, 2, 1)
12: (4, 0, 1)
13: (4, 1, 1)
14: (4, 3, -1)
"""
# load the data, an RDD of [(userId, itemId, rating)]
# split data into train (60%), validation (20%), test(20%)
# training (8): data to train the model
# validation (3): best performing approach using the validation data
# test (3): estimate accuracy of the selected approach
# TODO: possible split using sklearn's train_test_split?
trainingArray = [(4, 3, -1), (1, 1, 1), (3, 0, -1),
(4, 0, 1), (1, 2, -1), (0, 0, 1),
(2, 1, -1), (0, 2, 1), (1, 3, -1)]
validationArray = [(4, 1, 1), (3, 2, 1), (2, 1, 1)]
testArray = [(2, 0, 1), (0, 1, -1), (0, 3, 1)]
trainingRDD = scsingleton.sc.parallelize(trainingArray)
validationRDD = scsingleton.sc.parallelize(validationArray)
testRDD = scsingleton.sc.parallelize(testArray)
# run training algorithm to build the model
isExplicit = True
ranks = [3, 5, 7]
#numIters = [5] # default value
#lmbdas = [0.01] # default value
#blocks = -1 # default value
#nonnegative = False # default value
#seed = None # default value
#alpha = [0.01] # default value
model = None
bestModel = None
bestValidationRmse = float("inf")
bestRank = 0
# with validation
#for rank, numIter, lmbda in itertools.product(ranks, numIters, lmbdas):
for rank in ranks:
if isExplicit:
model = ALS.train(trainingRDD, rank)
else:
# TODO: figure out why trainImplicit crash
model = ALS.trainImplicit(trainingRDD, rank, iterations=5, alpha=0.01)
validationPredRDD = model.predictAll( validationRDD.map( lambda x: (x[0], x[1]) ) )
validationRmse = pm.calculate_rmse_using_rdd(validationRDD, validationPredRDD)
if (validationRmse < bestValidationRmse):
bestModel = model
bestValidationRmse = validationRmse
bestRank = rank
# make a prediction
testPredRDD = bestModel.predictAll( testRDD.map( lambda x: (x[0], x[1]) ) ).cache()
"""
# without validation
model = ALS.train(trainingRDD, rank=3)
testPredRDD = model.predictAll( testRDD.map( lambda x: (x[0], x[1]) ) )
"""
# calculate RMSE
testRmse = pm.calculate_rmse_using_rdd(testRDD, testPredRDD)
print "testRmse using RDD = ", testRmse
return
def test_rmse():
    """Train an ALS recommender on MovieLens ratings stored in HDFS and
    print the RMSE on a held-out 40% split, timing each stage.

    Side effects: shells out to the `hdfs` CLI to create a directory and
    upload the ratings file, and reads from hdfs://localhost:9000.
    Reads the module-level `scsingleton` created in __main__.
    """
    # TODO: revised so that it will take user's inputs instead of hardcoded values
    movies_schema = None
    ratings_schema = None
    # load the schemas
    # NOTE(review): the schema files are named *20m* while the data file below
    # is *10m* -- confirm which MovieLens dataset is actually intended.
    # NOTE(review): movies_schema is loaded but never used in this function.
    with open("movielens_20m_movies_schema.json", "r") as json_schema_file:
        movies_schema = StructType.fromJson(json.load(json_schema_file))
    with open("movielens_20m_ratings_schema.json", "r") as json_schema_file:
        ratings_schema = StructType.fromJson(json.load(json_schema_file))
    # create a hdfs directory
    os.system("hdfs dfs -mkdir datasets")
    # load the json file into the hdfs directory
    os.system("hdfs dfs -put movielens_10m_ratings.json.gz datasets/movielens_10m_ratings.json.gz")
    # create a DataFrame based on the content of the json file
    ratingsDF = scsingleton.sqlCtx.read.json("hdfs://localhost:9000/datasets/movielens_10m_ratings.json.gz", schema=ratings_schema)
    # explicitly repartition RDD after loading so that more tasks can run on it in parallel
    # by default, defaultMinPartitions == defaultParallelism == estimated # of cores across all of the machines in your cluster
    ratingsDF = ratingsDF.repartition(scsingleton.sc.defaultParallelism * 3)
    # parse ratings DataFrame into an RDD of [(userId, itemId, rating)]
    ratingsRDD = ratingsDF.map(lambda row: (row.user_id, row.movie_id, row.rating))
    ratingsRDD.cache()
    # split data into train (60%), test (40%)
    # TODO: add validation in the future? train (60%), validation (20%), test(20%)?
    trainingRDD, testRDD = ratingsRDD.randomSplit([0.6, 0.4])
    trainingRDD.cache()
    testRDD.cache()
    # run training algorithm to build the model
    # without validation
    with Timer() as t:
        model = ALS.train(trainingRDD, rank=3)
    print "ALS.train(trainingRDD, rank=3): %s seconds" % t.secs
    # make a prediction
    with Timer() as t:
        testPredRDD = model.predictAll( testRDD.map( lambda x: (x[0], x[1]) ) ).cache()
    print "testPredRDD: %s seconds" % t.secs
    # calculate RMSE
    with Timer() as t:
        testRmse = pm.calculate_rmse_using_rdd(testRDD, testPredRDD)
    print "testRmse: %s seconds" % t.secs
    print "testRmse", testRmse
    return
def test_simple_prfs():
    """ Test Precision and Recall at N (as well as F1-score and Support) as follows:
        (1) train the SVC model with a subset of sklearn's digits dataset
        (2) predict what the number is using
            the trained model and a subset of sklearn's digits dataset
        (3) calculate "Precision and Recall at N" or how accurately it classifies the
            digit in comparison to the known values
    """
    # load the data
    # NOTE(review): sklearn.cross_validation (imported at the top of this
    # file) was removed in newer scikit-learn; train_test_split now lives in
    # sklearn.model_selection.
    digits = datasets.load_digits()
    data = digits.data
    labels = digits.target
    #print "data\n", data[0]
    #print "labels\n", labels
    print "numData = ", len(digits.data)
    print "numTarget = ", len(digits.target)
    # split data into train (60%), test(40%)
    # TODO: add validation in the future? train (60%), validation (20%), test(20%)?
    trainingData, testData, trainingLabel, testLabel = train_test_split(data, labels, test_size=0.4)
    print "numTrainingData = ", len(trainingData)
    print "numTestData = ", len(testData)
    print "numTrainingLabel = ", len(trainingLabel)
    print "numTestLabel == ", len(testLabel)
    # train the model
    model = svm.SVC(gamma=0.001, C=100)
    model.fit(trainingData, trainingLabel)
    # make a prediction
    testPredLabel = model.predict(testData)
    # calculate PRFS (precision, recall, fscore, support) per class
    print "testLabel"
    print testLabel
    print "testPredictedLabel"
    print testPredLabel
    p, r, f, s = pm.calculate_prfs_using_array(testLabel, testPredLabel)
    print "precision =\n", p
    print "recall =\n", r
    print "fscore =\n", f
    print "support =\n", s
    return
def test_prfs():
    # TODO: revised so that it will take user's inputs instead of hardcoded values
    """
    Test Precision, Recall, Fscore, and Support on multiclass classification data
    Input data: https://github.com/apache/spark/blob/master/data/mllib/sample_multiclass_classification_data.txt.

    Side effects: shells out to the `hdfs` CLI and reads from
    hdfs://localhost:9000. Reads the module-level `scsingleton`
    created in __main__.
    """
    # load the schemas (if existed)
    # create a hdfs directory
    #os.system("hdfs dfs -mkdir datasets")
    # load the data file into the hdfs directory
    os.system("hdfs dfs -put sample_multiclass_classification_data.txt datasets/sample_multiclass_classification_data.txt")
    data = MLUtils.loadLibSVMFile(scsingleton.sc, "hdfs://localhost:9000/datasets/sample_multiclass_classification_data.txt")
    # print data.take(1)
    # ie. [LabeledPoint(1.0, (4,[0,1,2,3],[-0.222222,0.5,-0.762712,-0.833333]))]
    # [ ( finalClassification, (numLabels, [label0, label1, label2, ..., labelN], [prob0, prob1, prob2, ..., probN]) ) ]
    # split data into train (60%), test (40%)
    trainingRDD, testRDD = data.randomSplit([0.6, 0.4])
    trainingRDD.cache()
    testRDD.cache()
    # NOTE(review): numTest is computed but unused; the block exists only to
    # time the count() action.
    with Timer() as t:
        numTest = testRDD.count()
    print "testRDD.count(): %s seconds" % t.secs
    # run training algorithm to build the model
    # without validation
    with Timer() as t:
        model = LogisticRegressionWithLBFGS.train(trainingRDD, numClasses=3)
    print "LogisticRegressionWithLBFGS.train(trainingRDD, numClasses=3): %s seconds" % t.secs
    # make a prediction
    with Timer() as t:
        testPredAndLabel = testRDD.map(lambda lp: (float(model.predict(lp.features)), lp.label))
    print "testPredAndLabel: %s seconds" % t.secs
    # calculate Precision, Recall, F1-score
    metrics = MulticlassMetrics(testPredAndLabel)
    print( "precision = %s" % metrics.precision() )
    print( "recall = %s" % metrics.recall() )
    print( "f1-score = %s" % metrics.fMeasure() )
    # statistics by class
    labels = data.map(lambda lp: lp.label).distinct().collect()
    for label in sorted(labels):
        print( "Class %s precision = %s" % (label, metrics.precision(label)) )
        print( "Class %s recall = %s" % (label, metrics.recall(label)) )
        print( "Class %s f1-score = %s" % (label, metrics.fMeasure(label, beta=1.0)) )
    # weighted stats
    print( "Weighted precision = %s" % metrics.weightedPrecision )
    print( "Weighted recall = %s" % metrics.weightedRecall )
    print( "Weighted f1-score = %s" % metrics.weightedFMeasure() )
    print( "Weighted f(0.5)-score = %s" % metrics.weightedFMeasure(beta=0.5) )
    print( "Weighted false positive rate = %s" % metrics.weightedFalsePositiveRate )
    return
if __name__ == "__main__":
    # set up spark environment
    # `scsingleton` is intentionally a module-level global: every test_*
    # function above reads it for SparkContext/SQLContext access.
    conf = SparkConf().setAppName("test_precision_metrics").set("spark.executor.memory", "5g")
    scsingleton = SCSingleton(conf)
    # run each demonstration in turn; each prints its own metrics
    test_simple_rmse()
    test_rmse()
    test_simple_prfs()
    test_prfs()
|
|
"""
Wraps leaflet Polyline, Polygon, Rectangle, Circle, and CircleMarker
"""
from branca.element import MacroElement
from folium.map import Marker, Popup, Tooltip
from folium.utilities import validate_locations, get_bounds
from jinja2 import Template
def path_options(line=False, radius=False, **kwargs):
    """
    Build the options dict shared between vector overlays
    (Polygon, Polyline, Circle, CircleMarker, and Rectangle).

    Parameters
    ----------
    stroke: Bool, True
        Whether to draw stroke along the path.
        Set it to false to disable borders on polygons or circles.
    color: str, '#3388ff'
        Stroke color.
    weight: int, 3
        Stroke width in pixels.
    opacity: float, 1.0
        Stroke opacity.
    line_cap: str, 'round' (lineCap)
        Shape to be used at the end of the stroke.
        https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/stroke-linecap
    line_join: str, 'round' (lineJoin)
        Shape to be used at the corners of the stroke.
        https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/stroke-linejoin
    dash_array: str, None (dashArray)
        Stroke dash pattern.
        Doesn't work on Canvas-powered layers in some old browsers.
        https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/stroke-dasharray
    dash_offset:, str, None (dashOffset)
        Distance into the dash pattern to start the dash.
        Doesn't work on Canvas-powered layers in some old browsers.
        https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/stroke-dashoffset
    fill: Bool, False
        Whether to fill the path with color.
        Set it to false to disable filling on polygons or circles.
    fill_color: str, default to `color` (fillColor)
        Fill color. Defaults to the value of the color option.
    fill_opacity: float, 0.2 (fillOpacity)
        Fill opacity.
    fill_rule: str, 'evenodd' (fillRule)
        How the inside of a shape is determined.
        https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/fill-rule
    bubbling_mouse_events: Bool, True (bubblingMouseEvents)
        When true a mouse event on this path will trigger the same event on the
        map (unless L.DomEvent.stopPropagation is used).

    Note that the presence of `fill_color` will override `fill=False`.

    See https://leafletjs.com/reference-1.6.0.html#path
    """
    color = kwargs.pop('color', '#3388ff')
    fill_color = kwargs.pop('fill_color', False)
    # An explicit fill color implies filling, regardless of `fill`;
    # otherwise the fill color defaults to the stroke color.
    if fill_color:
        fill = True
    else:
        fill_color = color
        fill = kwargs.pop('fill', False)

    options = {
        'stroke': kwargs.pop('stroke', True),
        'color': color,
        'weight': kwargs.pop('weight', 3),
        'opacity': kwargs.pop('opacity', 1.0),
        'lineCap': kwargs.pop('line_cap', 'round'),
        'lineJoin': kwargs.pop('line_join', 'round'),
        'dashArray': kwargs.pop('dash_array', None),
        'dashOffset': kwargs.pop('dash_offset', None),
        'fill': fill,
        'fillColor': fill_color,
        'fillOpacity': kwargs.pop('fill_opacity', 0.2),
        'fillRule': kwargs.pop('fill_rule', 'evenodd'),
        'bubblingMouseEvents': kwargs.pop('bubbling_mouse_events', True),
    }
    # Line-only options (polylines/polygons) and the circle radius are only
    # present when requested by the caller.
    if line:
        options['smoothFactor'] = kwargs.pop('smooth_factor', 1.0)
        options['noClip'] = kwargs.pop('no_clip', False)
    if radius:
        options['radius'] = radius
    return options
class BaseMultiLocation(MacroElement):
    """Abstract base for vector overlays built from multiple coordinates.

    Not intended for direct use.
    """

    def __init__(self, locations, popup=None, tooltip=None):
        super(BaseMultiLocation, self).__init__()
        self.locations = validate_locations(locations)
        if popup is not None:
            child = popup if isinstance(popup, Popup) else Popup(str(popup))
            self.add_child(child)
        if tooltip is not None:
            child = (tooltip if isinstance(tooltip, Tooltip)
                     else Tooltip(str(tooltip)))
            self.add_child(child)

    def _get_self_bounds(self):
        """Return the bounds of this object's own coordinates."""
        return get_bounds(self.locations)
class PolyLine(BaseMultiLocation):
    """Draw a polyline overlay on the map.

    See :func:`folium.vector_layers.path_options` for the `Path` options.

    Parameters
    ----------
    locations: list of points (latitude, longitude)
        Latitude and Longitude of line (Northing, Easting)
    popup: str or folium.Popup, default None
        Input text or visualization for object displayed when clicking.
    tooltip: str or folium.Tooltip, default None
        Display a text when hovering over the object.
    smooth_factor: float, default 1.0
        How much to simplify the polyline on each zoom level.
        Larger values mean better performance and a smoother look;
        smaller values mean a more accurate representation.
    no_clip: Bool, default False
        Disable polyline clipping.
    **kwargs
        Other valid (possibly inherited) options. See:
        https://leafletjs.com/reference-1.6.0.html#polyline
    """
    _template = Template(u"""
            {% macro script(this, kwargs) %}
                var {{ this.get_name() }} = L.polyline(
                    {{ this.locations|tojson }},
                    {{ this.options|tojson }}
                ).addTo({{this._parent.get_name()}});
            {% endmacro %}
            """)

    def __init__(self, locations, popup=None, tooltip=None, **kwargs):
        super(PolyLine, self).__init__(locations, popup=popup, tooltip=tooltip)
        self.options = path_options(line=True, **kwargs)
        self._name = 'PolyLine'
class Polygon(BaseMultiLocation):
    """Draw a polygon overlay on the map.

    See :func:`folium.vector_layers.path_options` for the `Path` options.

    Parameters
    ----------
    locations: list of points (latitude, longitude)
        Latitude and Longitude of line (Northing, Easting)
    popup: string or folium.Popup, default None
        Input text or visualization for object displayed when clicking.
    tooltip: str or folium.Tooltip, default None
        Display a text when hovering over the object.
    **kwargs
        Other valid (possibly inherited) options. See:
        https://leafletjs.com/reference-1.6.0.html#polygon
    """
    _template = Template(u"""
            {% macro script(this, kwargs) %}
                var {{ this.get_name() }} = L.polygon(
                    {{ this.locations|tojson }},
                    {{ this.options|tojson }}
                ).addTo({{this._parent.get_name()}});
            {% endmacro %}
            """)

    def __init__(self, locations, popup=None, tooltip=None, **kwargs):
        super(Polygon, self).__init__(locations, popup=popup, tooltip=tooltip)
        self.options = path_options(line=True, **kwargs)
        self._name = 'Polygon'
class Rectangle(BaseMultiLocation):
    """Draw a rectangle overlay on the map.

    See :func:`folium.vector_layers.path_options` for the `Path` options.

    Parameters
    ----------
    bounds: list of points (latitude, longitude)
        Latitude and Longitude of line (Northing, Easting)
    popup: string or folium.Popup, default None
        Input text or visualization for object displayed when clicking.
    tooltip: str or folium.Tooltip, default None
        Display a text when hovering over the object.
    **kwargs
        Other valid (possibly inherited) options. See:
        https://leafletjs.com/reference-1.6.0.html#rectangle
    """
    _template = Template(u"""
            {% macro script(this, kwargs) %}
                var {{this.get_name()}} = L.rectangle(
                    {{ this.locations|tojson }},
                    {{ this.options|tojson }}
                ).addTo({{this._parent.get_name()}});
            {% endmacro %}
            """)

    def __init__(self, bounds, popup=None, tooltip=None, **kwargs):
        super(Rectangle, self).__init__(bounds, popup=popup, tooltip=tooltip)
        self.options = path_options(line=True, **kwargs)
        self._name = 'rectangle'
class Circle(Marker):
    """
    Draw a circle overlay, with a radius in meters, on the map.

    It's an approximation and starts to diverge from a real circle closer to
    the poles (due to projection distortion).

    See :func:`folium.vector_layers.path_options` for the `Path` options.

    Parameters
    ----------
    location: tuple[float, float]
        Latitude and Longitude pair (Northing, Easting)
    popup: string or folium.Popup, default None
        Input text or visualization for object displayed when clicking.
    tooltip: str or folium.Tooltip, default None
        Display a text when hovering over the object.
    radius: float
        Radius of the circle, in meters.
    **kwargs
        Other valid (possibly inherited) options. See:
        https://leafletjs.com/reference-1.6.0.html#circle
    """
    _template = Template(u"""
            {% macro script(this, kwargs) %}
                var {{ this.get_name() }} = L.circle(
                    {{ this.location|tojson }},
                    {{ this.options|tojson }}
                ).addTo({{ this._parent.get_name() }});
            {% endmacro %}
            """)

    def __init__(self, location=None, radius=50, popup=None, tooltip=None, **kwargs):
        super(Circle, self).__init__(location, popup=popup, tooltip=tooltip)
        self.options = path_options(line=False, radius=radius, **kwargs)
        self._name = 'circle'
class CircleMarker(Marker):
    """
    Draw a circle of a fixed size, with a radius specified in pixels.

    See :func:`folium.vector_layers.path_options` for the `Path` options.

    Parameters
    ----------
    location: tuple[float, float]
        Latitude and Longitude pair (Northing, Easting)
    popup: string or folium.Popup, default None
        Input text or visualization for object displayed when clicking.
    tooltip: str or folium.Tooltip, default None
        Display a text when hovering over the object.
    radius: float, default 10
        Radius of the circle marker, in pixels.
    **kwargs
        Other valid (possibly inherited) options. See:
        https://leafletjs.com/reference-1.6.0.html#circlemarker
    """
    _template = Template(u"""
            {% macro script(this, kwargs) %}
                var {{ this.get_name() }} = L.circleMarker(
                    {{ this.location|tojson }},
                    {{ this.options|tojson }}
                ).addTo({{ this._parent.get_name() }});
            {% endmacro %}
            """)

    def __init__(self, location=None, radius=10, popup=None, tooltip=None, **kwargs):
        super(CircleMarker, self).__init__(location, popup=popup,
                                           tooltip=tooltip)
        self.options = path_options(line=False, radius=radius, **kwargs)
        self._name = 'CircleMarker'
|
|
# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for filter.py."""
import unittest
from filter import _CategoryFilter as CategoryFilter
from filter import validate_filter_rules
from filter import FilterConfiguration
# On Testing __eq__() and __ne__():
#
# In the tests below, we deliberately do not use assertEqual() or
# assertNotEquals() to test __eq__() or __ne__(). We do this to be
# very explicit about what we are testing, especially in the case
# of assertNotEquals().
#
# Part of the reason is that it is not immediately clear what
# expression the unittest module uses to assert "not equals" -- the
# negation of __eq__() or __ne__(), which are not necessarily
# equivalent expressions in Python. For example, from Python's "Data
# Model" documentation--
#
# "There are no implied relationships among the comparison
# operators. The truth of x==y does not imply that x!=y is
# false. Accordingly, when defining __eq__(), one should
# also define __ne__() so that the operators will behave as
# expected."
#
# (from http://docs.python.org/reference/datamodel.html#object.__ne__ )
class ValidateFilterRulesTest(unittest.TestCase):
    """Tests validate_filter_rules() function."""

    def test_validate_filter_rules(self):
        """Check that bad rules raise ValueError and good rules do not."""
        all_categories = ["tabs", "whitespace", "build/include"]
        bad_rules = [
            "tabs",
            "*tabs",
            " tabs",
            " +tabs",
            "+whitespace/newline",
            "+xxx",
            ]
        good_rules = [
            "+tabs",
            "-tabs",
            "+build"
            ]
        for bad_rule in bad_rules:
            self.assertRaises(ValueError, validate_filter_rules,
                              [bad_rule], all_categories)
        for good_rule in good_rules:
            # Expected to pass without raising.
            validate_filter_rules([good_rule], all_categories)
class CategoryFilterTest(unittest.TestCase):
    """Tests CategoryFilter class."""

    # Locals are named "category_filter" rather than "filter" to avoid
    # shadowing the filter() builtin.

    def test_init(self):
        """Test __init__ method."""
        # Test that the attributes are getting set correctly.
        category_filter = CategoryFilter(["+"])
        self.assertEqual(["+"], category_filter._filter_rules)

    def test_init_default_arguments(self):
        """Test __init__ method default arguments."""
        category_filter = CategoryFilter()
        self.assertEqual([], category_filter._filter_rules)

    def test_str(self):
        """Test __str__ "to string" operator."""
        category_filter = CategoryFilter(["+a", "-b"])
        self.assertEqual(str(category_filter), "+a,-b")

    def test_eq(self):
        """Test __eq__ equality function."""
        filter1 = CategoryFilter(["+a", "+b"])
        filter2 = CategoryFilter(["+a", "+b"])
        filter3 = CategoryFilter(["+b", "+a"])
        # See the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertTrue(filter1.__eq__(filter2))
        self.assertFalse(filter1.__eq__(filter3))

    def test_ne(self):
        """Test __ne__ inequality function."""
        # By default, __ne__ always returns true on different objects.
        # Thus, just check the distinguishing case to verify that the
        # code defines __ne__.
        #
        # Also, see the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertFalse(CategoryFilter().__ne__(CategoryFilter()))

    def test_should_check(self):
        """Test should_check() method."""
        category_filter = CategoryFilter()
        self.assertTrue(category_filter.should_check("everything"))
        # Check a second time to exercise cache.
        self.assertTrue(category_filter.should_check("everything"))

        category_filter = CategoryFilter(["-"])
        self.assertFalse(category_filter.should_check("anything"))
        # Check a second time to exercise cache.
        self.assertFalse(category_filter.should_check("anything"))

        category_filter = CategoryFilter(["-", "+ab"])
        self.assertTrue(category_filter.should_check("abc"))
        self.assertFalse(category_filter.should_check("a"))

        category_filter = CategoryFilter(["+", "-ab"])
        self.assertFalse(category_filter.should_check("abc"))
        self.assertTrue(category_filter.should_check("a"))
class FilterConfigurationTest(unittest.TestCase):
    """Tests FilterConfiguration class."""
    def _config(self, base_rules, path_specific, user_rules):
        """Return a FilterConfiguration instance."""
        # Helper so each test can build a configuration with explicit
        # positional arguments.
        return FilterConfiguration(base_rules=base_rules,
                                   path_specific=path_specific,
                                   user_rules=user_rules)
    def test_init(self):
        """Test __init__ method."""
        # Test that the attributes are getting set correctly.
        # We use parameter values that are different from the defaults.
        base_rules = ["-"]
        path_specific = [(["path"], ["+a"])]
        user_rules = ["+"]
        config = self._config(base_rules, path_specific, user_rules)
        self.assertEqual(base_rules, config._base_rules)
        self.assertEqual(path_specific, config._path_specific)
        self.assertEqual(user_rules, config._user_rules)
    def test_default_arguments(self):
        """Test __init__ method default arguments."""
        # Test that the attributes are getting set correctly to the defaults.
        config = FilterConfiguration()
        self.assertEqual([], config._base_rules)
        self.assertEqual([], config._path_specific)
        self.assertEqual([], config._user_rules)
    def test_eq(self):
        """Test __eq__ method."""
        # See the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertTrue(FilterConfiguration().__eq__(FilterConfiguration()))
        # Verify that a difference in any argument causes equality to fail.
        config = FilterConfiguration()
        # These parameter values are different from the defaults.
        base_rules = ["-"]
        path_specific = [(["path"], ["+a"])]
        user_rules = ["+"]
        self.assertFalse(config.__eq__(FilterConfiguration(
                             base_rules=base_rules)))
        self.assertFalse(config.__eq__(FilterConfiguration(
                             path_specific=path_specific)))
        self.assertFalse(config.__eq__(FilterConfiguration(
                             user_rules=user_rules)))
    def test_ne(self):
        """Test __ne__ method."""
        # By default, __ne__ always returns true on different objects.
        # Thus, just check the distinguishing case to verify that the
        # code defines __ne__.
        #
        # Also, see the notes at the top of this module about testing
        # __eq__() and __ne__().
        self.assertFalse(FilterConfiguration().__ne__(FilterConfiguration()))
    def test_base_rules(self):
        """Test effect of base_rules on should_check()."""
        base_rules = ["-b"]
        path_specific = []
        user_rules = []
        config = self._config(base_rules, path_specific, user_rules)
        self.assertTrue(config.should_check("a", "path"))
        self.assertFalse(config.should_check("b", "path"))
    def test_path_specific(self):
        """Test effect of path_rules_specifier on should_check()."""
        base_rules = ["-"]
        path_specific = [(["path1"], ["+b"]),
                         (["path2"], ["+c"])]
        user_rules = []
        config = self._config(base_rules, path_specific, user_rules)
        self.assertFalse(config.should_check("c", "path1"))
        self.assertTrue(config.should_check("c", "path2"))
        # Test that first match takes precedence.
        self.assertFalse(config.should_check("c", "path2/path1"))
    def test_path_with_different_case(self):
        """Test a path that differs only in case."""
        # Path matching is expected to be case-insensitive: "FOO/" should
        # match the "Foo/" specifier below.
        base_rules = ["-"]
        path_specific = [(["Foo/"], ["+whitespace"])]
        user_rules = []
        config = self._config(base_rules, path_specific, user_rules)
        self.assertFalse(config.should_check("whitespace", "Fooo/bar.txt"))
        self.assertTrue(config.should_check("whitespace", "Foo/bar.txt"))
        # Test different case.
        self.assertTrue(config.should_check("whitespace", "FOO/bar.txt"))
    def test_user_rules(self):
        """Test effect of user_rules on should_check()."""
        base_rules = ["-"]
        path_specific = []
        user_rules = ["+b"]
        config = self._config(base_rules, path_specific, user_rules)
        self.assertFalse(config.should_check("a", "path"))
        self.assertTrue(config.should_check("b", "path"))
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for measuring and maximizing compound divergence across splits.
The definition of compound divergence is from:
https://arxiv.org/abs/1912.09713
"""
import collections
def _compute_divergence(compound_counts_1, compound_counts_2, coef=0.1):
"""Compute compound divergence using Chernoff coefficient."""
sum_1 = sum(compound_counts_1.values())
sum_2 = sum(compound_counts_2.values())
frequencies_1 = {
key: float(count) / sum_1 for key, count in compound_counts_1.items()
}
frequencies_2 = {
key: float(count) / sum_2 for key, count in compound_counts_2.items()
}
similarity = 0.0
for compound, frequency_1 in frequencies_1.items():
if compound not in frequencies_2:
# Contribution will be 0.
continue
frequency_2 = frequencies_2[compound]
similarity += frequency_1**coef * frequency_2**(1.0 - coef)
return 1.0 - similarity
def _get_all_compounds(examples, get_compounds_fn):
compounds_to_count = collections.Counter()
for example in examples:
compounds_to_count.update(get_compounds_fn(example))
return compounds_to_count
def measure_example_divergence(examples_1, examples_2, get_compounds_fn):
  """Return the compound divergence between two sets of examples."""
  counts_1 = _get_all_compounds(examples_1, get_compounds_fn)
  counts_2 = _get_all_compounds(examples_2, get_compounds_fn)
  return _compute_divergence(counts_1, counts_2)
def _get_mcd_idx_1(divergence, examples_1, compounds_1, compounds_2, atoms,
                   get_compounds_fn, get_atoms_fn):
  """Return index of example to swap from examples_1 to examples_2.

  Returns (index, example) for the first example whose move would increase
  compound divergence, or (None, None) if no such example exists.
  """
  for candidate_idx, candidate in enumerate(examples_1):
    # Skip examples containing any atom that appears only once in
    # examples_1; moving one would violate the atom constraint.
    if _contains_atom(candidate, atoms, get_atoms_fn):
      continue
    # Simulate moving the example from examples_1 to examples_2 and
    # recompute divergence, ignoring the effect of moving some other
    # example back from examples_2 for now.
    # TODO(petershaw): This could potentially be computed more efficiently
    # for larger numbers of compounds by incrementally computing the change
    # in compound divergence over affected compound counts only, and using
    # this as an estimate for the overall change in compound divergence.
    moved_compounds = get_compounds_fn(candidate)
    trial_1 = compounds_1.copy()
    trial_1.subtract(moved_compounds)
    trial_2 = compounds_2.copy()
    trial_2.update(moved_compounds)
    # Greedy: accept the first candidate that would increase divergence.
    if _compute_divergence(trial_1, trial_2) > divergence:
      return candidate_idx, candidate
  return None, None
def _get_mcd_idx_2(divergence, examples_2, compounds_1, compounds_2,
                   get_compounds_fn):
  """Return index of example to swap from examples_2 to examples_1."""
  for idx, candidate in enumerate(examples_2):
    # Simulate moving the candidate from examples_2 to examples_1.
    # TODO(petershaw): Incrementally updating only the affected compound
    # counts would scale better for large numbers of compounds.
    moved = get_compounds_fn(candidate)
    trial_1 = compounds_1.copy()
    trial_1.update(moved)
    trial_2 = compounds_2.copy()
    trial_2.subtract(moved)
    # Greedy: accept the first candidate that strictly increases compound
    # divergence.
    if _compute_divergence(trial_1, trial_2) > divergence:
      return idx, candidate
  return None, None
def maximize_divergence(examples_1, examples_2, get_compounds_fn, get_atoms_fn,
                        max_iterations, max_divergence, min_atom_count):
  """Approx. maximizes compound divergence by iteratively swapping examples.

  Args:
    examples_1: List of examples (mutated in place by swaps).
    examples_2: List of examples (mutated in place by swaps).
    get_compounds_fn: Function from example to collections.Counter of compounds.
    get_atoms_fn: Function from example to set of atoms.
    max_iterations: Maximum number of swap iterations.
    max_divergence: If truthy, stop once divergence reaches this value.
    min_atom_count: Examples whose atoms appear fewer than this many times in
      examples_1 are never moved out of examples_1.

  Returns:
    Tuple of (examples_1, examples_2) after swapping.
  """
  for iteration_num in range(max_iterations):
    # Atoms that are currently scarce in examples_1; _get_mcd_idx_1 must not
    # move an example containing any of them.
    atoms_1_single = _get_atoms_below_count(
        examples_1, get_atoms_fn, atom_count=min_atom_count)
    # Compute the compound divergence for the current split of examples.
    compounds_1 = _get_all_compounds(
        examples_1, get_compounds_fn=get_compounds_fn)
    compounds_2 = _get_all_compounds(
        examples_2, get_compounds_fn=get_compounds_fn)
    divergence = _compute_divergence(compounds_1, compounds_2)
    print("Iteration %s divergence: %s" % (iteration_num, divergence))
    if max_divergence and divergence >= max_divergence:
      print("Reached divergence target.")
      break
    # Find a new pair of examples to swap to increase compound divergence.
    # First, we find an example in examples_1 that would increase compound
    # divergence if moved to examples_2, and would not violate the atom
    # constraint.
    example_1_idx, example_1 = _get_mcd_idx_1(
        divergence,
        examples_1,
        compounds_1,
        compounds_2,
        atoms_1_single,
        get_compounds_fn=get_compounds_fn,
        get_atoms_fn=get_atoms_fn)
    if not example_1:
      print("Cannot find example_1 idx to swap.")
      break
    # Apply example_1's move to the counters so the second search sees the
    # post-move state.
    compounds_example_1 = get_compounds_fn(example_1)
    compounds_1.subtract(compounds_example_1)
    compounds_2.update(compounds_example_1)
    # Second, we find an example in examples_2 that would increase compound
    # divergence if moved to examples_1, taking into account the effect of
    # moving the example selected above to examples_2 first.
    example_2_idx, example_2 = _get_mcd_idx_2(
        divergence,
        examples_2,
        compounds_1,
        compounds_2,
        get_compounds_fn=get_compounds_fn)
    if not example_2:
      print("Cannot find example_2 idx to swap.")
      break
    # Swap the examples.
    print("Swapping %s and %s." % (example_1, example_2))
    del examples_1[example_1_idx]
    examples_1.append(example_2)
    del examples_2[example_2_idx]
    examples_2.append(example_1)
  # NOTE(review): this message is printed even when the loop exits early via
  # break (no for/else is used).
  print("Max iterations reached.")
  return examples_1, examples_2
def get_all_atoms(examples, get_atoms_fn):
  """Returns the union of atoms across all examples."""
  # set().union(...) handles the empty-examples case by returning set().
  return set().union(*(get_atoms_fn(example) for example in examples))
def _get_swap_idx(examples, atoms, get_atoms_fn, contains=True):
  """Returns (index, example) selected by an atom-membership constraint.

  If `contains` is True, returns the first example in `examples` containing
  any atom in `atoms`; if False, the first example containing none of them.

  Args:
    examples: List of examples.
    atoms: Set of atoms.
    get_atoms_fn: Function from an example to set of atoms.
    contains: Bool (see above).

  Returns:
    (example_idx, example) for the first example meeting the criteria.

  Raises:
    ValueError: If no example satisfies the constraint.
  """
  for idx, example in enumerate(examples):
    if _contains_atom(example, atoms, get_atoms_fn) == contains:
      return idx, example
  message = ("Could not find example that contains any atoms in: %s"
             if contains else
             "Could not find example that doesn't contain any atoms in: %s")
  raise ValueError(message % atoms)
def balance_atoms(examples_1, examples_2, get_atoms_fn, max_iterations,
                  min_atom_count):
  """Attempts to ensure every atom is represented in the first set.

  Iteratively swaps one example from examples_1 with one from examples_2 until
  every atom appearing in examples_2 appears at least `min_atom_count` times
  in examples_1. Both lists are mutated in place.

  Raises:
    ValueError: If balancing fails within max_iterations, or a suitable swap
      candidate cannot be found (via _get_swap_idx).

  NOTE(review): if max_iterations == 0 the loop body never runs and the final
  raise references atoms_1_below/atoms_2_m_1 before assignment, producing a
  NameError instead of the intended ValueError.
  """
  for iteration_num in range(max_iterations):
    # Find atoms that appear >= min_atom_count in `examples_1`.
    atoms_1_above = _get_atoms_above_count(examples_1, get_atoms_fn,
                                           min_atom_count)
    # Find atoms in `examples_2`.
    atoms_2 = get_all_atoms(examples_2, get_atoms_fn=get_atoms_fn)
    # Find atoms in examples_2 not in examples_1 at least `min_atom_count`.
    atoms_2_m_1 = atoms_2 - atoms_1_above
    # If there are no atoms in `atoms_2` not in in examples_1, then we have
    # reached our goal state.
    if not atoms_2_m_1:
      print("Atoms are balanced after %s iterations." % iteration_num)
      return examples_1, examples_2
    # Find atoms that appear <= min_atom_count in `examples_1`.
    atoms_1_below = _get_atoms_below_count(examples_1, get_atoms_fn,
                                           min_atom_count)
    # Find candidates to swap.
    # First, find an example in examples_1 that does not contain any atoms
    # that appear only once in examples_1. Otherwise, moving the examples to
    # examples_2 can take us farther from our goal state.
    example_1_idx, example_1 = _get_swap_idx(
        examples_1, atoms_1_below, get_atoms_fn=get_atoms_fn, contains=False)
    # Second, find an example in examples_2 that contains one of the atoms
    # that is currently missing from examples_1.
    example_2_idx, example_2 = _get_swap_idx(
        examples_2, atoms_2_m_1, get_atoms_fn=get_atoms_fn, contains=True)
    # Swap the examples.
    del examples_1[example_1_idx]
    examples_1.append(example_2)
    del examples_2[example_2_idx]
    examples_2.append(example_1)
  raise ValueError("Could not find split that balances atoms [%s] [%s]" %
                   (atoms_1_below, atoms_2_m_1))
def _contains_atom(example, atoms, get_atoms_fn):
"""Returns True if example contains any atom in atoms."""
example_atoms = get_atoms_fn(example)
for example_atom in example_atoms:
if example_atom in atoms:
return True
return False
def _get_atoms_to_count(examples, get_atoms_fn):
"""Return map of atom to count of examples containing atom."""
atoms_to_count = collections.defaultdict(int)
for example in examples:
atoms = get_atoms_fn(example)
for atom in atoms:
atoms_to_count[atom] += 1
return atoms_to_count
def _get_atoms_above_count(examples, get_atoms_fn, atom_count):
  """Return set of atoms that appear >= atom_count times across all examples."""
  counts = _get_atoms_to_count(examples, get_atoms_fn)
  return {atom for atom, count in counts.items() if count >= atom_count}
def _get_atoms_below_count(examples, get_atoms_fn, atom_count):
  """Return set of atoms that appear <= atom_count times across all examples."""
  counts = _get_atoms_to_count(examples, get_atoms_fn)
  return {atom for atom, count in counts.items() if count <= atom_count}
def print_compound_frequencies(examples_1, examples_2, get_compounds_fn):
  """Prints compound frequencies for debugging."""
  counts_1 = _get_all_compounds(examples_1, get_compounds_fn=get_compounds_fn)
  counts_2 = _get_all_compounds(examples_2, get_compounds_fn=get_compounds_fn)
  total_1 = sum(counts_1.values())
  total_2 = sum(counts_2.values())
  # Normalize each side's counts into frequencies.
  freqs_1 = {key: float(count) / total_1 for key, count in counts_1.items()}
  freqs_2 = {key: float(count) / total_2 for key, count in counts_2.items()}
  # A compound missing from one side is reported with frequency 0.0.
  for key in set(counts_1.keys()).union(set(counts_2.keys())):
    print("%s: %s - %s" % (key, freqs_1.get(key, 0.0), freqs_2.get(key, 0.0)))
def swap_examples(examples_1,
                  examples_2,
                  get_compounds_fn,
                  get_atoms_fn,
                  max_iterations=1000,
                  max_divergence=None,
                  min_atom_count=1):
  """Swaps examples between examples_1 and examples_2 to maximize divergence.

  This approach first balances atoms to ensure that every atom that appears
  in examples_2 appears in examples_1 at least `min_atom_count` times.
  Then, the algorithm identifies a swap between each collection of examples
  that does not violate the atom constraint, but increases compound divergence.
  The procedure breaks when no swap that increases compound divergence can
  be found, or max_iterations or max_divergence is reached.
  To generate different splits, a different initial random split into examples_1
  and examples_2 can be used before calling this function.

  Args:
    examples_1: A list of examples of type E.
    examples_2: A list of examples of type E.
    get_compounds_fn: A function from E to a collections.Counter of strings
      representing compounds.
    get_atoms_fn: A function from E to a set of strings representing atoms.
    max_iterations: A maximum number of iterations (i.e. swap) to run for.
    max_divergence: If not None, will break if compound divergence exceeds this
      value.
    min_atom_count: Minimum amount of times an atom in examples_2 should appear
      in examples_1.

  Returns:
    (examples_1, examples_2) where each list is the same length and type as the
    corresponding input, but examples have been swapped per the method described
    above.
  """
  # Phase 1: satisfy the atom constraint.
  examples_1, examples_2 = balance_atoms(examples_1, examples_2, get_atoms_fn,
                                         max_iterations, min_atom_count)
  # Phase 2: greedily swap pairs to raise compound divergence while keeping
  # the atom constraint intact.
  examples_1, examples_2 = maximize_divergence(examples_1, examples_2,
                                               get_compounds_fn, get_atoms_fn,
                                               max_iterations, max_divergence,
                                               min_atom_count)
  # Debug output of the resulting per-compound frequency pairs.
  print_compound_frequencies(examples_1, examples_2, get_compounds_fn)
  return examples_1, examples_2
|
|
"""Test letsencrypt.display.ops."""
import os
import sys
import tempfile
import unittest
import mock
import zope.component
from acme import jose
from acme import messages
from letsencrypt import account
from letsencrypt import interfaces
from letsencrypt.display import util as display_util
from letsencrypt.tests import test_util
KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
class ChoosePluginTest(unittest.TestCase):
    """Tests for letsencrypt.display.ops.choose_plugin."""
    def setUp(self):
        # Register a real FileDisplay so zope utility lookups performed by
        # the code under test succeed.
        zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
        # One misconfigured plugin and one healthy one, to cover both cases.
        self.mock_apache = mock.Mock(
            description_with_name="a", misconfigured=True)
        self.mock_stand = mock.Mock(
            description_with_name="s", misconfigured=False)
        self.mock_stand.init().more_info.return_value = "standalone"
        self.plugins = [
            self.mock_apache,
            self.mock_stand,
        ]
    def _call(self):
        # Import inside the helper so import errors surface per-test.
        from letsencrypt.display.ops import choose_plugin
        return choose_plugin(self.plugins, "Question?")
    @mock.patch("letsencrypt.display.ops.util")
    def test_successful_choice(self, mock_util):
        # User immediately selects the first menu entry.
        mock_util().menu.return_value = (display_util.OK, 0)
        self.assertEqual(self.mock_apache, self._call())
    @mock.patch("letsencrypt.display.ops.util")
    def test_more_info(self, mock_util):
        # Two HELP selections should trigger two notifications before the
        # final OK choice of index 1.
        mock_util().menu.side_effect = [
            (display_util.HELP, 0),
            (display_util.HELP, 1),
            (display_util.OK, 1),
        ]
        self.assertEqual(self.mock_stand, self._call())
        self.assertEqual(mock_util().notification.call_count, 2)
    @mock.patch("letsencrypt.display.ops.util")
    def test_no_choice(self, mock_util):
        # Cancelling the menu yields None.
        mock_util().menu.return_value = (display_util.CANCEL, 0)
        self.assertTrue(self._call() is None)
class PickPluginTest(unittest.TestCase):
    """Tests for letsencrypt.display.ops.pick_plugin."""
    def setUp(self):
        self.config = mock.Mock()
        self.default = None
        # MagicMock so chained calls like reg.ifaces().verify() work.
        self.reg = mock.MagicMock()
        self.question = "Question?"
        self.ifaces = []
    def _call(self):
        from letsencrypt.display.ops import pick_plugin
        return pick_plugin(self.config, self.default, self.reg,
                           self.question, self.ifaces)
    def test_default_provided(self):
        # With a default, the registry is filtered rather than searched by
        # interface (asserted via filter.call_count).
        self.default = "foo"
        self._call()
        self.assertEqual(1, self.reg.filter.call_count)
    def test_no_default(self):
        # Without a default, plugins are looked up via reg.ifaces.
        self._call()
        self.assertEqual(1, self.reg.ifaces.call_count)
    def test_no_candidate(self):
        self.assertTrue(self._call() is None)
    def test_single(self):
        # A single verified, available plugin is initialized and returned.
        plugin_ep = mock.MagicMock()
        plugin_ep.init.return_value = "foo"
        self.reg.ifaces().verify().available.return_value = {"bar": plugin_ep}
        self.assertEqual("foo", self._call())
    def test_multiple(self):
        # Multiple candidates delegate the decision to choose_plugin.
        plugin_ep = mock.MagicMock()
        plugin_ep.init.return_value = "foo"
        self.reg.ifaces().verify().available.return_value = {
            "bar": plugin_ep,
            "baz": plugin_ep,
        }
        with mock.patch("letsencrypt.display.ops.choose_plugin") as mock_choose:
            mock_choose.return_value = plugin_ep
            self.assertEqual("foo", self._call())
        mock_choose.assert_called_once_with(
            [plugin_ep, plugin_ep], self.question)
    def test_choose_plugin_none(self):
        # If the user declines to choose, pick_plugin propagates None.
        self.reg.ifaces().verify().available.return_value = {
            "bar": None,
            "baz": None,
        }
        with mock.patch("letsencrypt.display.ops.choose_plugin") as mock_choose:
            mock_choose.return_value = None
            self.assertTrue(self._call() is None)
class ConveniencePickPluginTest(unittest.TestCase):
    """Tests for letsencrypt.display.ops.pick_*."""
    def _test(self, fun, ifaces):
        # Each convenience wrapper must forward to pick_plugin with its own
        # fixed interface tuple appended to the caller's arguments.
        config = mock.Mock()
        default = mock.Mock()
        plugins = mock.Mock()
        with mock.patch("letsencrypt.display.ops.pick_plugin") as mock_p:
            mock_p.return_value = "foo"
            self.assertEqual("foo", fun(config, default, plugins, "Question?"))
        mock_p.assert_called_once_with(
            config, default, plugins, "Question?", ifaces)
    def test_authenticator(self):
        from letsencrypt.display.ops import pick_authenticator
        self._test(pick_authenticator, (interfaces.IAuthenticator,))
    def test_installer(self):
        from letsencrypt.display.ops import pick_installer
        self._test(pick_installer, (interfaces.IInstaller,))
    def test_configurator(self):
        # Configurators must implement both interfaces.
        from letsencrypt.display.ops import pick_configurator
        self._test(pick_configurator, (
            interfaces.IAuthenticator, interfaces.IInstaller))
class GetEmailTest(unittest.TestCase):
    """Tests for letsencrypt.display.ops.get_email."""
    def setUp(self):
        # Register a mock IDisplay so user input can be faked via self.input.
        mock_display = mock.MagicMock()
        self.input = mock_display.input
        zope.component.provideUtility(mock_display, interfaces.IDisplay)
    @classmethod
    def _call(cls):
        from letsencrypt.display.ops import get_email
        return get_email()
    def test_cancel_none(self):
        # Cancelling the prompt returns None even if an address was typed.
        self.input.return_value = (display_util.CANCEL, "foo@bar.baz")
        self.assertTrue(self._call() is None)
    def test_ok_safe(self):
        self.input.return_value = (display_util.OK, "foo@bar.baz")
        with mock.patch("letsencrypt.display.ops.le_util"
                        ".safe_email") as mock_safe_email:
            mock_safe_email.return_value = True
            # Bug fix: compare strings by equality, not identity. The old
            # `is "foo@bar.baz"` check only passed via CPython string
            # interning and is a SyntaxWarning on modern Pythons.
            self.assertEqual(self._call(), "foo@bar.baz")
    def test_ok_not_safe(self):
        # First address is rejected, the user is re-prompted, second passes.
        self.input.return_value = (display_util.OK, "foo@bar.baz")
        with mock.patch("letsencrypt.display.ops.le_util"
                        ".safe_email") as mock_safe_email:
            mock_safe_email.side_effect = [False, True]
            # Bug fix: equality instead of identity (see test_ok_safe).
            self.assertEqual(self._call(), "foo@bar.baz")
class ChooseAccountTest(unittest.TestCase):
    """Tests for letsencrypt.display.ops.choose_account."""
    def setUp(self):
        zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
        # Temporary on-disk account storage layout referenced by the config.
        self.accounts_dir = tempfile.mkdtemp("accounts")
        self.account_keys_dir = os.path.join(self.accounts_dir, "keys")
        os.makedirs(self.account_keys_dir, 0o700)
        self.config = mock.MagicMock(
            accounts_dir=self.accounts_dir,
            account_keys_dir=self.account_keys_dir,
            server="letsencrypt-demo.org")
        # Both accounts share the module-level test RSA key.
        self.key = KEY
        # Two distinct accounts so menu-index selection can be verified.
        self.acc1 = account.Account(messages.RegistrationResource(
            uri=None, new_authzr_uri=None, body=messages.Registration.from_data(
                email="email1@g.com")), self.key)
        self.acc2 = account.Account(messages.RegistrationResource(
            uri=None, new_authzr_uri=None, body=messages.Registration.from_data(
                email="email2@g.com", phone="phone")), self.key)
    @classmethod
    def _call(cls, accounts):
        from letsencrypt.display import ops
        return ops.choose_account(accounts)
    @mock.patch("letsencrypt.display.ops.util")
    def test_one(self, mock_util):
        mock_util().menu.return_value = (display_util.OK, 0)
        self.assertEqual(self._call([self.acc1]), self.acc1)
    @mock.patch("letsencrypt.display.ops.util")
    def test_two(self, mock_util):
        # Menu returns index 1 -> the second account is chosen.
        mock_util().menu.return_value = (display_util.OK, 1)
        self.assertEqual(self._call([self.acc1, self.acc2]), self.acc2)
    @mock.patch("letsencrypt.display.ops.util")
    def test_cancel(self, mock_util):
        # Cancelling yields None regardless of the highlighted index.
        mock_util().menu.return_value = (display_util.CANCEL, 1)
        self.assertTrue(self._call([self.acc1, self.acc2]) is None)
class GenSSLLabURLs(unittest.TestCase):
    """Loose test of _gen_ssl_lab_urls. URL can change easily in the future."""
    def setUp(self):
        zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
    @classmethod
    def _call(cls, domains):
        from letsencrypt.display.ops import _gen_ssl_lab_urls
        return _gen_ssl_lab_urls(domains)
    def test_zero(self):
        # No domains -> no URLs.
        self.assertEqual(self._call([]), [])
    def test_two(self):
        # Deliberately only checks each domain appears in its URL (in input
        # order), since the SSL Labs URL format may change.
        urls = self._call(["eff.org", "umich.edu"])
        self.assertTrue("eff.org" in urls[0])
        self.assertTrue("umich.edu" in urls[1])
class GenHttpsNamesTest(unittest.TestCase):
    """Test _gen_https_names."""
    def setUp(self):
        zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
    @classmethod
    def _call(cls, domains):
        from letsencrypt.display.ops import _gen_https_names
        return _gen_https_names(domains)
    def test_zero(self):
        # Empty input produces an empty string.
        self.assertEqual(self._call([]), "")
    def test_one(self):
        # A single domain is rendered with no separators.
        doms = [
            "example.com",
            "asllkjsadfljasdf.c",
        ]
        for dom in doms:
            self.assertEqual(self._call([dom]), "https://%s" % dom)
    def test_two(self):
        # Two domains are joined with "and" and no comma.
        domains_list = [
            ["foo.bar.org", "bar.org"],
            ["paypal.google.facebook.live.com", "*.zombo.example.com"],
        ]
        for doms in domains_list:
            self.assertEqual(
                self._call(doms),
                "https://{dom[0]} and https://{dom[1]}".format(dom=doms))
    def test_three(self):
        doms = ["a.org", "b.org", "c.org"]
        # We use an oxford comma
        self.assertEqual(
            self._call(doms),
            "https://{dom[0]}, https://{dom[1]}, and https://{dom[2]}".format(
                dom=doms))
    def test_four(self):
        # Same oxford-comma rule for four or more names.
        doms = ["a.org", "b.org", "c.org", "d.org"]
        exp = ("https://{dom[0]}, https://{dom[1]}, https://{dom[2]}, "
               "and https://{dom[3]}".format(dom=doms))
        self.assertEqual(self._call(doms), exp)
class ChooseNamesTest(unittest.TestCase):
    """Test choose names."""
    def setUp(self):
        zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
        self.mock_install = mock.MagicMock()
    @classmethod
    def _call(cls, installer):
        from letsencrypt.display.ops import choose_names
        return choose_names(installer)
    @mock.patch("letsencrypt.display.ops._choose_names_manually")
    def test_no_installer(self, mock_manual):
        # Without an installer, names must be entered manually.
        self._call(None)
        self.assertEqual(mock_manual.call_count, 1)
    @mock.patch("letsencrypt.display.ops.util")
    def test_no_installer_cancel(self, mock_util):
        mock_util().input.return_value = (display_util.CANCEL, [])
        self.assertEqual(self._call(None), [])
    @mock.patch("letsencrypt.display.ops.util")
    def test_no_names_choose(self, mock_util):
        # NOTE(review): this test (and test_no_names_quit) configures
        # self.mock_install().get_all_names — i.e. the mock's return_value —
        # while the filter tests below configure
        # self.mock_install.get_all_names directly. Verify which attribute
        # choose_names actually reads; one of the two stylings is likely a
        # no-op.
        self.mock_install().get_all_names.return_value = set()
        mock_util().yesno.return_value = True
        domain = "example.com"
        mock_util().input.return_value = (display_util.OK, domain)
        actual_doms = self._call(self.mock_install)
        self.assertEqual(mock_util().input.call_count, 1)
        self.assertEqual(actual_doms, [domain])
    @mock.patch("letsencrypt.display.ops.util")
    def test_no_names_quit(self, mock_util):
        # Declining manual entry returns an empty list.
        self.mock_install().get_all_names.return_value = set()
        mock_util().yesno.return_value = False
        self.assertEqual(self._call(self.mock_install), [])
    @mock.patch("letsencrypt.display.ops.util")
    def test_filter_names_valid_return(self, mock_util):
        self.mock_install.get_all_names.return_value = set(["example.com"])
        mock_util().checklist.return_value = (display_util.OK, ["example.com"])
        names = self._call(self.mock_install)
        self.assertEqual(names, ["example.com"])
        self.assertEqual(mock_util().checklist.call_count, 1)
    @mock.patch("letsencrypt.display.ops.util")
    def test_filter_names_nothing_selected(self, mock_util):
        # OK with an empty selection yields no names.
        self.mock_install.get_all_names.return_value = set(["example.com"])
        mock_util().checklist.return_value = (display_util.OK, [])
        self.assertEqual(self._call(self.mock_install), [])
    @mock.patch("letsencrypt.display.ops.util")
    def test_filter_names_cancel(self, mock_util):
        # Cancel discards even a non-empty selection.
        self.mock_install.get_all_names.return_value = set(["example.com"])
        mock_util().checklist.return_value = (
            display_util.CANCEL, ["example.com"])
        self.assertEqual(self._call(self.mock_install), [])
class SuccessInstallationTest(unittest.TestCase):
    # pylint: disable=too-few-public-methods
    """Test the success installation message."""
    @classmethod
    def _call(cls, names):
        from letsencrypt.display.ops import success_installation
        success_installation(names)
    @mock.patch("letsencrypt.display.ops.util")
    def test_success_installation(self, mock_util):
        mock_util().notification.return_value = None
        names = ["example.com", "abc.com"]
        self._call(names)
        # Exactly one notification is shown, and its first positional
        # argument (the message) mentions every requested domain.
        self.assertEqual(mock_util().notification.call_count, 1)
        arg = mock_util().notification.call_args_list[0][0][0]
        for name in names:
            self.assertTrue(name in arg)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
|
|
#!/usr/bin/env python3
import subprocess
import sys
import logging
from datetime import datetime
from pathlib import Path
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
# --- Work Directory Setup -----------------------------------------------------
# Unique run id based on epoch seconds. int(datetime.timestamp()) replaces
# strftime("%s"), which is a glibc-only extension and not portable.
RUN_ID = "local-hierarchy-sharedfs-" + str(int(datetime.now().timestamp()))
TOP_DIR = Path.cwd()
WORK_DIR = TOP_DIR / "work"
# exist_ok=True makes the mkdir idempotent, replacing the try/except dance.
WORK_DIR.mkdir(parents=True, exist_ok=True)
# --- Properties ---------------------------------------------------------------
# Planner configuration: flat storage layout and data staging via the shared
# filesystem. props.write() persists them for pegasus-plan (default file
# location — confirm against Pegasus.api docs).
props = Properties()
props["pegasus.dir.storage.deep"] = "false"
props["pegasus.data.configuration"] = "sharedfs"
props.write()
# --- Sites --------------------------------------------------------------------
# Site catalog (YAML): a remote "CCG" slurm site reached over SSH/Bosco plus
# the "local" submit site. The {run_id}/{work_dir}/{cluster_pegasus_home}
# placeholders are substituted via str.format below.
sites = """
pegasus: "5.0"
sites:
 -
  name: "CCG"
  arch: "x86_64"
  os.type: "linux"
  os.release: "rhel"
  os.version: "7"
  directories:
   -
    type: "sharedScratch"
    path: "/nfs/bamboo/scratch-90-days/CCG/scratch/{run_id}"
    fileServers:
     -
      operation: "all"
      url: "scp://bamboo@corbusier.isi.edu:2222/nfs/bamboo/scratch-90-days/CCG/scratch/{run_id}"
   -
    type: "localStorage"
    path: "/nfs/bamboo/scratch-90-days/CCG/outputs"
    fileServers:
     -
      operation: "all"
      url: "scp://bamboo@corbusier.isi.edu:2222/nfs/bamboo/scratch-90-days/CCG/outputs/{run_id}"
  grids:
   -
    type: "batch"
    contact: "corbusier.isi.edu"
    scheduler: "slurm"
    jobtype: "compute"
   -
    type: "batch"
    contact: "corbusier.isi.edu"
    scheduler: "slurm"
    jobtype: "compute"
  profiles:
    env:
      PEGASUS_HOME: "{cluster_pegasus_home}"
    pegasus:
      # SSH is the style to use for Bosco SSH submits.
      style: ssh
      # Works around bug in the HTCondor GAHP, that does not
      # set the remote directory
      change.dir: 'true'
      # the key to use for scp transfers
      SSH_PRIVATE_KEY: /scitech/home/bamboo/.ssh/workflow_id_rsa
 -
  name: "local"
  arch: "x86_64"
  os.type: "linux"
  os.release: "rhel"
  os.version: "7"
  directories:
   -
    type: "sharedScratch"
    path: "{work_dir}/local-site/scratch"
    fileServers:
     -
      operation: "all"
      url: "file://{work_dir}/local-site/scratch"
   -
    type: "localStorage"
    path: "{work_dir}/outputs/local-site"
    fileServers:
     -
      operation: "all"
      url: "file://{work_dir}/outputs/local-site"
""".format(
    run_id=RUN_ID, work_dir=str(WORK_DIR), cluster_pegasus_home="/opt/pegasus"
)
# Written where pegasus-plan expects to find the site catalog.
with open("sites.yml", "w") as f:
    f.write(sites)
# --- Transformations ----------------------------------------------------------
# Locate the Pegasus binary directory; bail out early if pegasus-config is
# not on PATH.
try:
    pegasus_config = subprocess.run(
        ["pegasus-config", "--bin"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
except FileNotFoundError:
    # Bug fix: previously only a message was printed and execution fell
    # through to the assert below, dying with a NameError on pegasus_config.
    print("Unable to find pegasus-config")
    sys.exit(1)
assert pegasus_config.returncode == 0
PEGASUS_BIN_DIR = pegasus_config.stdout.decode().strip()
# Transformation catalog (YAML): the three diamond jobs are stageable
# pegasus-keg binaries from the local site; the two sleep transformations are
# pre-installed /bin/sleep on the CCG site.
transformations = """
pegasus: "5.0"
transformations:
 -
  namespace: "diamond"
  name: "analyze"
  version: "4.0"
  sites:
   -
    name: "local"
    type: "stageable"
    pfn: "{pegasus_bin_dir}/pegasus-keg"
    arch: "x86_64"
    os.type: "linux"
    os.release: "rhel"
    os.version: "7"
 -
  namespace: "diamond"
  name: "findrange"
  version: "4.0"
  sites:
   -
    name: "local"
    type: "stageable"
    pfn: "{pegasus_bin_dir}/pegasus-keg"
    arch: "x86_64"
    os.type: "linux"
    os.release: "rhel"
    os.version: "7"
 -
  namespace: "diamond"
  name: "preprocess"
  version: "4.0"
  sites:
   -
    name: "local"
    type: "stageable"
    pfn: "{pegasus_bin_dir}/pegasus-keg"
    arch: "x86_64"
    os.type: "linux"
    os.release: "rhel"
    os.version: "7"
 -
  namespace: "level1"
  name: "sleep"
  sites:
   -
    name: "CCG"
    type: "installed"
    pfn: "/bin/sleep"
    arch: "x86_64"
    os.type: "linux"
    os.release: "rhel"
    os.version: "7"
 -
  namespace: "level2"
  name: "sleep"
  sites:
   -
    name: "CCG"
    type: "installed"
    pfn: "/bin/sleep"
    arch: "x86_64"
    os.type: "linux"
    os.release: "rhel"
    os.version: "7"
""".format(
    pegasus_bin_dir=PEGASUS_BIN_DIR
)
with open("transformations.yml", "w") as f:
    f.write(transformations)
# --- Input Directory Setup ----------------------------------------------------
# mkdir(exist_ok=True) is the idiomatic replacement for the old
# try/except FileExistsError block (behaviour differs only if "input" exists
# as a regular file, which would have failed on the next open() anyway).
Path("input").mkdir(exist_ok=True)
# --- Blackdiamond Subworkflow -------------------------------------------------
# Classic 4-job diamond (preprocess -> 2x findrange -> analyze), written out
# to input/blackdiamond.yml so it can be planned later as a sub-workflow.
with open("input/f.a", "w") as f:
    f.write("Sample input file\n")
# Logical files flowing between the diamond's jobs.
fa = File("f.a")
fb1 = File("f.b1")
fb2 = File("f.b2")
fc1 = File("f.c1")
fc2 = File("f.c2")
fd = File("f.d")
wf = (
    Workflow("blackdiamond")
    .add_jobs(
        Job("preprocess", namespace="diamond", version="4.0")
        .add_args("-a", "preprocess", "-T", "60", "-i", fa, "-o", fb1, fb2)
        .add_inputs(fa)
        .add_outputs(fb1, fb2, register_replica=True),
        Job("findrange", namespace="diamond", version="4.0")
        .add_args("-a", "findrange", "-T", "60", "-i", fb1, "-o", fc1)
        .add_inputs(fb1)
        .add_outputs(fc1, register_replica=True),
        Job("findrange", namespace="diamond", version="4.0")
        .add_args("-a", "findrange", "-T", "60", "-i", fb2, "-o", fc2)
        .add_inputs(fb2)
        .add_outputs(fc2, register_replica=True),
        Job("analyze", namespace="diamond", version="4.0")
        .add_args("-a", "analyze", "-T", "60", "-i", fc1, fc2, "-o", fd)
        .add_inputs(fc1, fc2)
        .add_outputs(fd, register_replica=True),
    )
    .write(str(TOP_DIR / "input/blackdiamond.yml"))
)
# --- Sleep Subworkflow --------------------------------------
# Two sequential 2-second sleep jobs (level1 -> level2), written to
# input/sleep.yml for use as the second sub-workflow.
j1 = Job("sleep", _id="sleep1", namespace="level1").add_args(2)
j2 = Job("sleep", _id="sleep2", namespace="level2").add_args(2)
wf = (
    Workflow("sleep-wf")
    .add_jobs(j1, j2)
    .add_dependency(j1, children=[j2])
    .write(str(TOP_DIR / "input/sleep.yml"))
)
# --- Top Level Workflow -------------------------------------------------------
# Hierarchical workflow: the blackdiamond sub-workflow runs first, then the
# sleep sub-workflow.
wf = Workflow("local-hierarchy")
# NOTE(review): the second positional argument to SubWorkflow is presumably
# "is_planned" (False = plan at runtime) — confirm against the Pegasus.api
# documentation.
blackdiamond_wf = SubWorkflow("blackdiamond.yml", False).add_args(
    "--input-dir", "input", "--output-sites", "local", "-vvv"
)
sleep_wf = SubWorkflow("sleep.yml", False).add_args("--output-sites", "local", "-vvv")
wf.add_jobs(blackdiamond_wf, sleep_wf)
wf.add_dependency(blackdiamond_wf, children=[sleep_wf])
try:
    # Plan on the CCG site and submit immediately.
    wf.plan(
        site=["CCG"],
        dir=str(WORK_DIR),
        relative_dir=RUN_ID,
        input_dirs=["input"],
        verbose=3,
        submit=True,
    )
except PegasusClientError as e:
    print(e.output)
|
|
#!/usr/bin/python
#----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
#
# To use this in the embedded python interpreter using "lldb":
#
# cd /path/containing/crashlog.py
# lldb
# (lldb) script import crashlog
# "crashlog" command installed, type "crashlog --help" for detailed help
# (lldb) crashlog ~/Library/Logs/DiagnosticReports/a.crash
#
# The benefit of running the crashlog command inside lldb in the
# embedded python interpreter is when the command completes, there
# will be a target with all of the files loaded at the locations
# described in the crash log. Only the files that have stack frames
# in the backtrace will be loaded unless the "--load-all" option
# has been specified. This allows users to explore the program in the
# state it was in right at crash time.
#
# On MacOSX csh, tcsh:
# ( setenv PYTHONPATH /path/to/LLDB.framework/Resources/Python ; ./crashlog.py ~/Library/Logs/DiagnosticReports/a.crash )
#
# On MacOSX sh, bash:
# PYTHONPATH=/path/to/LLDB.framework/Resources/Python ./crashlog.py ~/Library/Logs/DiagnosticReports/a.crash
#----------------------------------------------------------------------
import lldb
import commands
import optparse
import os
import plistlib
import re
import shlex
import sys
import time
import uuid
class Address:
    """Class that represents an address that will be symbolicated"""

    def __init__(self, target, load_addr):
        # target is presumably an lldb.SBTarget (it must provide
        # ResolveLoadAddress and ResolveSymbolContextForAddress).
        self.target = target
        self.load_addr = load_addr  # The load address that this object represents
        # the resolved lldb.SBAddress (if any), named so_addr for
        # section/offset address
        self.so_addr = None
        self.sym_ctx = None  # The cached symbol context for this address
        # Any original textual description of this address to be used as a
        # backup in case symbolication fails
        self.description = None
        self.symbolication = None  # The cached symbolicated string that describes this address
        self.inlined = False

    def __str__(self):
        # Render the best available description after the hex address, in
        # decreasing order of quality: symbolicated string, original textual
        # description, raw resolved SBAddress.
        s = "%#16.16x" % (self.load_addr)
        if self.symbolication:
            s += " %s" % (self.symbolication)
        elif self.description:
            s += " %s" % (self.description)
        elif self.so_addr:
            s += " %s" % (self.so_addr)
        return s

    def resolve_addr(self):
        """Lazily resolve and cache the load address to an SBAddress."""
        if self.so_addr is None:
            self.so_addr = self.target.ResolveLoadAddress(self.load_addr)
        return self.so_addr

    def is_inlined(self):
        # Only meaningful after symbolicate() has run; defaults to False.
        return self.inlined

    def get_symbol_context(self):
        """Lazily resolve and cache the full symbol context for this address."""
        if self.sym_ctx is None:
            sb_addr = self.resolve_addr()
            if sb_addr:
                self.sym_ctx = self.target.ResolveSymbolContextForAddress(
                    sb_addr, lldb.eSymbolContextEverything)
            else:
                # Cache an empty context so resolution is not retried.
                self.sym_ctx = lldb.SBSymbolContext()
        return self.sym_ctx

    def get_instructions(self):
        """Return instructions for the containing function (or symbol)."""
        sym_ctx = self.get_symbol_context()
        if sym_ctx:
            function = sym_ctx.GetFunction()
            if function:
                return function.GetInstructions(self.target)
            return sym_ctx.GetSymbol().GetInstructions(self.target)
        return None

    def symbolicate(self, verbose=False):
        """Build and cache a "module`function + offset at file:line" string.

        Returns True on success, False when no module could be resolved.
        NOTE(review): on repeat calls (cache already populated) this appears
        to fall through to the final return False even after a successful
        first call — verify before relying on the return value.
        """
        if self.symbolication is None:
            self.symbolication = ''
            self.inlined = False
            sym_ctx = self.get_symbol_context()
            if sym_ctx:
                module = sym_ctx.GetModule()
                if module:
                    # Print full source file path in verbose mode
                    if verbose:
                        self.symbolication += str(module.GetFileSpec()) + '`'
                    else:
                        self.symbolication += module.GetFileSpec().GetFilename() + '`'
                    function_start_load_addr = -1
                    function = sym_ctx.GetFunction()
                    block = sym_ctx.GetBlock()
                    line_entry = sym_ctx.GetLineEntry()
                    symbol = sym_ctx.GetSymbol()
                    inlined_block = block.GetContainingInlinedBlock()
                    if function:
                        self.symbolication += function.GetName()
                        if inlined_block:
                            # Report the innermost inlined function and use
                            # its block range start as the offset base.
                            self.inlined = True
                            self.symbolication += ' [inlined] ' + \
                                inlined_block.GetInlinedName()
                            block_range_idx = inlined_block.GetRangeIndexForBlockAddress(
                                self.so_addr)
                            if block_range_idx < lldb.UINT32_MAX:
                                block_range_start_addr = inlined_block.GetRangeStartAddress(
                                    block_range_idx)
                                function_start_load_addr = block_range_start_addr.GetLoadAddress(
                                    self.target)
                        if function_start_load_addr == -1:
                            function_start_load_addr = function.GetStartAddress().GetLoadAddress(self.target)
                    elif symbol:
                        self.symbolication += symbol.GetName()
                        function_start_load_addr = symbol.GetStartAddress().GetLoadAddress(self.target)
                    else:
                        # No function or symbol: discard the partial string.
                        self.symbolication = ''
                        return False
                    # Dump the offset from the current function or symbol if it
                    # is non zero
                    function_offset = self.load_addr - function_start_load_addr
                    if function_offset > 0:
                        self.symbolication += " + %u" % (function_offset)
                    elif function_offset < 0:
                        self.symbolication += " %i (invalid negative offset, file a bug) " % function_offset
                    # Print out any line information if any is available
                    if line_entry.GetFileSpec():
                        # Print full source file path in verbose mode
                        if verbose:
                            self.symbolication += ' at %s' % line_entry.GetFileSpec()
                        else:
                            self.symbolication += ' at %s' % line_entry.GetFileSpec().GetFilename()
                        self.symbolication += ':%u' % line_entry.GetLine()
                        column = line_entry.GetColumn()
                        if column > 0:
                            self.symbolication += ':%u' % column
                    return True
        return False
class Section:
    """Class that represents a load address range"""
    # Raw strings so "\s" is a regex escape, not an (invalid) string escape.
    sect_info_regex = re.compile(r'(?P<name>[^=]+)=(?P<range>.*)')
    addr_regex = re.compile(r'^\s*(?P<start>0x[0-9A-Fa-f]+)\s*$')
    range_regex = re.compile(
        r'^\s*(?P<start>0x[0-9A-Fa-f]+)\s*(?P<op>[-+])\s*(?P<end>0x[0-9A-Fa-f]+)\s*$')

    def __init__(self, start_addr=None, end_addr=None, name=None):
        self.start_addr = start_addr  # First address in the range (inclusive)
        self.end_addr = end_addr  # One past the last address (exclusive)
        self.name = name  # Section name, e.g. "__TEXT"

    @classmethod
    def InitWithSBTargetAndSBSection(cls, target, section):
        """Create a Section from an lldb.SBSection loaded in `target`.

        Returns None when the section has no load address in the target.
        """
        sect_load_addr = section.GetLoadAddress(target)
        if sect_load_addr != lldb.LLDB_INVALID_ADDRESS:
            obj = cls(
                sect_load_addr,
                sect_load_addr +
                section.size,
                section.name)
            return obj
        else:
            return None

    def contains(self, addr):
        """Return True if addr lies within [start_addr, end_addr)."""
        return self.start_addr <= addr and addr < self.end_addr

    def set_from_string(self, s):
        """Parse "<name>=<base>[-<end>|+<size>]"; return True on success."""
        match = self.sect_info_regex.match(s)
        if match:
            self.name = match.group('name')
            range_str = match.group('range')
            addr_match = self.addr_regex.match(range_str)
            if addr_match:
                # Base-only form: end address is unknown.
                self.start_addr = int(addr_match.group('start'), 16)
                self.end_addr = None
                return True
            range_match = self.range_regex.match(range_str)
            if range_match:
                self.start_addr = int(range_match.group('start'), 16)
                self.end_addr = int(range_match.group('end'), 16)
                op = range_match.group('op')
                if op == '+':
                    # "+<size>" form: end was parsed as a size, convert it
                    # to an absolute end address.
                    self.end_addr += self.start_addr
                return True
        # Fix: parenthesized single-argument print behaves identically under
        # Python 2 and keeps this file parseable by Python 3 tooling.
        print('error: invalid section info string "%s"' % s)
        print('Valid section info formats are:')
        print('Format Example Description')
        print('--------------------- -----------------------------------------------')
        print('<name>=<base> __TEXT=0x123000 Section from base address only')
        print('<name>=<base>-<end> __TEXT=0x123000-0x124000 Section from base address and end address')
        print('<name>=<base>+<size> __TEXT=0x123000+0x1000 Section from base address and size')
        return False

    def __str__(self):
        if self.name:
            if self.end_addr is not None:
                if self.start_addr is not None:
                    return "%s=[0x%16.16x - 0x%16.16x)" % (
                        self.name, self.start_addr, self.end_addr)
            else:
                if self.start_addr is not None:
                    return "%s=0x%16.16x" % (self.name, self.start_addr)
            return self.name
        return "<invalid>"
class Image:
    """A class that represents an executable image and any associated data."""

    def __init__(self, path, uuid=None):
        self.path = path                # requested path of the image
        self.resolved_path = None       # path after locate_module_and_debug_symbols()
        self.resolved = False           # True once a locate attempt was made
        self.unavailable = False        # True when the image cannot be found
        self.uuid = uuid                # uuid.UUID of the image, if known
        self.section_infos = list()     # list of Section load-address infos
        self.identifier = None
        self.version = None
        self.arch = None                # triple string (e.g. "x86_64-apple-macosx")
        self.module = None              # lldb.SBModule once added to a target
        self.symfile = None             # path to an external symbol file, if any
        self.slide = None               # whole-image load offset, if used

    @classmethod
    def InitWithSBTargetAndSBModule(cls, target, module):
        '''Initialize this Image object with a module from a target.'''
        obj = cls(module.file.fullpath, module.uuid)
        obj.resolved_path = module.platform_file.fullpath
        obj.resolved = True
        obj.arch = module.triple
        # Record the load address of every section that is actually loaded
        # in "target" so addresses can later be mapped back to this image.
        for section in module.sections:
            symb_section = Section.InitWithSBTargetAndSBSection(
                target, section)
            if symb_section:
                obj.section_infos.append(symb_section)
        obj.arch = module.triple
        obj.module = module
        obj.symfile = None
        obj.slide = None
        return obj

    def dump(self, prefix):
        """Print this image on one line, prefixed with "prefix"."""
        print("%s%s" % (prefix, self))

    def debug_dump(self):
        """Print every attribute, one per line (debug aid only).

        NOTE(review): the final line formats slide with %i/%x, so this
        raises if slide is None -- confirm callers always set it first.
        """
        print('path = "%s"' % (self.path))
        print('resolved_path = "%s"' % (self.resolved_path))
        print('resolved = %i' % (self.resolved))
        print('unavailable = %i' % (self.unavailable))
        print('uuid = %s' % (self.uuid))
        print('section_infos = %s' % (self.section_infos))
        print('identifier = "%s"' % (self.identifier))
        print('version = %s' % (self.version))
        print('arch = %s' % (self.arch))
        print('module = %s' % (self.module))
        print('symfile = "%s"' % (self.symfile))
        print('slide = %i (0x%x)' % (self.slide, self.slide))

    def __str__(self):
        s = ''
        if self.uuid:
            s += "%s " % (self.get_uuid())
        if self.arch:
            s += "%s " % (self.arch)
        if self.version:
            s += "%s " % (self.version)
        resolved_path = self.get_resolved_path()
        if resolved_path:
            s += "%s " % (resolved_path)
        for section_info in self.section_infos:
            s += ", %s" % (section_info)
        if self.slide is not None:
            s += ', slide = 0x%16.16x' % self.slide
        return s

    def add_section(self, section):
        """Append a Section load-address info to this image."""
        self.section_infos.append(section)

    def get_section_containing_load_addr(self, load_addr):
        """Return the Section whose range contains load_addr, or None."""
        for section_info in self.section_infos:
            if section_info.contains(load_addr):
                return section_info
        return None

    def get_resolved_path(self):
        """Return the resolved path if set, else the original path, else None."""
        if self.resolved_path:
            return self.resolved_path
        elif self.path:
            return self.path
        return None

    def get_resolved_path_basename(self):
        """Return the basename of the resolved path, or None."""
        path = self.get_resolved_path()
        if path:
            return os.path.basename(path)
        return None

    def symfile_basename(self):
        """Return the basename of the symbol file, or None."""
        if self.symfile:
            return os.path.basename(self.symfile)
        return None

    def has_section_load_info(self):
        """Return truthy when either per-section addresses or a whole-image
        slide are available for loading."""
        return self.section_infos or self.slide is not None

    def load_module(self, target):
        """Set this image's load addresses in "target".

        Uses per-section addresses when available, otherwise the slide.
        Returns None on success or an error string on failure.
        """
        if self.unavailable:
            return None  # We already warned that we couldn't find this module, so don't return an error string
        # Load this module into "target" using the section infos to
        # set the section load addresses
        if self.has_section_load_info():
            if target:
                if self.module:
                    if self.section_infos:
                        num_sections_loaded = 0
                        for section_info in self.section_infos:
                            if section_info.name:
                                section = self.module.FindSection(
                                    section_info.name)
                                if section:
                                    error = target.SetSectionLoadAddress(
                                        section, section_info.start_addr)
                                    if error.Success():
                                        num_sections_loaded += 1
                                    else:
                                        return 'error: %s' % error.GetCString()
                                else:
                                    return 'error: unable to find the section named "%s"' % section_info.name
                            else:
                                # Fixed: this branch used to read "range.name"
                                # (the "range" builtin), raising AttributeError
                                # instead of reporting the unnamed section.
                                return 'error: unnamed section info in "%s"' % (
                                    self.get_resolved_path())
                        if num_sections_loaded == 0:
                            return 'error: no sections were successfully loaded'
                    else:
                        err = target.SetModuleLoadAddress(
                            self.module, self.slide)
                        if err.Fail():
                            return err.GetCString()
                    return None
                else:
                    return 'error: invalid module'
            else:
                return 'error: invalid target'
        else:
            return 'error: no section infos'

    def add_module(self, target):
        '''Add the Image described in this object to "target" and set its
        load addresses when section info is available.

        Returns None on success or an error string on failure.'''
        if target:
            # Try and find using UUID only first so that paths need not match
            # up
            uuid_str = self.get_normalized_uuid_string()
            if uuid_str:
                self.module = target.AddModule(None, None, uuid_str)
            if not self.module:
                self.locate_module_and_debug_symbols()
                if self.unavailable:
                    return None
                resolved_path = self.get_resolved_path()
                self.module = target.AddModule(
                    resolved_path, self.arch, uuid_str, self.symfile)
            if not self.module:
                return 'error: unable to get module for (%s) "%s"' % (
                    self.arch, self.get_resolved_path())
            if self.has_section_load_info():
                return self.load_module(target)
            else:
                return None  # No sections, the module was added to the target, so success
        else:
            return 'error: invalid target'

    def locate_module_and_debug_symbols(self):
        """Resolve this image's file and symbol file; return True on success.

        The base implementation just accepts the paths that were supplied in
        self.path / self.resolved_path / self.module / self.symfile.
        Subclasses can inherit from this class and override this function.
        """
        self.resolved = True
        return True

    def get_uuid(self):
        """Return the image UUID, lazily reading it from the module."""
        if not self.uuid and self.module:
            self.uuid = uuid.UUID(self.module.GetUUIDString())
        return self.uuid

    def get_normalized_uuid_string(self):
        """Return the UUID as an upper-case string, or None."""
        if self.uuid:
            return str(self.uuid).upper()
        return None

    def create_target(self):
        '''Create a target using the information in this Image object.'''
        if self.unavailable:
            return None

        if self.locate_module_and_debug_symbols():
            resolved_path = self.get_resolved_path()
            path_spec = lldb.SBFileSpec(resolved_path)
            error = lldb.SBError()
            target = lldb.debugger.CreateTarget(
                resolved_path, self.arch, None, False, error)
            if target:
                self.module = target.FindModule(path_spec)
                if self.has_section_load_info():
                    err = self.load_module(target)
                    if err:
                        print('ERROR:  %s' % (err))
                return target
            else:
                print('error: unable to create a valid target for (%s) "%s"' % (self.arch, self.path))
        else:
            print('error: unable to locate main executable (%s) "%s"' % (self.arch, self.path))
        return None
class Symbolicator:
    """Owns an lldb target plus the list of Images used to turn raw load
    addresses into symbolicated Address objects."""

    def __init__(self):
        """A class that represents the information needed to symbolicate addresses in a program"""
        self.target = None
        self.images = list()  # a list of images to be used when symbolicating
        # Mask applied to PC values before lookup; on ARM targets bit zero
        # is cleared (see InitWithSBTarget) to strip the Thumb mode bit.
        self.addr_mask = 0xffffffffffffffff

    @classmethod
    def InitWithSBTarget(cls, target):
        """Build a Symbolicator around an existing lldb.SBTarget, wrapping
        each of the target's modules in an Image."""
        obj = cls()
        obj.target = target
        obj.images = list()
        triple = target.triple
        if triple:
            arch = triple.split('-')[0]
            if "arm" in arch:
                # ARM function pointers may carry the Thumb bit; mask it off
                # so address comparisons work.
                obj.addr_mask = 0xfffffffffffffffe

        for module in target.modules:
            image = Image.InitWithSBTargetAndSBModule(target, module)
            obj.images.append(image)
        return obj

    def __str__(self):
        s = "Symbolicator:\n"
        if self.target:
            s += "Target = '%s'\n" % (self.target)
            s += "Target modules:\n"
            for m in self.target.modules:
                s += str(m) + "\n"
        s += "Images:\n"
        for image in self.images:
            s += '    %s\n' % (image)
        return s

    def find_images_with_identifier(self, identifier):
        """Return all images whose identifier matches "identifier" exactly;
        if none match, retry treating it as the last component of a
        reverse-DNS bundle identifier (e.g. "Foo" matches "com.x.Foo")."""
        images = list()
        for image in self.images:
            if image.identifier == identifier:
                images.append(image)
        if len(images) == 0:
            # NOTE(review): regex.match(None) raises TypeError when an
            # image has no identifier -- confirm identifiers are always set.
            regex_text = '^.*\.%s$' % (re.escape(identifier))
            regex = re.compile(regex_text)
            for image in self.images:
                if regex.match(image.identifier):
                    images.append(image)
        return images

    def find_image_containing_load_addr(self, load_addr):
        """Return the first Image whose section info covers load_addr."""
        for image in self.images:
            if image.get_section_containing_load_addr(load_addr):
                return image
        return None

    def create_target(self):
        """Return the existing target or create one from the first Image
        that can produce a valid target; returns None on failure."""
        if self.target:
            return self.target

        if self.images:
            for image in self.images:
                self.target = image.create_target()
                if self.target:
                    if self.target.GetAddressByteSize() == 4:
                        triple = self.target.triple
                        if triple:
                            arch = triple.split('-')[0]
                            if "arm" in arch:
                                self.addr_mask = 0xfffffffffffffffe
                    return self.target
        return None

    def symbolicate(self, load_addr, verbose=False):
        """Symbolicate "load_addr".

        Returns a list of Address objects -- the innermost inlined frame
        first, followed by each parent inlined frame -- or None when the
        address cannot be resolved.
        """
        if not self.target:
            self.create_target()
        if self.target:
            live_process = False
            process = self.target.process
            if process:
                state = process.state
                if state > lldb.eStateUnloaded and state < lldb.eStateDetached:
                    live_process = True
            # If we don't have a live process, we can attempt to find the image
            # that a load address belongs to and lazily load its module in the
            # target, but we shouldn't do any of this if we have a live process
            if not live_process:
                image = self.find_image_containing_load_addr(load_addr)
                if image:
                    image.add_module(self.target)
            symbolicated_address = Address(self.target, load_addr)
            if symbolicated_address.symbolicate(verbose):
                if symbolicated_address.so_addr:
                    symbolicated_addresses = list()
                    symbolicated_addresses.append(symbolicated_address)
                    # See if we were able to reconstruct anything?
                    while True:
                        # Walk outward through the inlined-scope chain until
                        # there is no parent inlined frame left.
                        inlined_parent_so_addr = lldb.SBAddress()
                        inlined_parent_sym_ctx = symbolicated_address.sym_ctx.GetParentOfInlinedScope(
                            symbolicated_address.so_addr, inlined_parent_so_addr)
                        if not inlined_parent_sym_ctx:
                            break
                        if not inlined_parent_so_addr:
                            break

                        symbolicated_address = Address(
                            self.target, inlined_parent_so_addr.GetLoadAddress(
                                self.target))
                        symbolicated_address.sym_ctx = inlined_parent_sym_ctx
                        symbolicated_address.so_addr = inlined_parent_so_addr
                        symbolicated_address.symbolicate(verbose)

                        # push the new frame onto the new frame stack
                        symbolicated_addresses.append(symbolicated_address)
                    if symbolicated_addresses:
                        return symbolicated_addresses
        else:
            print 'error: no target in Symbolicator'
        return None
def disassemble_instructions(
        target,
        instructions,
        pc,
        insts_before_pc,
        insts_after_pc,
        non_zeroeth_frame):
    """Print a window of disassembly around "pc".

    Args:
        target: lldb.SBTarget used to resolve load addresses.
        instructions: iterable of lldb.SBInstruction-like objects.
        pc: load address of the program counter to mark with "->".
        insts_before_pc: max instructions to print before the PC (-1 = all).
        insts_after_pc: max instructions to print after the PC (-1 = all).
        non_zeroeth_frame: True when disassembling a caller frame, whose PC
            is a return address; the marker is backed up one instruction so
            it points at the call site.
    """
    lines = list()
    pc_index = -1
    comment_column = 50
    inst_idx = -1  # stays -1 when "instructions" is empty
    for inst_idx, inst in enumerate(instructions):
        inst_pc = inst.GetAddress().GetLoadAddress(target)
        if pc == inst_pc:
            pc_index = inst_idx
        mnemonic = inst.GetMnemonic(target)
        operands = inst.GetOperands(target)
        comment = inst.GetComment(target)
        lines.append("%#16.16x: %8s %s" % (inst_pc, mnemonic, operands))
        if comment:
            # Pad so all comments start in the same column.
            line_len = len(lines[-1])
            if line_len < comment_column:
                lines[-1] += ' ' * (comment_column - line_len)
            lines[-1] += "; %s" % comment

    if pc_index >= 0:
        # If we are disassembling the non-zeroeth frame, we need to backup the
        # PC by 1
        if non_zeroeth_frame and pc_index > 0:
            pc_index = pc_index - 1
        if insts_before_pc == -1:
            start_idx = 0
        else:
            start_idx = pc_index - insts_before_pc
            if start_idx < 0:
                start_idx = 0
        # Fixed: this condition used to re-test insts_before_pc (copy/paste),
        # so passing insts_after_pc == -1 never showed all trailing
        # instructions unless insts_before_pc was also -1.
        if insts_after_pc == -1:
            end_idx = inst_idx
        else:
            end_idx = pc_index + insts_after_pc
            if end_idx > inst_idx:
                end_idx = inst_idx
        for i in range(start_idx, end_idx + 1):
            if i == pc_index:
                print(' ->  %s' % lines[i])
            else:
                print('     %s' % lines[i])
def print_module_section_data(section):
    """Print a section header followed by a hex dump of its contents."""
    print(section)
    data = section.GetSectionData()
    if not data:
        return
    stream = lldb.SBStream()
    data.GetDescription(stream, section.GetFileAddress())
    print(stream.GetData())
def print_module_section(section, depth):
    """Print "section", then recurse into its sub-sections down to "depth"
    further levels."""
    print(section)
    if depth > 0:
        for child_idx in range(section.GetNumSubSections()):
            child = section.GetSubSectionAtIndex(child_idx)
            print_module_section(child, depth - 1)
def print_module_sections(module, depth):
    """Print every top-level section of "module", each to the given depth."""
    for top_section in module.section_iter():
        print_module_section(top_section, depth)
def print_module_symbols(module):
    """Print every symbol in "module", one per line."""
    for symbol in module:
        print(symbol)
def Symbolicate(command_args):
    """Command-line entry point: parse options, build a Symbolicator, and
    print the symbolication of each address argument."""
    usage = "usage: %prog [options] <addr1> [addr2 ...]"
    description = '''Symbolicate one or more addresses using LLDB's python scripting API..'''
    parser = optparse.OptionParser(
        description=description,
        prog='crashlog.py',
        usage=usage)
    parser.add_option(
        '-v',
        '--verbose',
        action='store_true',
        dest='verbose',
        help='display verbose debug info',
        default=False)
    parser.add_option(
        '-p',
        '--platform',
        type='string',
        metavar='platform',
        dest='platform',
        help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".')
    parser.add_option(
        '-f',
        '--file',
        type='string',
        metavar='file',
        dest='file',
        help='Specify a file to use when symbolicating')
    parser.add_option(
        '-a',
        '--arch',
        type='string',
        metavar='arch',
        dest='arch',
        help='Specify a architecture to use when symbolicating')
    parser.add_option(
        '-s',
        '--slide',
        type='int',
        metavar='slide',
        dest='slide',
        help='Specify the slide to use on the file specified with the --file option',
        default=None)
    parser.add_option(
        '--section',
        type='string',
        action='append',
        dest='section_strings',
        help='specify <sect-name>=<start-addr> or <sect-name>=<start-addr>-<end-addr>')
    try:
        (options, args) = parser.parse_args(command_args)
    except SystemExit:
        # optparse reports bad arguments and --help via sys.exit(); treat
        # that as "nothing to do" instead of killing the embedded interpreter.
        return

    symbolicator = Symbolicator()
    if options.file:
        image = Image(options.file)
        image.arch = options.arch
        # Add any sections that were specified with one or more --section
        # options
        if options.section_strings:
            for section_str in options.section_strings:
                section = Section()
                if section.set_from_string(section_str):
                    image.add_section(section)
                else:
                    sys.exit(1)
        if options.slide is not None:
            image.slide = options.slide
        symbolicator.images.append(image)

    target = symbolicator.create_target()
    if options.verbose:
        print(symbolicator)
    if target:
        for addr_str in args:
            addr = int(addr_str, 0)
            symbolicated_addrs = symbolicator.symbolicate(
                addr, options.verbose)
            # symbolicate() returns None when the address can't be resolved;
            # guard so one bad address doesn't abort the whole run with a
            # TypeError.
            if not symbolicated_addrs:
                print('error: unable to symbolicate 0x%x' % (addr))
                continue
            for symbolicated_addr in symbolicated_addrs:
                print(symbolicated_addr)
            print('')
    else:
        print('error: no target for %s' % (symbolicator))
if __name__ == '__main__':
    # Create a new debugger instance
    # When run as a standalone script (rather than imported into LLDB's
    # embedded interpreter, where lldb.debugger already exists), a debugger
    # must be created before any targets can be made.
    lldb.debugger = lldb.SBDebugger.Create()
    Symbolicate(sys.argv[1:])
|
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# pylint: disable=maybe-no-member
"""Test request import and updates."""
from datetime import date, timedelta
from flask.json import dumps
from ggrc import models
from ggrc.converters import errors
from integration.ggrc import converters
class TestRequestImport(converters.TestCase):
  """Basic Request import tests.

  This test suite should test new Request imports and updates. The main focus
  of these tests is checking error messages for invalid state transitions.
  """

  def setUp(self):
    """ Set up for Request test cases """
    converters.TestCase.setUp(self)
    # Authenticate the test client; the imports below need a logged-in user.
    self.client.get("/login")

  def _test_request_users(self, request, users):
    """ Test that all users have correct roles on specified Request.

    "users" maps a person's name to the set of AssigneeType values expected
    on the Relationship between that person and the Request; an empty set
    means no Relationship should exist at all.
    """
    verification_errors = ""
    for user_name, expected_types in users.items():
      try:
        user = models.Person.query.filter_by(name=user_name).first()
        rel = models.Relationship.find_related(request, user)
        if expected_types:
          self.assertNotEqual(
              rel,
              None,
              "User {} is not mapped to {}".format(user.email, request.slug)
          )
          self.assertIn("AssigneeType", rel.relationship_attrs)
          self.assertEqual(
              set(rel.relationship_attrs[
                  "AssigneeType"].attr_value.split(",")),
              expected_types
          )
        else:
          self.assertEqual(
              rel,
              None,
              "User {} is mapped to {}".format(user.email, request.slug)
          )
      except AssertionError as error:
        # Collect every failure so one bad mapping doesn't hide the rest.
        verification_errors += "\n\nChecks for Users-Request mapping failed "\
            "for user '{}' with:\n{}".format(user_name, str(error))

    self.assertEqual(verification_errors, "", verification_errors)

  def test_request_full_no_warnings(self):
    """ Test full request import with no warnings

    CSV sheet:
      https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=704933240&vpid=A7
    """
    filename = "request_full_no_warnings.csv"
    response = self.import_file(filename)
    # Empty expected-errors dict: the import must be completely clean.
    self._check_csv_response(response, {})

    # Test first request line in the CSV file
    request_1 = models.Request.query.filter_by(slug="Request 1").first()
    users = {
        "user 1": {"Assignee"},
        "user 2": {"Assignee", "Requester"},
        "user 3": {"Requester", "Verifier"},
        "user 4": {"Verifier"},
        "user 5": {"Verifier"},
    }
    self._test_request_users(request_1, users)
    self.assertEqual(request_1.status, models.Request.START_STATE)
    self.assertEqual(request_1.request_type, "documentation")

    # Test second request line in the CSV file
    request_2 = models.Request.query.filter_by(slug="Request 2").first()
    users = {
        "user 1": {"Assignee"},
        "user 2": {"Requester"},
        "user 3": {"Verifier"},
        "user 4": {},
        "user 5": {},
    }

    self._test_request_users(request_2, users)
    self.assertEqual(request_2.status, models.Request.PROGRESS_STATE)
    self.assertEqual(request_2.request_type, "interview")

  def test_request_import_states(self):
    """ Test Request state imports

    These tests are an intermediate part for zucchini release and will be
    updated in the next release.

    CSV sheet:
      https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=299569476
    """
    self.import_file("request_full_no_warnings.csv")
    response = self.import_file("request_update_intermediate.csv")

    # Only invalid state transitions should be reported, as row warnings
    # on the exact fixture line numbers below.
    expected_errors = {
        "Request": {
            "block_errors": set(),
            "block_warnings": set(),
            "row_errors": set(),
            "row_warnings": set([
                errors.REQUEST_INVALID_STATE.format(line=5),
                errors.REQUEST_INVALID_STATE.format(line=6),
                errors.REQUEST_INVALID_STATE.format(line=11),
                errors.REQUEST_INVALID_STATE.format(line=12),
            ]),
        }
    }
    self._check_csv_response(response, expected_errors)

    requests = {r.slug: r for r in models.Request.query.all()}
    self.assertEqual(requests["Request 60"].status, models.Request.START_STATE)
    self.assertEqual(requests["Request 61"].status,
                     models.Request.PROGRESS_STATE)
    self.assertEqual(requests["Request 62"].status, models.Request.DONE_STATE)
    self.assertEqual(requests["Request 63"].status,
                     models.Request.PROGRESS_STATE)
    self.assertEqual(requests["Request 64"].status,
                     models.Request.PROGRESS_STATE)
    self.assertEqual(requests["Request 3"].status,
                     models.Request.PROGRESS_STATE)
    self.assertEqual(requests["Request 4"].status,
                     models.Request.PROGRESS_STATE)

    # Check that there is only one attachment left
    request1 = requests["Request 1"]
    self.assertEqual(len(request1.documents), 1)

    # Check that there are only the two new URLs present in request 1
    url_titles = set(obj.title for obj in request1.related_objects()
                     if isinstance(obj, models.Document))
    self.assertEqual(url_titles, set(["a.b.com", "c.d.com"]))

  def test_request_warnings_errors(self):
    """ Test full request import with warnings and errors

    CSV sheet:
      https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=889865936
    """
    self.import_file("request_full_no_warnings.csv")
    response = self.import_file("request_with_warnings_and_errors.csv")

    expected_errors = {
        "Request": {
            "block_errors": set([]),
            "block_warnings": set([
                errors.UNKNOWN_COLUMN.format(
                    line=2,
                    column_name="error description - non existing column will "
                    "be ignored"
                ),
                errors.UNKNOWN_COLUMN.format(
                    line=2,
                    column_name="actual error message"
                ),
            ]),
            "row_errors": set([
                errors.UNKNOWN_OBJECT.format(
                    line=19,
                    object_type="Audit",
                    slug="not existing"
                ),
                errors.DUPLICATE_VALUE_IN_CSV.format(
                    line_list="20, 22",
                    column_name="Code",
                    value="Request 22",
                    s="",
                    ignore_lines="22",
                ),
            ]),
            "row_warnings": set([
                errors.UNKNOWN_USER_WARNING.format(
                    line=14,
                    email="non_existing@a.com",
                ),
                errors.UNKNOWN_OBJECT.format(
                    line=14,
                    object_type="Project",
                    slug="proj-55"
                ),
                errors.REQUEST_INVALID_STATE.format(line=21),
                errors.REQUEST_INVALID_STATE.format(line=22),
                errors.WRONG_REQUIRED_VALUE.format(
                    line=20,
                    column_name="Status",
                    value="open",
                ),
                errors.WRONG_VALUE.format(line=3, column_name="Url"),
            ]),
        }
    }
    self._check_csv_response(response, expected_errors)

  def test_request_default_dates(self):
    """ Test full request import with missing Starts On / Due On date values

    Missing dates default to today (start) and today + 7 days (end).

    CSV sheet:
      https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=889865936
    """
    self.import_file("request_full_no_warnings.csv")
    self.import_file("request_with_warnings_and_errors.csv")
    requests = {r.slug: r for r in models.Request.query.all()}
    today = date.today()
    seven_days = timedelta(7)
    self.assertEqual(requests["Request 17"].end_date, today + seven_days)
    self.assertEqual(requests["Request 18"].start_date, today)
    self.assertEqual(requests["Request 19"].start_date, today)
    self.assertEqual(requests["Request 19"].end_date, today + seven_days)
class TestRequestExport(converters.TestCase):
  """Test Request object export."""

  def setUp(self):
    """Log in and prepare the headers required by the CSV export service."""
    converters.TestCase.setUp(self)
    self.client.get("/login")
    self.headers = {
        'Content-Type': 'application/json',
        "X-Requested-By": "gGRC",
        "X-export-view": "blocks",
    }

  def export_csv(self, data):
    """POST "data" as JSON to the export service and return the response."""
    payload = dumps(data)
    return self.client.post(
        "/_service/export_csv", data=payload, headers=self.headers)

  def test_simple_export(self):
    """ Test full request export with no warnings

    CSV sheet:
      https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=704933240&vpid=A7
    """
    self.import_file("request_full_no_warnings.csv")
    export_query = [{
        "object_name": "Request",
        "filters": {"expression": {}},
        "fields": "all",
    }]
    response = self.export_csv(export_query)
    # The fixture contains a non-ASCII title; its presence in the decoded
    # payload proves the export round-tripped unicode correctly.
    self.assertIn(u"\u5555", response.data.decode("utf8"))
|
|
#Requirements
import sys
import os
import random
import pygame
from pygame.locals import *
import constants
#import player
import bricks
import player
# Initialize pygame's modules at import time so display/font/mixer objects
# can be created; PyBreakout.initialize() calls pygame.init() again, which
# pygame documents as safe (already-initialized modules are skipped).
pygame.init()
#Start Class Declaration
class PyBreakout:
    """Single-screen Breakout clone: 120 bricks, 3 bats (lives).

    Creating an instance initializes pygame and immediately enters the
    blocking main loop; control only leaves via MainMenu or process exit.
    """

    def __init__(self):
        self.initialize()
        self.mainLoop()

    def initialize(self):
        """Create the window, load sounds/fonts, and reset all game state."""
        pygame.init()
        self.sound = pygame.mixer.Sound("adam.wav")   # brick/bat hit sound
        self.death = pygame.mixer.Sound("death.wav")  # lost-ball sound
        self.width = 924
        self.height = 768
        self.screen = pygame.display.set_mode((self.width, self.height))
        self.caption = "pyBreakout"
        pygame.display.set_caption(self.caption)
        self.framerate = 60
        self.clock = pygame.time.Clock()
        self.fontScore = pygame.font.SysFont("Arial Black", 21, bold=False)
        self.gameScore = 0
        self.gameBatsLeft = 3
        self.gameBricksLeft = 120
        self.ticks = 0
        # Build the 10x12 brick grid and pick one "special" brick that
        # awards an extra bat when hit.
        self.bricksArr = []
        self.initBricks()
        self.special = random.randint(2, 119)
        self.bat = player.Bat(self.screen)
        self.projectile = player.Projectile(self.screen)
        self.projectileFired = False

    def initBricks(self):
        """Lay out 10 rows of 12 bricks starting at (44, 160), spaced 70x22."""
        yPos = 160
        for x in range(10):
            xPos = 44
            for i in range(12):
                brickObj = bricks.Brick(self.screen)
                brickObj.setPosX(xPos)
                brickObj.setPosY(yPos)
                self.bricksArr.append(brickObj)
                xPos += 70
            yPos += 22

    def fireProjectile(self):
        """Launch the ball from the middle of the bat."""
        self.projectileFired = True
        self.projectile.setPosition(self.bat.getPosX() + 42, self.bat.getPosY())

    def mainLoop(self):
        """Fixed-framerate update/draw loop; never returns."""
        while True:
            gameTime = self.clock.get_time()
            self.update(gameTime)
            self.draw(gameTime)
            self.clock.tick(self.framerate)

    def projectileCollision(self):
        """Handle projectile collisions with bricks, the bat and the walls."""
        rectProjectile = pygame.Rect(
            self.projectile.getPosX(), self.projectile.getPosY(), 5, 5)
        # Bricks: a hit moves the brick off-screen (x=1000) and scores.
        for i in range(120):
            rectBrick = pygame.Rect(
                self.bricksArr[i].getPosX(), self.bricksArr[i].getPosY(), 66, 18)
            if rectProjectile.colliderect(rectBrick):
                if i == self.special:
                    # Special brick: extra bat and double points.
                    # NOTE(review): unlike normal bricks this does not
                    # decrement gameBricksLeft or reflect the projectile --
                    # confirm that is intentional.
                    self.gameBatsLeft += 1
                    self.gameScore += 20
                    self.bricksArr[i].setPosX(1000)
                    self.sound.play()
                else:
                    self.bricksArr[i].setPosX(1000)
                    self.gameBricksLeft = self.gameBricksLeft - 1
                    self.gameScore += 10
                    self.projectile.reflect()
                    self.projectile.reflectGrad()
                    self.sound.play()
        # Bat: five segments so the bounce angle depends on where it hits.
        rectBat = []
        rectBat.append(pygame.Rect(self.bat.getPosX(), self.bat.getPosY(), 20, 15))
        rectBat.append(pygame.Rect(self.bat.getPosX() + 20, self.bat.getPosY(), 20, 15))
        rectBat.append(pygame.Rect(self.bat.getPosX() + 40, self.bat.getPosY(), 5, 15))
        rectBat.append(pygame.Rect(self.bat.getPosX() + 45, self.bat.getPosY(), 20, 15))
        rectBat.append(pygame.Rect(self.bat.getPosX() + 65, self.bat.getPosY(), 20, 15))
        # Fixed: this chain used to be wrapped in "for i in range(5)" with a
        # fixed-index body, which ran the identical checks five times per
        # frame (replaying the hit sound and re-applying reflect()).
        if rectProjectile.colliderect(rectBat[0]):
            self.projectile.setGrad(-1)
            self.projectile.reflect()
            self.sound.play()
        elif rectProjectile.colliderect(rectBat[1]):
            self.projectile.setGrad(-0.3)
            self.projectile.reflect()
            self.sound.play()
        elif rectProjectile.colliderect(rectBat[2]):
            self.projectile.setGrad(0.3)
            self.projectile.reflect()
            self.sound.play()
        elif rectProjectile.colliderect(rectBat[3]):
            self.projectile.setGrad(0.3)
            self.projectile.reflect()
            self.sound.play()
        elif rectProjectile.colliderect(rectBat[4]):
            self.projectile.setGrad(1)
            self.projectile.reflect()
            self.sound.play()
        # Walls: left, top, right.
        rectWalls = []
        rectWalls.append(pygame.Rect(20, 50, 20, 650))
        rectWalls.append(pygame.Rect(20, 50, 884, 20))
        rectWalls.append(pygame.Rect(884, 50, 20, 650))
        for i in range(3):
            if rectProjectile.colliderect(rectWalls[i]):
                self.projectile.reflectGrad()
                self.sound.play()
        # The top wall additionally flips the other axis.
        if rectProjectile.colliderect(rectWalls[1]):
            self.projectile.reflect()
        # Below the playfield: the ball is lost, costing one bat.
        rectOutofBounds = pygame.Rect(0, 760, 1000, 10)
        if rectProjectile.colliderect(rectOutofBounds):
            self.projectileFired = False
            self.gameBatsLeft = self.gameBatsLeft - 1
            self.projectile.setGrad(0.2)
            self.projectile.reflect()
            self.death.play()

    def gameLost(self):
        """Switch to the game-over menu (state 2)."""
        MainMenu(self.gameScore, 2)

    def gameWon(self):
        """Switch to the victory menu (state 1)."""
        MainMenu(self.gameScore, 1)

    def update(self, gameTime):
        """Advance one frame: check win/loss, handle input, move the ball."""
        if self.gameBatsLeft == -1:
            self.gameLost()
        if self.gameBricksLeft == 0:
            self.gameWon()
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    game = MainMenu(0, 0)
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_a] or pressed[pygame.K_LEFT]:
            if self.bat.getPosX() >= 44:
                self.bat.setPosX(self.bat.getPosX() - 15)
        if pressed[pygame.K_d] or pressed[pygame.K_RIGHT]:
            if self.bat.getPosX() <= 799:
                self.bat.setPosX(self.bat.getPosX() + 15)
        if pressed[pygame.K_SPACE] and not self.projectileFired:
            self.fireProjectile()
        self.ticks = self.ticks + gameTime
        if self.projectileFired:
            self.projectile.move()
            self.projectileCollision()

    def draw(self, gameTime):
        """Render the scores, walls, bricks, bat and ball, then flip."""
        self.screen.fill(constants.backgroundColour)
        # Display The Scoring Text
        labelBricksLeft = self.fontScore.render("Bricks Left: %d" % self.gameBricksLeft, 1, constants.foregroundColour)
        labelBatsLeft = self.fontScore.render("Bats Left: %d" % self.gameBatsLeft, 1, constants.foregroundColour)
        labelScore = self.fontScore.render("Score: %d" % self.gameScore, 1, constants.foregroundColour)
        self.screen.blit(labelBricksLeft, (20, 20))
        self.screen.blit(labelBatsLeft, (370, 20))
        self.screen.blit(labelScore, (710, 20))
        # Draw the walls.
        pygame.draw.rect(self.screen, constants.foregroundColour, [20, 50, 20, 650])
        pygame.draw.rect(self.screen, constants.foregroundColour, [20, 50, 884, 20])
        pygame.draw.rect(self.screen, constants.foregroundColour, [884, 50, 20, 650])
        # Colour the bricks: the colour index advances every two rows.
        c = 0
        for x in range(120):
            self.bricksArr[x].draw(c)
            if x in (23, 47, 71, 95):
                c = c + 1
        self.bricksArr[self.special].draw(5)
        # Draw the player's bat and (if in flight) the ball.
        self.bat.draw()
        if self.projectileFired:
            self.projectile.draw()
        pygame.display.flip()
class MainMenu:
    """Menu screen shown at startup (state 0), on a win (1) or a loss (2)."""

    def __init__(self, score, state):
        pygame.init()
        self.score = score
        self.state = state
        self.width = 924
        self.height = 768
        self.screen = pygame.display.set_mode((self.width, self.height))
        self.clock = pygame.time.Clock()
        self.framerate = 60
        self.fontWelcome = pygame.font.SysFont("Arial Black", 21, bold=False)
        self.fontBig = pygame.font.SysFont("Arial Black", 40, bold=False)
        self.logo = pygame.image.load(os.path.join('resources', 'logo.jpg'))
        self.mainLoop()

    def mainLoop(self):
        """Blocking update/draw loop at a fixed framerate."""
        while True:
            elapsed = self.clock.get_time()
            self.update(elapsed)
            self.draw(elapsed)
            self.clock.tick(self.framerate)

    def update(self, gameTime):
        """Handle quit/escape events; start a new game on <space>."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type != pygame.KEYDOWN:
                continue
            if event.key == pygame.K_ESCAPE:
                pygame.quit()
                sys.exit()
            if event.key == pygame.K_SPACE:
                game = PyBreakout()

    def draw(self, gameTime):
        """Render the menu for the current state and flip the display."""
        fg = constants.foregroundColour
        render = self.fontWelcome.render
        self.screen.fill(constants.backgroundColour)
        labelWelcome = render("Press <Space> To Start Game", 1, fg)
        labelInstruction1 = render("You have 3 Lives", 1, fg)
        labelInstruction2 = render("Each brick is 10 points", 1, fg)
        labelInstruction3 = render("The Black brick gives you 20 points and 1 extra life", 1, fg)
        labelInstruction4 = render("Use A & D or the arrow keys to move", 1, fg)
        labelInstruction5 = render("You can press <esc> at any time to return to this screen", 1, fg)
        labelWin = self.fontBig.render("Congrats you beat the game!", 1, fg)
        labelLoss = self.fontBig.render("Oh no you ran out of bats", 1, fg)
        labelScore = render("Final score: %d" % self.score, 1, fg)
        labelPlayAgain = render("Press <Space> To try again", 1, fg)
        if self.state == 0:
            blits = [
                (self.logo, (40, 200)),
                (labelWelcome, (300, 400)),
                (labelInstruction1, (350, 440)),
                (labelInstruction2, (320, 460)),
                (labelInstruction3, (190, 480)),
                (labelInstruction4, (280, 500)),
                (labelInstruction5, (140, 520)),
            ]
        elif self.state == 1:
            blits = [(labelWin, (140, 200))]
        elif self.state == 2:
            blits = [
                (labelLoss, (180, 300)),
                (labelScore, (400, 400)),
                (labelPlayAgain, (320, 440)),
            ]
        else:
            blits = []
        for surface, position in blits:
            self.screen.blit(surface, position)
        pygame.display.flip()
if __name__ == "__main__":
game = MainMenu(0,0)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorArray: a dynamically sized array of Tensors.
@@TensorArray
"""
# Mixture of pep8 and non-pep8 names, so disable pylint bad-name
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_should_use
# TensorArray object accesses many of the hidden generated ops, but is
# in fact built to wrap these methods.
# pylint: disable=protected-access
class TensorArray(object):
  """Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.
  This class is meant to be used with dynamic iteration primitives such as
  `while_loop` and `map_fn`. It supports gradient back-propagation via special
  "flow" control flow dependencies.

  Every mutating op (write/scatter/split) returns a *new* TensorArray object
  sharing the same handle but carrying a new `flow` tensor; threading the flow
  through subsequent ops is what orders reads after writes in the graph.
  """
  def __init__(self,
               dtype,
               size=None,
               dynamic_size=None,
               clear_after_read=None,
               tensor_array_name=None,
               handle=None,
               flow=None,
               infer_shape=True,
               element_shape=None,
               colocate_with_first_write_call=True,
               name=None):
    """Construct a new TensorArray or wrap an existing TensorArray handle.
    A note about the parameter `name`:
    The name of the `TensorArray` (even if passed in) is uniquified: each time
    a new `TensorArray` is created at runtime it is assigned its own name for
    the duration of the run. This avoids name collisions if a `TensorArray`
    is created within a `while_loop`.
    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if handle is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size. Default: False.
      clear_after_read: Boolean (optional, default: True). If True, clear
        TensorArray values after reading them. This disables read-many
        semantics, but allows early release of memory.
      tensor_array_name: (optional) Python string: the name of the TensorArray.
        This is used when creating the TensorArray handle. If this value is
        set, handle should be None.
      handle: (optional) A `Tensor` handle to an existing TensorArray. If this
        is set, tensor_array_name should be None.
      flow: (optional) A float `Tensor` scalar coming from an existing
        `TensorArray.flow`.
      infer_shape: (optional, default: True) If True, shape inference
        is enabled. In this case, all elements must have the same shape.
      element_shape: (optional, default: None) A `TensorShape` object specifying
        the shape constraints of each of the elements of the TensorArray.
        Need not be fully defined.
      colocate_with_first_write_call: If `True`, the TensorArray will be
        colocated on the same device as the Tensor used on its first write
        (write operations include `write`, `unstack`, and `split`). If `False`,
        the TensorArray will be placed on the device determined by the
        device context available during its initialization.
      name: A name for the operation (optional).
    Raises:
      ValueError: if both handle and tensor_array_name are provided.
      TypeError: if handle is provided but is not a Tensor.
    """
    # A `handle` wraps an existing TensorArray, so it is mutually exclusive
    # with every argument that describes how to *create* one.
    if handle is not None and tensor_array_name:
      raise ValueError(
          "Cannot construct with both handle and tensor_array_name")
    if handle is not None and not isinstance(handle, ops.Tensor):
      raise TypeError("Handle must be a Tensor")
    if handle is None and size is None:
      raise ValueError("Size must be provided if handle is not provided")
    if handle is not None and size is not None:
      raise ValueError("Cannot provide both a handle and size "
                       "at the same time")
    if handle is not None and element_shape is not None:
      raise ValueError("Cannot provide both a handle and element_shape "
                       "at the same time")
    if handle is not None and dynamic_size is not None:
      raise ValueError("Cannot provide both a handle and dynamic_size "
                       "at the same time")
    if handle is not None and clear_after_read is not None:
      raise ValueError("Cannot provide both a handle and clear_after_read "
                       "at the same time")
    if clear_after_read is None:
      clear_after_read = True
    dynamic_size = dynamic_size or False
    self._dtype = dtype
    # Used to keep track of what tensors the TensorArray should be
    # colocated with. We choose to colocate the TensorArray with the
    # first tensor written to it.
    self._colocate_with_first_write_call = colocate_with_first_write_call
    if colocate_with_first_write_call:
      self._colocate_with = []
    else:
      self._colocate_with = None
    # Record the current static shape for the array elements. The element
    # shape is defined either by `element_shape` or the shape of the tensor
    # of the first write. If `infer_shape` is true, all writes checks for
    # shape equality.
    # NOTE: `_element_shape` is a zero-or-one element list so that sibling
    # TensorArray objects (from write/identity/grad) can share and mutate
    # the same shape state in place.
    if element_shape is None:
      self._infer_shape = infer_shape
      self._element_shape = []
    else:
      self._infer_shape = True
      self._element_shape = [tensor_shape.TensorShape(element_shape)]
    with ops.name_scope(name, "TensorArray", [handle, size, flow]) as scope:
      if handle is not None:
        self._handle = handle
        if flow is None:
          raise ValueError("flow must not be None if handle is not None.")
        self._flow = flow
      else:
        # Construct the TensorArray with an empty device. The first
        # write into the TensorArray from a Tensor with a set device
        # will retroactively set the device value of this op.
        def create():
          return gen_data_flow_ops._tensor_array_v3(
              dtype=dtype,
              size=size,
              element_shape=element_shape,
              dynamic_size=dynamic_size,
              clear_after_read=clear_after_read,
              tensor_array_name=tensor_array_name,
              name=scope)
        if colocate_with_first_write_call:
          with ops.device(None), ops.colocate_with(None, ignore_existing=True):
            self._handle, self._flow = create()
        else:
          self._handle, self._flow = create()
  @property
  def flow(self):
    """The flow `Tensor` forcing ops leading to this TensorArray state."""
    return self._flow
  @property
  def dtype(self):
    """The data type of this TensorArray."""
    return self._dtype
  @property
  def handle(self):
    """The reference to the TensorArray."""
    return self._handle
  def _merge_element_shape(self, shape):
    """Changes the element shape of the array given a shape to merge with.
    Args:
      shape: A `TensorShape` object to merge with.
    Raises:
      ValueError: if the provided shape is incompatible with the current
        element shape of the `TensorArray`.
    """
    if self._element_shape:
      if not shape.is_compatible_with(self._element_shape[0]):
        raise ValueError(
            "Inconsistent shapes: saw %s but expected %s "
            "(and infer_shape=True)" % (shape, self._element_shape[0]))
      # In-place update so every TensorArray sharing this list sees it.
      self._element_shape[0] = self._element_shape[0].merge_with(shape)
    else:
      self._element_shape.append(shape)
  @contextlib.contextmanager
  def _maybe_colocate_with(self, value):
    """Colocate operations with an internal colocation group or `value`.
    Args:
      value: `Tensor`, the tensor to try to colocate with.
    Yields:
      Does not yield anything, but the new context is a colocation context.
    If no internal colocation group is set, colocate with `value` and set
    the internal colocation group to be value.
    """
    if not self._colocate_with_first_write_call:
      yield
    else:
      if not self._colocate_with:
        # First write: remember `value` as the colocation anchor.
        self._colocate_with.append(value)
      with ops.colocate_with(self._colocate_with[0]):
        yield
  def identity(self):
    """Returns a TensorArray with the same content and properties.
    Returns:
      A new TensorArray object with flow that ensures the control dependencies
      from the contexts will become control dependencies for writes, reads, etc.
      Use this object all for subsequent operations.
    """
    flow = array_ops.identity(self._flow)
    ta = TensorArray(
        dtype=self._dtype, handle=self._handle, flow=flow,
        infer_shape=self._infer_shape,
        colocate_with_first_write_call=self._colocate_with_first_write_call)
    # Share (not copy) the mutable shape/colocation state.
    ta._element_shape = self._element_shape
    ta._colocate_with = self._colocate_with
    return ta
  def grad(self, source, flow=None, name=None):
    """Returns a `TensorArray` wrapping the gradient array for `source`.
    Args:
      source: string; gradient source identifier, passed through to the
        TensorArrayGradV3 op.
      flow: (optional) flow `Tensor` to gate creation of the gradient
        array; defaults to `self.flow`.
      name: A name for the operation (optional).
    Returns:
      A `TensorArray` object wrapping the gradient handle, sharing this
      array's element-shape state.
    """
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized. This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    with ops.name_scope(name, "TensorArrayGrad", [self._handle]):
      with ops.colocate_with(self._handle):
        g_handle, unused_flow = gen_data_flow_ops._tensor_array_grad_v3(
            handle=self._handle, source=source, flow_in=flow, name=name)
        # Make the returned flow depend on the grad handle so downstream
        # ops cannot run before the gradient array exists.
        with ops.control_dependencies([g_handle]):
          flow = array_ops.identity(flow, name="gradient_flow")
        g = TensorArray(
            dtype=self._dtype,
            handle=g_handle,
            flow=flow,
            infer_shape=self._infer_shape,
            colocate_with_first_write_call=False)
        g._element_shape = self._element_shape
        return g
  def read(self, index, name=None):
    """Read the value at location `index` in the TensorArray.
    Args:
      index: 0-D. int32 tensor with the index to read from.
      name: A name for the operation (optional).
    Returns:
      The tensor at index `index`.
    """
    value = gen_data_flow_ops._tensor_array_read_v3(
        handle=self._handle,
        index=index,
        flow_in=self._flow,
        dtype=self._dtype,
        name=name)
    if self._element_shape:
      # Apply any statically-known element shape to the read result.
      value.set_shape(self._element_shape[0].dims)
    return value
  @tf_should_use.should_use_result
  def write(self, index, value, name=None):
    """Write `value` into index `index` of the TensorArray.
    Args:
      index: 0-D. int32 scalar with the index to write to.
      value: N-D. Tensor of type `dtype`. The Tensor to write to this index.
      name: A name for the operation (optional).
    Returns:
      A new TensorArray object with flow that ensures the write occurs.
      Use this object all for subsequent operations.
    Raises:
      ValueError: if there are more writers than specified.
    """
    with ops.name_scope(name, "TensorArrayWrite", [self._handle, index, value]):
      value = ops.convert_to_tensor(value, name="value")
      with self._maybe_colocate_with(value):
        flow_out = gen_data_flow_ops._tensor_array_write_v3(
            handle=self._handle,
            index=index,
            value=value,
            flow_in=self._flow,
            name=name)
      # Return a fresh TensorArray carrying the new flow; shape and
      # colocation state are shared with `self`.
      ta = TensorArray(
          dtype=self._dtype, handle=self._handle, flow=flow_out,
          colocate_with_first_write_call=self._colocate_with_first_write_call)
      ta._infer_shape = self._infer_shape
      ta._element_shape = self._element_shape
      ta._colocate_with = self._colocate_with
      if ta._infer_shape:
        ta._merge_element_shape(value.get_shape())
      return ta
  def stack(self, name=None):
    """Return the values in the TensorArray as a stacked `Tensor`.
    All of the values must have been written and their shapes must all match.
    If input shapes have rank-`R`, then output shape will have rank-`(R+1)`.
    Args:
      name: A name for the operation (optional).
    Returns:
      All the tensors in the TensorArray stacked into one tensor.
    """
    with ops.colocate_with(self._handle):
      with ops.name_scope(name, "TensorArrayStack", [self._handle]):
        return self.gather(math_ops.range(0, self.size()), name=name)
  def gather(self, indices, name=None):
    """Return selected values in the TensorArray as a packed `Tensor`.
    All of selected values must have been written and their shapes
    must all match.
    Args:
      indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
        the `TensorArray` is not dynamic, `max_value=size()`.
      name: A name for the operation (optional).
    Returns:
      The in the `TensorArray` selected by `indices`, packed into one tensor.
    """
    if self._element_shape:
      element_shape = self._element_shape[0]
    else:
      element_shape = tensor_shape.TensorShape(None)
    value = gen_data_flow_ops._tensor_array_gather_v3(
        handle=self._handle,
        indices=indices,
        flow_in=self._flow,
        dtype=self._dtype,
        name=name,
        element_shape=element_shape)
    if self._element_shape and self._element_shape[0].dims is not None:
      # Leading dimension is the (unknown) number of gathered indices.
      value.set_shape([None] + self._element_shape[0].dims)
    return value
  def concat(self, name=None):
    """Return the values in the TensorArray as a concatenated `Tensor`.
    All of the values must have been written, their ranks must match, and
    and their shapes must all match for all dimensions except the first.
    Args:
      name: A name for the operation (optional).
    Returns:
      All the tensors in the TensorArray concatenated into one tensor.
    """
    if self._element_shape and self._element_shape[0].dims is not None:
      element_shape_except0 = (
          tensor_shape.TensorShape(self._element_shape[0].dims[1:]))
    else:
      element_shape_except0 = tensor_shape.TensorShape(None)
    value, _ = gen_data_flow_ops._tensor_array_concat_v3(
        handle=self._handle,
        flow_in=self._flow,
        dtype=self._dtype,
        name=name,
        element_shape_except0=element_shape_except0)
    if self._element_shape and self._element_shape[0].dims is not None:
      # Elements are joined along dim 0, so only dims[1:] survive statically.
      value.set_shape([None] + self._element_shape[0].dims[1:])
    return value
  @tf_should_use.should_use_result
  def unstack(self, value, name=None):
    """Unstack the values of a `Tensor` in the TensorArray.
    If input value shapes have rank-`R`, then the output TensorArray will
    contain elements whose shapes are rank-`(R-1)`.
    Args:
      value: (N+1)-D. Tensor of type `dtype`. The Tensor to unstack.
      name: A name for the operation (optional).
    Returns:
      A new TensorArray object with flow that ensures the unstack occurs.
      Use this object all for subsequent operations.
    Raises:
      ValueError: if the shape inference fails.
    """
    with ops.name_scope(name, "TensorArrayUnstack", [self._handle, value]):
      num_elements = array_ops.shape(value)[0]
      # Unstack is just a scatter over the full index range [0, N).
      return self.scatter(
          indices=math_ops.range(0, num_elements), value=value, name=name)
  @tf_should_use.should_use_result
  def scatter(self, indices, value, name=None):
    """Scatter the values of a `Tensor` in specific indices of a `TensorArray`.
    Args:
      indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
        the `TensorArray` is not dynamic, `max_value=size()`.
      value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack.
      name: A name for the operation (optional).
    Returns:
      A new TensorArray object with flow that ensures the scatter occurs.
      Use this object all for subsequent operations.
    Raises:
      ValueError: if the shape inference fails.
    """
    with ops.name_scope(name, "TensorArrayScatter",
                        [self._handle, value, indices]):
      value = ops.convert_to_tensor(value, name="value")
      with self._maybe_colocate_with(value):
        flow_out = gen_data_flow_ops._tensor_array_scatter_v3(
            handle=self._handle,
            indices=indices,
            value=value,
            flow_in=self._flow,
            name=name)
      ta = TensorArray(
          dtype=self._dtype, handle=self._handle, flow=flow_out,
          colocate_with_first_write_call=self._colocate_with_first_write_call)
      ta._infer_shape = self._infer_shape
      ta._element_shape = self._element_shape
      ta._colocate_with = self._colocate_with
      # Shape inference reads the op's graph inputs, so it only applies in
      # graph mode (TF 1.x `context.in_graph_mode()` API).
      if ta._infer_shape and context.in_graph_mode():
        # flow_out.op.inputs[2] is the `value` input of the scatter op;
        # each element drops the leading (index) dimension.
        val_shape = flow_out.op.inputs[2].get_shape()
        element_shape = tensor_shape.unknown_shape()
        if val_shape.dims is not None:
          element_shape = tensor_shape.TensorShape(val_shape.dims[1:])
        ta._merge_element_shape(element_shape)
      return ta
  @tf_should_use.should_use_result
  def split(self, value, lengths, name=None):
    """Split the values of a `Tensor` into the TensorArray.
    Args:
      value: (N+1)-D. Tensor of type `dtype`. The Tensor to split.
      lengths: 1-D. int32 vector with the lengths to use when splitting
        `value` along its first dimension.
      name: A name for the operation (optional).
    Returns:
      A new TensorArray object with flow that ensures the split occurs.
      Use this object all for subsequent operations.
    Raises:
      ValueError: if the shape inference fails.
    """
    with ops.name_scope(name, "TensorArraySplit",
                        [self._handle, value, lengths]):
      value = ops.convert_to_tensor(value, name="value")
      with self._maybe_colocate_with(value):
        lengths_64 = math_ops.to_int64(lengths)
        flow_out = gen_data_flow_ops._tensor_array_split_v3(
            handle=self._handle,
            value=value,
            lengths=lengths_64,
            flow_in=self._flow,
            name=name)
      ta = TensorArray(
          dtype=self._dtype, handle=self._handle, flow=flow_out,
          colocate_with_first_write_call=self._colocate_with_first_write_call)
      ta._infer_shape = self._infer_shape
      ta._element_shape = self._element_shape
      ta._colocate_with = self._colocate_with
      if ta._infer_shape and context.in_graph_mode():
        # inputs[1] is `value`, inputs[2] is `lengths` of the split op.
        val_shape = flow_out.op.inputs[1].get_shape()
        clengths = tensor_util.constant_value(flow_out.op.inputs[2])
        element_shape = tensor_shape.unknown_shape()
        if val_shape.dims is not None:
          # A static element shape is only known when every split has the
          # same constant length.
          if clengths is not None and clengths.max() == clengths.min():
            element_shape = tensor_shape.TensorShape([clengths[0]] +
                                                     val_shape.dims[1:])
        ta._merge_element_shape(element_shape)
      return ta
  def size(self, name=None):
    """Return the size of the TensorArray."""
    return gen_data_flow_ops._tensor_array_size_v3(
        handle=self._handle, flow_in=self.flow, name=name)
  @tf_should_use.should_use_result
  def close(self, name=None):
    """Close the current TensorArray."""
    return gen_data_flow_ops._tensor_array_close_v3(
        handle=self._handle, name=name)
# pylint: enable=protected-access
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import math
import struct
import sys
import zlib
class VMDK(object):
    """Inspector for VMDK virtual-disk files.

    Reads the sparse-extent header (magic 'KDMV'), walks the grain
    tables / grain directory, prints what it finds, and -- when an
    output file name is given -- reconstructs the raw disk image
    (stream-optimized grains are zlib-compressed).

    NOTE(review): this is Python 2 code (print statements, `file()`,
    integer `/` division); it will not run unmodified on Python 3.
    """
    # Sector size in bytes; all header offsets/sizes are in sectors.
    _SECT = 512
    # Fixed 512-byte sparse-extent header layout (struct "<4sIIQQQQIQQQBccccI431s").
    _HEADER = namedtuple('Header', 'magicNumber version flags capacity '
        'grainSize descriptorOffset descriptorSize numGTEsPerGT rgdOffset '
        'gdOffset overHead uncleanShutdown singleEndLineChar nonEndLineChar '
        'doubleEndLineChar1 doubleEndLineChar2 compressAlgorithm pad')
    # Raw 16-byte marker as read from disk.
    _MARKER = namedtuple('Marker', 'val size data')
    # Marker carrying compressed grain data (size > 0).
    _GRAIN_MARKER = namedtuple('GrainMarker', 'lba size data offset')
    # Marker introducing metadata sectors (size == 0).
    _METADATA_MARKER = namedtuple('Metadata', 'numSectors size type pad metadata offset')
    class Marker(object):
        # Metadata marker type codes.
        EOS = 0
        GT = 1
        GD = 2
        FOOTER = 3
        Pad = '\0' * 496
        _StringRepr = [ 'EOS', 'GT', 'GD', 'FOOTER', ]
        @classmethod
        def DecodeType(cls, data):
            # First 4 bytes of the marker data hold the type as little-endian uint32.
            return struct.unpack("<I", data)[0]
        @classmethod
        def toTypeString(cls, intVal):
            return cls._StringRepr[intVal]
    class BaseGrainTable(object):
        # `map` holds the table entries (sector offsets), one uint32 each.
        __slots__ = [ 'map' ]
        def __init__(self):
            self.reset()
        def reset(self):
            # Start from the subclass-specific empty mapping.
            self.map = self.empty()
        def asTuple(self):
            return tuple(self.map)
        @classmethod
        def fromData(cls, data):
            # Decode raw bytes as little-endian uint32s (Python 2: `/` is
            # integer division here).
            sformat = "<%sI" % (len(data) / 4)
            arr = struct.unpack(sformat, data)
            ret = cls()
            ret.map = arr
            return ret
    class GrainTable(BaseGrainTable):
        __slots__ = [ 'offset', 'lba', ]
        # A grain table is always 512 uint32 entries (2048 bytes).
        _format = "<512I"
        # Number of sectors covered by one grain table (512 entries * 128
        # sectors per grain).
        BLOCK_SIZE = 65536
        def __init__(self):
            VMDK.BaseGrainTable.__init__(self)
            self.offset = 0
            self.lba = -1
        def add(self, marker):
            # Each entry covers 128 sectors; integer division (Python 2).
            idx = (marker.lba % self.BLOCK_SIZE) / 128
            self.map[idx] = marker.offset
            self.lba = marker.lba / self.BLOCK_SIZE
        def fromMarker(self, marker):
            # The table data starts one sector after its marker.
            self.offset = marker.offset + 1
        @classmethod
        def decode(cls, marker, data):
            if len(data) != 2048:
                return []
            return struct.unpack(cls._format, data)
        @classmethod
        def empty(cls):
            return [ 0 ] * 512
        def isEmpty(self):
            # True when no grain in this table has been written.
            for i in self.map:
                if i != 0:
                    return False
            return True
    class GrainDirectory(BaseGrainTable):
        __slots__ = []
        # Sentinel gdOffset meaning the directory lives at the end of the
        # stream (stream-optimized layout).
        GD_AT_END = 0xffffffffffffffff
        def add(self, grainTable):
            if grainTable.isEmpty():
                return
            idx = grainTable.lba
            mapLen = len(self.map)
            # We may have skipped some tables, but we need to add a full
            # block of 128
            while mapLen <= idx:
                self.map.extend([ 0 ] * 128)
                mapLen += 128
            self.map[idx] = grainTable.offset
        @classmethod
        def decode(cls, marker, data):
            # Directory length is variable: marker.val sectors of uint32s.
            sformat = "<%sI" % (marker.val * VMDK._SECT / 4)
            metadata = struct.unpack(sformat, data)
            return metadata
        def empty(self):
            # NOTE(review): instance method here vs. classmethod on
            # GrainTable; both are reached via self.empty() in reset().
            return []
    def __init__(self, fobj, outputFile=None):
        # fobj: open file object positioned at the start of the VMDK.
        # outputFile: optional path; when set, inspect() also reconstructs
        # the raw image there.
        self._fobj = fobj
        self._outputFileName = outputFile
    def inspect(self):
        """Walk the VMDK, printing markers; optionally rebuild the raw image."""
        reconstruct = (self._outputFileName is not None)
        if reconstruct:
            fout = file(self._outputFileName, "w")
        headerData = self._fobj.read(self._SECT)
        self.header = self._HEADER(*struct.unpack("<4sIIQQQQIQQQBccccI431s", headerData))
        self.assertEquals(self.header.magicNumber, 'KDMV')
        print self.header
        if reconstruct:
            # Pre-size the output to the full disk capacity.
            fout.seek(self.header.capacity * self._SECT)
            fout.truncate()
        if self.header.descriptorSize > 0:
            # skip to descriptor
            self._fobj.read(self._SECT * (self.header.descriptorOffset - 1))
            self.descriptor = self._fobj.read(self._SECT * self.header.descriptorSize)
            print self.descriptor
        if self.header.gdOffset != self.GrainDirectory.GD_AT_END:
            # Flat (non-stream-optimized) layout: grains are uncompressed.
            # NOTE(review): `fout` is unbound here when no output file was
            # given -- inspectNonStreamOptimized(fout) would raise
            # NameError; confirm callers always pass an output file for
            # this layout.
            self.assertEquals(self.header.compressAlgorithm, 0)
            return self.inspectNonStreamOptimized(fout)
        # skip over the overhead
        self._fobj.seek(self.header.overHead * self._SECT, 0)
        grainTable = self.GrainTable()
        grainDirectory = self.GrainDirectory()
        while 1:
            marker = self._readMarker(withData=reconstruct)
            self._align()
            if isinstance(marker, self._METADATA_MARKER):
                print "%08x: Read metadata marker of type %s" % (marker.offset, self.Marker.toTypeString(marker.type))
                if marker.type == self.Marker.GT:
                    grainTable.fromMarker(marker)
                    grainDirectory.add(grainTable)
                    if not reconstruct:
                        # Cross-check the on-disk table against the one we
                        # rebuilt from the grain markers.
                        self.assertEquals(marker.metadata, grainTable.asTuple())
                    grainTable.reset()
                    continue
                if marker.type == self.Marker.GD:
                    if reconstruct:
                        return
                    self.assertEquals(marker.metadata, grainDirectory.asTuple())
                    # We're done reading extents, we now need to read
                    # the footer
                    break
                continue
            print "Data: %08x: %d bytes" % (marker.lba, marker.size)
            if reconstruct:
                # Grain data is zlib-compressed in this layout.
                fout.seek(marker.lba * self._SECT)
                fout.write(zlib.decompress(marker.data))
            grainTable.add(marker)
        # Footer is a full header copy, followed by an end-of-stream marker.
        footerMarker = self._readMarker()
        self.assertEquals(footerMarker.type, self.Marker.FOOTER)
        self.footer = self._HEADER(*struct.unpack("<4sIIQQQQIQQQBccccI431s",
            footerMarker.metadata))
        self.assertEquals(self.footer.magicNumber, 'KDMV')
        eosMarker = self._readMarker()
        self.assertEquals(eosMarker.type, self.Marker.EOS)
    def inspectNonStreamOptimized(self, fout):
        """Verify GD/redundant-GD consistency and optionally copy raw grains."""
        grainTable = self.GrainTable()
        grainDirectory = self.GrainDirectory()
        # Compute size of GD
        numGTs = math.ceil(self.header.capacity / float(self.header.grainSize))
        gdSize = int(math.ceil(numGTs / self.header.numGTEsPerGT))
        # gd is aligned to a sector size, which is 512, with each entry
        # being 4 bytes
        gdSize = self.pad(gdSize, 512/4)
        self._fobj.seek(self.header.gdOffset * self._SECT, 0)
        grainDirectory = self.GrainDirectory.fromData(self._fobj.read(gdSize * 4))
        # Redundant grain directory, kept for crash recovery.
        self._fobj.seek(self.header.rgdOffset * self._SECT, 0)
        rgrainDirectory = self.GrainDirectory.fromData(self._fobj.read(gdSize * 4))
        #self.assertEquals(grainDirectory.map, rgrainDirectory.map)
        grainSizeBytes = self.header.grainSize * self._SECT
        for gtNum in range(gdSize):
            self._fobj.seek(grainDirectory.map[gtNum] * self._SECT, 0)
            gt = self.GrainTable.fromData(self._fobj.read(512 * 4))
            assert len(gt.map) == 512
            self._fobj.seek(rgrainDirectory.map[gtNum] * self._SECT, 0)
            rgt = self.GrainTable.fromData(self._fobj.read(512 * 4))
            self.assertEquals(gt.map, rgt.map)
            for (gteNum, gte) in enumerate(gt.map):
                pos = gtNum * self.header.numGTEsPerGT + gteNum
                if pos >= numGTs:
                    break
                self.assertEquals(gte, rgt.map[gteNum])
                # Re-read the same entry straight from the directory region
                # as an extra consistency check.
                self._fobj.seek(self.header.gdOffset * self._SECT + gdSize * 4 + pos * 4)
                data = self._fobj.read(4)
                gteOther = struct.unpack("<I", data)[0]
                self.assertEquals(gte, gteOther)
                if fout and gte > 0:
                    # Copy the raw (uncompressed) grain to the output image.
                    self._fobj.seek(gte * self._SECT)
                    fout.seek((gtNum * 512 + gteNum) * grainSizeBytes)
                    data = self._fobj.read(grainSizeBytes)
                    assert len(data) == grainSizeBytes
                    fout.write(data)
            if (gtNum + 1) * self.header.numGTEsPerGT > numGTs:
                break
    @classmethod
    def pad(cls, number, paddingSize):
        """Round `number` up to the next multiple of `paddingSize`."""
        remainder = number % paddingSize
        if remainder == 0:
            return number
        return number + paddingSize - remainder
    def assertEquals(self, first, second):
        # Lightweight unittest-style check used throughout inspect().
        assert first == second, "%s != %s" % (first, second)
    def _readMarker(self, withData=False):
        """Read the next marker, returning a grain or metadata namedtuple."""
        offset = self._fobj.tell()
        assert offset % self._SECT == 0
        # Record the marker position in sectors.
        offset /= self._SECT
        markerData = self._fobj.read(16)
        marker, markerType = self._readMarkerFromData(markerData)
        if marker.size:
            # Compressed grain: marker.data already holds the first 4 bytes
            # of the payload.
            if withData:
                grainData = marker.data + self._fobj.read(marker.size - 4)
            else:
                grainData = "..."
                # Seek from current position, to pretend we're reading
                self._fobj.seek(marker.size - 4, 1)
            marker = self._GRAIN_MARKER(marker.val, marker.size, grainData, offset)
        else:
            # Realign to read the metadata
            self._align()
            metadata = self._fobj.read(marker.val * self._SECT)
            if markerType == self.Marker.GD:
                # Grain directories have a variable number of
                # entries, depending on the extent size, and contain
                # an unsigned int (4 byte)
                metadata = self.GrainDirectory.decode(marker, metadata)
            elif markerType == self.Marker.GT:
                metadata = self.GrainTable.decode(marker, metadata)
            marker = self._METADATA_MARKER(marker.val, marker.size, markerType,
                self.Marker.Pad, metadata, offset)
        return marker
    @classmethod
    def _readMarkerFromData(cls, markerData, checkMarkerType=True):
        """Decode a raw 16-byte marker; returns (marker, markerType)."""
        marker = cls._MARKER(*struct.unpack("<QI4s", markerData[:16]))
        if marker.size:
            # Compressed grain. Type not needed
            markerType = -1
        else:
            markerType = cls.Marker.DecodeType(marker.data)
            if checkMarkerType:
                assert 0 <= markerType < len(cls.Marker._StringRepr)
        return marker, markerType
    def _align(self):
        "Align to 512 byte boundary"
        pos = self._fobj.tell()
        padding = pos % self._SECT
        if padding:
            self._fobj.read(self._SECT - padding)
def main():
if len(sys.argv) < 2:
print "Usage: %s <file> [ <output-file> ]" % sys.argv[0]
return 1
vmdkFile = file(sys.argv[1])
if len(sys.argv) > 2:
outputFile = sys.argv[2]
else:
outputFile = None
vmdk = VMDK(vmdkFile, outputFile)
vmdk.inspect()
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Builtin event handlers.
This module contains builtin handlers for events emitted by botocore.
"""
import os
import base64
import logging
import copy
import re
import warnings
import uuid
from botocore.compat import (
unquote, json, six, unquote_str, ensure_bytes, get_md5,
OrderedDict, urlsplit, urlunsplit, XMLParseError,
ETree, quote,
)
from botocore.docs.utils import AutoPopulatedParam
from botocore.docs.utils import HideParamFromOperations
from botocore.docs.utils import AppendParamDocumentation
from botocore.signers import add_generate_presigned_url
from botocore.signers import add_generate_presigned_post
from botocore.signers import add_generate_db_auth_token
from botocore.exceptions import ParamValidationError
from botocore.exceptions import AliasConflictParameterError
from botocore.exceptions import UnsupportedTLSVersionWarning
from botocore.utils import percent_encode, SAFE_CHARS
from botocore.utils import switch_host_with_param
from botocore.utils import conditionally_calculate_md5
from botocore.utils import is_global_accesspoint
from botocore import utils
import botocore
import botocore.auth
# Keep these imported. There's pre-existing code that uses them.
from botocore import retryhandler # noqa
from botocore import translate # noqa
from botocore.compat import MD5_AVAILABLE # noqa
from botocore.exceptions import MissingServiceIdError # noqa
from botocore.utils import hyphenize_service_id # noqa
# Module-level logger for all builtin handlers.
logger = logging.getLogger(__name__)
# Sentinels used when registering handlers to force first/last ordering.
REGISTER_FIRST = object()
REGISTER_LAST = object()
# From the S3 docs:
# The rules for bucket names in the US Standard region allow bucket names
# to be as long as 255 characters, and bucket names can contain any
# combination of uppercase letters, lowercase letters, numbers, periods
# (.), hyphens (-), and underscores (_).
VALID_BUCKET = re.compile(r'^[a-zA-Z0-9.\-_]{1,255}$')
# S3 access-point / object-lambda ARN shape.
_ACCESSPOINT_ARN = (
    r'^arn:(aws).*:(s3|s3-object-lambda):[a-z\-0-9]*:[0-9]{12}:accesspoint[/:]'
    r'[a-zA-Z0-9\-.]{1,63}$'
)
# S3 Outposts access-point ARN shape.
_OUTPOST_ARN = (
    r'^arn:(aws).*:s3-outposts:[a-z\-0-9]+:[0-9]{12}:outpost[/:]'
    r'[a-zA-Z0-9\-]{1,63}[/:]accesspoint[/:][a-zA-Z0-9\-]{1,63}$'
)
# Either ARN form is accepted wherever a bucket name is expected.
VALID_S3_ARN = re.compile('|'.join([_ACCESSPOINT_ARN, _OUTPOST_ARN]))
# Matches a trailing "?versionId=..." suffix on an S3 key/URL.
VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$')
SERVICE_NAME_ALIASES = {
    'runtime.sagemaker': 'sagemaker-runtime'
}
def handle_service_name_alias(service_name, **kwargs):
    """Map a legacy service name to its canonical alias.

    Unknown names pass through unchanged.
    """
    alias = SERVICE_NAME_ALIASES.get(service_name)
    return service_name if alias is None else alias
def add_recursion_detection_header(params, **kwargs):
    """Add an X-Amzn-Trace-Id header when executing inside AWS Lambda.

    Propagating the active trace id on outgoing requests lets recursive
    invocation loops be detected.  An already-present header is never
    overwritten.

    :param params: request dict; its 'headers' mapping may be mutated.
    """
    has_lambda_name = 'AWS_LAMBDA_FUNCTION_NAME' in os.environ
    # The Lambda runtime sets _X_AMZN_TRACE_ID (note "AMZN"); the previous
    # "_X_AMZ_TRACE_ID" spelling never matched, silently disabling this
    # handler.
    trace_id = os.environ.get('_X_AMZN_TRACE_ID')
    if has_lambda_name and trace_id:
        headers = params['headers']
        if 'X-Amzn-Trace-Id' not in headers:
            headers['X-Amzn-Trace-Id'] = quote(trace_id)
def escape_xml_payload(params, **kwargs):
# Replace \r and \n with the escaped sequence over the whole XML document
# to avoid linebreak normalization modifying customer input when the
# document is parsed. Ideally, we would do this in ElementTree.tostring,
# but it doesn't allow us to override entity escaping for text fields. For
# this operation \r and \n can only appear in the XML document if they were
# passed as part of the customer input.
body = params['body']
if b'\r' in body:
body = body.replace(b'\r', b'
')
if b'\n' in body:
body = body.replace(b'\n', b'
')
params['body'] = body
def check_for_200_error(response, **kwargs):
    """Rewrite S3 copy errors embedded in a 200 response as a 500.

    Per the S3 COPY documentation, an error that occurs *during* the copy
    is embedded in an otherwise-successful 200 OK body.  Since this
    handler is connected before the retry logic, flipping the status code
    to 500 makes the standard retry machinery treat it like any other
    server error.
    """
    if response is None:
        # An exception was raised while retrieving the response; there is
        # nothing to inspect.  See Endpoint._get_response().
        return
    http_response, parsed = response
    if not _looks_like_special_case_error(http_response):
        return
    logger.debug("Error found for response with 200 status code, "
                 "errors: %s, changing status code to "
                 "500.", parsed)
    http_response.status_code = 500
def _looks_like_special_case_error(http_response):
if http_response.status_code == 200:
try:
parser = ETree.XMLParser(
target=ETree.TreeBuilder(),
encoding='utf-8')
parser.feed(http_response.content)
root = parser.close()
except XMLParseError:
# In cases of network disruptions, we may end up with a partial
# streamed response from S3. We need to treat these cases as
# 500 Service Errors and try again.
return True
if root.tag == 'Error':
return True
return False
def set_operation_specific_signer(context, signing_name, **kwargs):
""" Choose the operation-specific signer.
Individual operations may have a different auth type than the service as a
whole. This will most often manifest as operations that should not be
authenticated at all, but can include other auth modes such as sigv4
without body signing.
"""
auth_type = context.get('auth_type')
# Auth type will be None if the operation doesn't have a configured auth
# type.
if not auth_type:
return
# Auth type will be the string value 'none' if the operation should not
# be signed at all.
if auth_type == 'none':
return botocore.UNSIGNED
if auth_type.startswith('v4'):
signature_version = 'v4'
if signing_name == 's3':
if is_global_accesspoint(context):
signature_version = 's3v4a'
else:
signature_version = 's3v4'
# If the operation needs an unsigned body, we set additional context
# allowing the signer to be aware of this.
if auth_type == 'v4-unsigned-body':
context['payload_signing_enabled'] = False
return signature_version
def decode_console_output(parsed, **kwargs):
    """Base64-decode the 'Output' field of a parsed response, in place.

    Decoding errors are logged and the original value is left untouched.
    """
    if 'Output' not in parsed:
        return
    try:
        # 'replace' because the decoded bytes may contain sequences that
        # are not valid utf-8.
        decoded = base64.b64decode(six.b(parsed['Output']))
        parsed['Output'] = decoded.decode('utf-8', 'replace')
    except (ValueError, TypeError, AttributeError):
        logger.debug('Error decoding base64', exc_info=True)
def generate_idempotent_uuid(params, model, **kwargs):
    """Fill in a random UUID for any idempotency-token member the caller omitted.

    :param params: user-supplied request parameters; mutated in place.
    :param model: operation model whose ``idempotent_members`` lists the
        member names that carry idempotency tokens.
    """
    for name in model.idempotent_members:
        if name not in params:
            params[name] = str(uuid.uuid4())
            # Pass the values as lazy %-style logging args instead of
            # eagerly formatting the message (original used '%' inline),
            # so the string is only built when DEBUG is enabled.
            logger.debug("injecting idempotency token (%s) into param '%s'.",
                         params[name], name)
def decode_quoted_jsondoc(value):
    """URL-decode *value* and parse it as JSON.

    Returns the original value unchanged when it cannot be parsed.
    """
    try:
        return json.loads(unquote(value))
    except (ValueError, TypeError):
        logger.debug('Error loading quoted JSON', exc_info=True)
        return value
def json_decode_template_body(parsed, **kwargs):
    """Parse a GetTemplate 'TemplateBody' string into a dict in place.

    Key order is preserved via OrderedDict; invalid JSON is left as-is.
    """
    if 'TemplateBody' not in parsed:
        return
    try:
        body = json.loads(
            parsed['TemplateBody'], object_pairs_hook=OrderedDict)
    except (ValueError, TypeError):
        logger.debug('error loading JSON', exc_info=True)
    else:
        parsed['TemplateBody'] = body
def validate_bucket_name(params, **kwargs):
    """Reject Bucket values that are neither valid bucket names nor S3 ARNs."""
    if 'Bucket' not in params:
        return
    bucket = params['Bucket']
    if VALID_BUCKET.search(bucket) or VALID_S3_ARN.search(bucket):
        return
    raise ParamValidationError(report=(
        'Invalid bucket name "%s": Bucket name must match '
        'the regex "%s" or be an ARN matching the regex "%s"' % (
            bucket, VALID_BUCKET.pattern, VALID_S3_ARN.pattern)))
def sse_md5(params, **kwargs):
    """Fill in the SSECustomerKeyMD5 parameter for SSE-C requests.

    S3 server-side encryption requires the encryption key to be sent
    base64 encoded together with a base64-encoded MD5 of the key; both
    are computed here when the caller has not supplied the MD5.
    """
    _sse_md5(params, 'SSECustomer')
def copy_source_sse_md5(params, **kwargs):
    """Fill in the CopySourceSSECustomerKeyMD5 parameter for SSE-C copies.

    Same contract as :func:`sse_md5`, but applied to the copy-source
    SSE-C key parameters.
    """
    _sse_md5(params, 'CopySourceSSECustomer')
def _sse_md5(params, sse_member_prefix='SSECustomer'):
    """Base64-encode the SSE-C key in *params* and fill in its MD5 digest.

    No-op when the key is absent or the caller already supplied the MD5.
    """
    if not _needs_s3_sse_customization(params, sse_member_prefix):
        return
    key_member = sse_member_prefix + 'Key'
    md5_member = sse_member_prefix + 'KeyMD5'
    key_bytes = params[key_member]
    if isinstance(key_bytes, six.text_type):
        # The digest/b64 helpers need bytes.
        key_bytes = key_bytes.encode('utf-8')
    params[md5_member] = base64.b64encode(
        get_md5(key_bytes).digest()).decode('utf-8')
    params[key_member] = base64.b64encode(key_bytes).decode('utf-8')
def _needs_s3_sse_customization(params, sse_member_prefix):
return (
params.get(sse_member_prefix + 'Key') is not None
and sse_member_prefix + 'KeyMD5' not in params
)
def disable_signing(**kwargs):
    """Disable request signing.

    Returns the sentinel signer name that tells the request signer to
    skip signing entirely.
    """
    return botocore.UNSIGNED
def add_expect_header(model, params, **kwargs):
    """Attach an 'Expect: 100-continue' header for streaming PUT/POST uploads."""
    if model.http.get('method', '') not in ('PUT', 'POST'):
        return
    body = params.get('body')
    if hasattr(body, 'read'):
        # Any file-like body gets 100-continue, regardless of its size.
        logger.debug("Adding expect 100 continue header to request.")
        params['headers']['Expect'] = '100-continue'
class DeprecatedServiceDocumenter(object):
    """Injects a deprecation notice into a deprecated service's generated docs."""
    def __init__(self, replacement_service_name):
        # Name of the service users should migrate to; rendered below.
        self._replacement_service_name = replacement_service_name
    def inject_deprecation_notice(self, section, event_name, **kwargs):
        """Write an 'important' admonition pointing readers at the replacement."""
        section.style.start_important()
        section.write('This service client is deprecated. Please use ')
        # Render the replacement name as a cross-reference in the docs.
        section.style.ref(
            self._replacement_service_name,
            self._replacement_service_name,
        )
        section.write(' instead.')
        section.style.end_important()
def document_copy_source_form(section, event_name, **kwargs):
    """Rewrite the generated CopySource docs to show both accepted forms.

    CopySource accepts either a string or a dict at call time, but the
    API model only declares the string type, so the auto-generated
    example and parameter documentation are patched here.
    """
    if 'request-example' in event_name:
        # Replace the example value with both accepted literal forms.
        parent = section.get_section('structure-value')
        param_line = parent.get_section('CopySource')
        value_portion = param_line.get_section('member-value')
        value_portion.clear_text()
        value_portion.write("'string' or {'Bucket': 'string', "
                            "'Key': 'string', 'VersionId': 'string'}")
    elif 'request-params' in event_name:
        # Rewrite both the declared type and the prose description.
        param_section = section.get_section('CopySource')
        type_section = param_section.get_section('param-type')
        type_section.clear_text()
        type_section.write(':type CopySource: str or dict')
        doc_section = param_section.get_section('param-documentation')
        doc_section.clear_text()
        doc_section.write(
            "The name of the source bucket, key name of the source object, "
            "and optional version ID of the source object.  You can either "
            "provide this value as a string or a dictionary.  The "
            "string form is {bucket}/{key} or "
            "{bucket}/{key}?versionId={versionId} if you want to copy a "
            "specific version.  You can also provide this value as a "
            "dictionary.  The dictionary format is recommended over "
            "the string format because it is more explicit.  The dictionary "
            "format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}."
            "  Note that the VersionId key is optional and may be omitted."
            " To specify an S3 access point, provide the access point"
            " ARN for the ``Bucket`` key in the copy source dictionary. If you"
            " want to provide the copy source for an S3 access point as a"
            " string instead of a dictionary, the ARN provided must be the"
            " full S3 access point object ARN"
            " (i.e. {accesspoint_arn}/object/{key})"
        )
def handle_copy_source_param(params, **kwargs):
    """Convert the CopySource param for CopyObject/UploadPartCopy.

    Two input forms are supported:

    * a string -- the bucket and optional version id are parsed out and
      only the key portion is URL encoded;
    * a dict -- Bucket, Key, and VersionId are given explicitly, so the
      key can be encoded and the value serialized for S3.
    """
    source = params.get('CopySource')
    if isinstance(source, six.string_types):
        params['CopySource'] = _quote_source_header(source)
    elif isinstance(source, dict):
        params['CopySource'] = _quote_source_header_from_dict(source)
    # Any other value (including a missing one) is left for the param
    # validator, which produces a better error message.
def _quote_source_header_from_dict(source_dict):
    """Serialize a CopySource dict into the header string S3 expects.

    Raises ParamValidationError when Bucket or Key is missing.
    """
    try:
        bucket = source_dict['Bucket']
        key = source_dict['Key']
    except KeyError as e:
        raise ParamValidationError(
            report='Missing required parameter: %s' % str(e))
    version_id = source_dict.get('VersionId')
    if VALID_S3_ARN.search(bucket):
        # Access-point ARNs address the object through an /object/ path.
        header = '%s/object/%s' % (bucket, key)
    else:
        header = '%s/%s' % (bucket, key)
    header = percent_encode(header, safe=SAFE_CHARS + '/')
    if version_id is not None:
        header += '?versionId=%s' % version_id
    return header
def _quote_source_header(value):
    """Percent-encode a CopySource string, leaving any ?versionId= suffix as-is."""
    match = VERSION_ID_SUFFIX.search(value)
    if match is None:
        return percent_encode(value, safe=SAFE_CHARS + '/')
    head = value[:match.start()]
    version_suffix = value[match.start():]
    return percent_encode(head, safe=SAFE_CHARS + '/') + version_suffix
def _get_cross_region_presigned_url(request_signer, request_dict, model,
                                    source_region, destination_region):
    """Presign a GET copy of *request_dict* against the source region.

    Works on a deep copy so the real outgoing request is untouched;
    returns the presigned URL string.
    """
    # The better way to do this is to actually get the
    # endpoint_resolver and get the endpoint_url given the
    # source region. In this specific case, we know that
    # we can safely replace the dest region with the source
    # region because of the supported EC2 regions, but in
    # general this is not a safe assumption to make.
    # I think eventually we should try to plumb through something
    # that allows us to resolve endpoints from regions.
    request_dict_copy = copy.deepcopy(request_dict)
    request_dict_copy['body']['DestinationRegion'] = destination_region
    # NOTE(review): naive substring replace of the region inside the URL --
    # see the caveat above about proper endpoint resolution.
    request_dict_copy['url'] = request_dict['url'].replace(
        destination_region, source_region)
    request_dict_copy['method'] = 'GET'
    request_dict_copy['headers'] = {}
    return request_signer.generate_presigned_url(
        request_dict_copy, region_name=source_region,
        operation_name=model.name)
def _get_presigned_url_source_and_destination_regions(request_signer, params):
# Gets the source and destination regions to be used
destination_region = request_signer._region_name
source_region = params.get('SourceRegion')
return source_region, destination_region
def inject_presigned_url_ec2(params, request_signer, model, **kwargs):
    """Populate PresignedUrl and DestinationRegion in an EC2 request body."""
    body = params['body']
    # Respect a customer-supplied presigned URL.
    if 'PresignedUrl' in body:
        return
    src, dest = _get_presigned_url_source_and_destination_regions(
        request_signer, body)
    body['PresignedUrl'] = _get_cross_region_presigned_url(
        request_signer, params, model, src, dest)
    # EC2 requires the destination region on the wire in addition to the
    # source region.
    body['DestinationRegion'] = dest
def inject_presigned_url_rds(params, request_signer, model, **kwargs):
    """Populate PreSignedUrl for cross-region RDS-style copy operations."""
    body = params['body']
    # SourceRegion is optional for RDS; without it this is a local copy
    # and there is nothing to presign.
    if 'SourceRegion' not in body:
        return
    src, dest = _get_presigned_url_source_and_destination_regions(
        request_signer, body)
    # SourceRegion is not actually modeled for RDS, so it must be removed
    # before the real request is serialized.
    del body['SourceRegion']
    # Respect a customer-supplied presigned URL.
    if 'PreSignedUrl' in body:
        return
    body['PreSignedUrl'] = _get_cross_region_presigned_url(
        request_signer, params, model, src, dest)
def json_decode_policies(parsed, model, **kwargs):
    """Decode urlencoded-JSON IAM policy documents into dictionaries.

    IAM returns policy documents as urlencode(json.dumps(policy)); walk
    the output shape and turn each one back into a dict for the user.
    """
    output_shape = model.output_shape
    if output_shape is None:
        return
    _decode_policy_types(parsed, output_shape)
def _decode_policy_types(parsed, shape):
# IAM consistently uses the policyDocumentType shape to indicate
# strings that have policy documents.
shape_name = 'policyDocumentType'
if shape.type_name == 'structure':
for member_name, member_shape in shape.members.items():
if member_shape.type_name == 'string' and \
member_shape.name == shape_name and \
member_name in parsed:
parsed[member_name] = decode_quoted_jsondoc(
parsed[member_name])
elif member_name in parsed:
_decode_policy_types(parsed[member_name], member_shape)
if shape.type_name == 'list':
shape_member = shape.member
for item in parsed:
_decode_policy_types(item, shape_member)
def parse_get_bucket_location(parsed, http_response, **kwargs):
    """Fill in LocationConstraint by hand-parsing the response XML.

    s3.GetBucketLocation cannot be modeled properly, so the incoming
    'parsed' dict only carries ResponseMetadata; the region is extracted
    from the raw body here.
    """
    if http_response.raw is None:
        return
    xml_parser = ETree.XMLParser(
        target=ETree.TreeBuilder(),
        encoding='utf-8')
    xml_parser.feed(http_response.content)
    document_root = xml_parser.close()
    parsed['LocationConstraint'] = document_root.text
def base64_encode_user_data(params, **kwargs):
    """Base64-encode the UserData parameter in place."""
    if 'UserData' not in params:
        return
    user_data = params['UserData']
    if isinstance(user_data, six.text_type):
        # b64encode operates on bytes.
        user_data = user_data.encode('utf-8')
    params['UserData'] = base64.b64encode(user_data).decode('utf-8')
def document_base64_encoding(param):
    """Return a doc handler noting that *param* is base64 encoded automatically."""
    note = ('**This value will be base64 encoded automatically. Do '
            'not base64 encode this value prior to performing the '
            'operation.**')
    return AppendParamDocumentation(param, note).append_documentation
def validate_ascii_metadata(params, **kwargs):
    """Verify S3 Metadata only contains ascii characters.

    From: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html

    "Amazon S3 stores user-defined metadata in lowercase. Each name, value
    pair must conform to US-ASCII when using REST and UTF-8 when using SOAP
    or browser-based uploads via POST."
    """
    metadata = params.get('Metadata')
    if not isinstance(metadata, dict) or not metadata:
        # This handler runs before param validation, so the value must be
        # type-checked here; the validator will emit a descriptive error
        # for anything that is not a dict.
        return
    for key, value in metadata.items():
        try:
            key.encode('ascii')
            value.encode('ascii')
        except UnicodeEncodeError:
            raise ParamValidationError(report=(
                'Non ascii characters found in S3 metadata '
                'for key "%s", value: "%s".  \nS3 metadata can only '
                'contain ASCII characters. ' % (key, value)
            ))
def fix_route53_ids(params, model, **kwargs):
    """Strip the path prefix from Route53 resource IDs.

    Route53 outputs IDs like ``'foo/1234'`` while inputs expect just
    ``'1234'``; keeping only the final path piece lets one operation's
    output feed the next one's input.
    """
    input_shape = model.input_shape
    if not input_shape or not hasattr(input_shape, 'members'):
        return
    id_members = [
        name for name, shape in input_shape.members.items()
        if shape.name in ('ResourceId', 'DelegationSetId')
    ]
    for name in id_members:
        if name not in params:
            continue
        original = params[name]
        # Keep only the portion after the final '/'.
        params[name] = original.split('/')[-1]
        logger.debug('%s %s -> %s', name, original, params[name])
def inject_account_id(params, **kwargs):
    """Default Glacier's accountId to '-' (the current owner's account).

    Glacier requires accountId but accepts '-' as shorthand; fill it in
    as a convenience when the user did not provide one.
    """
    if params.get('accountId') is None:
        params['accountId'] = '-'
def add_glacier_version(model, params, **kwargs):
    """Attach the modeled apiVersion as the x-amz-glacier-version header."""
    api_version = model.metadata['apiVersion']
    params['headers']['x-amz-glacier-version'] = api_version
def add_accept_header(model, params, **kwargs):
    """Default the Accept header to application/json when not already set."""
    headers = params['headers']
    if headers.get('Accept', None) is None:
        headers['Accept'] = 'application/json'
def add_glacier_checksums(params, **kwargs):
    """Add glacier checksums to the http request.

    This will add two headers to the http request:

    * x-amz-content-sha256
    * x-amz-sha256-tree-hash

    These values will only be added if they are not present
    in the HTTP request.
    """
    request_dict = params
    headers = request_dict['headers']
    body = request_dict['body']
    if isinstance(body, six.binary_type):
        # If the user provided a bytes type instead of a file
        # like object, we're temporarily create a BytesIO object
        # so we can use the util functions to calculate the
        # checksums which assume file like objects.  Note that
        # we're not actually changing the body in the request_dict.
        body = six.BytesIO(body)
    # Remember where the stream was so it can be rewound after each pass.
    starting_position = body.tell()
    if 'x-amz-content-sha256' not in headers:
        headers['x-amz-content-sha256'] = utils.calculate_sha256(
            body, as_hex=True)
    # Rewind before the tree-hash pass reads the body again.
    body.seek(starting_position)
    if 'x-amz-sha256-tree-hash' not in headers:
        headers['x-amz-sha256-tree-hash'] = utils.calculate_tree_hash(body)
    # Rewind once more so the body is ready to be sent.
    body.seek(starting_position)
def document_glacier_tree_hash_checksum():
    """Return a doc handler that appends tree-hash guidance to 'checksum'."""
    # NOTE: this literal is rendered verbatim into the generated docs.
    doc = '''
    This is a required field.

    Ideally you will want to compute this value with checksums from
    previous uploaded parts, using the algorithm described in
    `Glacier documentation <http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html>`_.

    But if you prefer, you can also use botocore.utils.calculate_tree_hash()
    to compute it from raw file by::

        checksum = calculate_tree_hash(open('your_file.txt', 'rb'))

    '''
    return AppendParamDocumentation('checksum', doc).append_documentation
def document_cloudformation_get_template_return_type(section, event_name, **kwargs):
    """Patch GetTemplate docs: TemplateBody is decoded into a dict (see
    json_decode_template_body), not returned as the modeled string."""
    if 'response-params' in event_name:
        # Fix the declared return type of TemplateBody.
        template_body_section = section.get_section('TemplateBody')
        type_section = template_body_section.get_section('param-type')
        type_section.clear_text()
        type_section.write('(*dict*) --')
    elif 'response-example' in event_name:
        # Show a dict literal in the response example as well.
        parent = section.get_section('structure-value')
        param_line = parent.get_section('TemplateBody')
        value_portion = param_line.get_section('member-value')
        value_portion.clear_text()
        value_portion.write('{}')
def switch_host_machinelearning(request, **kwargs):
    """Switch the Predict request's host using its 'PredictEndpoint' parameter
    (delegates to switch_host_with_param)."""
    switch_host_with_param(request, 'PredictEndpoint')
def check_openssl_supports_tls_version_1_2(**kwargs):
    """Warn when the linked OpenSSL predates 1.0.1 (no TLS 1.2 support).

    iot-data requires TLS 1.2; emit UnsupportedTLSVersionWarning when the
    interpreter's OpenSSL cannot provide it.
    """
    import ssl
    try:
        openssl_version_tuple = ssl.OPENSSL_VERSION_INFO
    except AttributeError:
        # Python 2.6's ssl module has no OPENSSL_VERSION_INFO, so the
        # version cannot be checked there; skip the convenience warning.
        # (The original wrapped the whole body in try/except, which could
        # also swallow AttributeErrors raised while warning; the try now
        # covers only the attribute access.)
        return
    if openssl_version_tuple < (1, 0, 1):
        warnings.warn(
            'Currently installed openssl version: %s does not '
            'support TLS 1.2, which is required for use of iot-data. '
            'Please use python installed with openssl version 1.0.1 or '
            'higher.' % (ssl.OPENSSL_VERSION),
            UnsupportedTLSVersionWarning
        )
def change_get_to_post(request, **kwargs):
    """Convert a GET with a query string into a form-encoded POST.

    Useful when a potentially large GET request must be sent as a POST
    with x-www-form-urlencoded encoding instead.
    """
    if request.method != 'GET' or '?' not in request.url:
        return
    request.headers['Content-Type'] = 'application/x-www-form-urlencoded'
    request.method = 'POST'
    # The query string becomes the POST body.
    request.url, request.data = request.url.split('?', 1)
def set_list_objects_encoding_type_url(params, context, **kwargs):
    """Default EncodingType to 'url' for ListObjects-style calls."""
    if 'EncodingType' in params:
        return
    # Record that we (not the customer) requested the encoding, so the
    # response decoder knows it is safe to url-decode the keys.
    context['encoding_type_auto_set'] = True
    params['EncodingType'] = 'url'
def decode_list_object(parsed, context, **kwargs):
    """URL-decode keys in a ListObjects response that we requested encoded.

    Needed because 'url' is passed as the encoding type: the paginator is
    based on the key, so the value must be decoded before it can be
    round-tripped. Per the S3 documentation, encoding-type=url encodes
    Delimiter, Marker, Prefix, NextMarker and Key.
    """
    _decode_list_object(
        parsed=parsed,
        context=context,
        top_level_keys=['Delimiter', 'Marker', 'NextMarker'],
        nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')],
    )
def decode_list_object_v2(parsed, context, **kwargs):
    """URL-decode keys in a ListObjectsV2 response that we requested encoded.

    Per the S3 documentation, encoding-type=url encodes Delimiter,
    Prefix, ContinuationToken, Key, and StartAfter.
    """
    _decode_list_object(
        parsed=parsed,
        context=context,
        top_level_keys=['Delimiter', 'Prefix', 'StartAfter'],
        nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')],
    )
def decode_list_object_versions(parsed, context, **kwargs):
    """URL-decode keys in a ListObjectVersions response we requested encoded.

    Per the S3 documentation, encoding-type=url encodes KeyMarker,
    NextKeyMarker, Prefix, Key, and Delimiter.
    """
    _decode_list_object(
        parsed=parsed,
        context=context,
        top_level_keys=[
            'KeyMarker',
            'NextKeyMarker',
            'Prefix',
            'Delimiter',
        ],
        nested_keys=[
            ('Versions', 'Key'),
            ('DeleteMarkers', 'Key'),
            ('CommonPrefixes', 'Prefix'),
        ],
    )
def _decode_list_object(top_level_keys, nested_keys, parsed, context):
if parsed.get('EncodingType') == 'url' and context.get('encoding_type_auto_set'):
# URL decode top-level keys in the response if present.
for key in top_level_keys:
if key in parsed:
parsed[key] = unquote_str(parsed[key])
# URL decode nested keys from the response if present.
for (top_key, child_key) in nested_keys:
if top_key in parsed:
for member in parsed[top_key]:
member[child_key] = unquote_str(member[child_key])
def convert_body_to_file_like_object(params, **kwargs):
    """Wrap a str/bytes Body param in a BytesIO so it can be streamed."""
    if 'Body' not in params:
        return
    body = params['Body']
    if isinstance(body, six.string_types):
        params['Body'] = six.BytesIO(ensure_bytes(body))
    elif isinstance(body, six.binary_type):
        params['Body'] = six.BytesIO(body)
def _add_parameter_aliases(handler_list):
    """Register call- and docs-handlers for every known parameter alias.

    Keys in the alias table are '<service>.<operation>.<parameter>': the
    leading portion selects the event to register on, the final portion
    is the modeled parameter name, and the value is the alias exposed in
    the documentation.
    """
    aliases = {
        'ec2.*.Filter': 'Filters',
        'logs.CreateExportTask.from': 'fromTime',
        'cloudsearchdomain.Search.return': 'returnFields'
    }
    for qualified_name, alias_name in aliases.items():
        event_portion, original_name = qualified_name.rsplit('.', 1)
        parameter_alias = ParameterAlias(original_name, alias_name)
        # One handler accepts the alias in user-supplied call params;
        # REGISTER_FIRST runs it before other parameter-build hooks.
        handler_list.append((
            'before-parameter-build.' + event_portion,
            parameter_alias.alias_parameter_in_call,
            REGISTER_FIRST
        ))
        # The other rewrites the documentation to show only the alias.
        handler_list.append((
            'docs.*.' + event_portion + '.complete-section',
            parameter_alias.alias_parameter_in_documentation))
class ParameterAlias(object):
    """Exposes an operation parameter under an alternate (alias) name.

    The alias is accepted in API calls (translated back to the modeled
    name before serialization) and substituted for the modeled name in
    the generated documentation.
    """
    def __init__(self, original_name, alias_name):
        # Modeled (wire) parameter name.
        self._original_name = original_name
        # Name exposed to users and shown in the docs.
        self._alias_name = alias_name
    def alias_parameter_in_call(self, params, model, **kwargs):
        """Translate the alias back to the modeled name in call params.

        Raises AliasConflictParameterError when the caller supplied both
        the alias and the original name.
        """
        if model.input_shape:
            # Only consider accepting the alias if it is modeled in the
            # input shape.
            if self._original_name in model.input_shape.members:
                if self._alias_name in params:
                    if self._original_name in params:
                        raise AliasConflictParameterError(
                            original=self._original_name,
                            alias=self._alias_name,
                            operation=model.name
                        )
                    # Remove the alias parameter value and use the old name
                    # instead.
                    params[self._original_name] = params.pop(self._alias_name)
    def alias_parameter_in_documentation(self, event_name, section, **kwargs):
        """Rewrite the generated docs so only the alias name is shown."""
        if event_name.startswith('docs.request-params'):
            if self._original_name not in section.available_sections:
                return
            # Replace the name for parameter type
            param_section = section.get_section(self._original_name)
            param_type_section = param_section.get_section('param-type')
            self._replace_content(param_type_section)
            # Replace the name for the parameter description
            param_name_section = param_section.get_section('param-name')
            self._replace_content(param_name_section)
        elif event_name.startswith('docs.request-example'):
            section = section.get_section('structure-value')
            if self._original_name not in section.available_sections:
                return
            # Replace the name for the example
            param_section = section.get_section(self._original_name)
            self._replace_content(param_section)
    def _replace_content(self, section):
        """Swap every occurrence of the original name for the alias in
        the section's rendered text."""
        content = section.getvalue().decode('utf-8')
        updated_content = content.replace(
            self._original_name, self._alias_name)
        section.clear_text()
        section.write(updated_content)
class ClientMethodAlias(object):
    """Aliases a non-existent client method to an existing one."""

    def __init__(self, actual_name):
        """:param actual_name: The name of the method that actually exists on
            the client.
        """
        self._actual = actual_name

    def __call__(self, client, **kwargs):
        # Resolve to the real bound method at call time.
        return getattr(client, self._actual)
# TODO: Remove this class as it is no longer used
class HeaderToHostHoister(object):
    """Takes a header and moves it to the front of the hoststring."""
    _VALID_HOSTNAME = re.compile(r'(?!-)[a-z\d-]{1,63}(?<!-)$', re.IGNORECASE)

    def __init__(self, header_name):
        self._header_name = header_name

    def hoist(self, params, **kwargs):
        """Prepend the header's value (plus '.') to the request hostname.

        The value is validated as a single DNS label first. Intended as a
        target for the before-call event.
        """
        headers = params['headers']
        if self._header_name not in headers:
            return
        prefix = headers[self._header_name]
        self._ensure_header_is_valid_host(prefix)
        params['url'] = self._prepend_to_host(params['url'], prefix)

    def _ensure_header_is_valid_host(self, header):
        # Reject anything that is not a single valid hostname label.
        if self._VALID_HOSTNAME.match(header) is None:
            raise ParamValidationError(report=(
                'Hostnames must contain only - and alphanumeric characters, '
                'and between 1 and 63 characters long.'
            ))

    def _prepend_to_host(self, url, prefix):
        parts = urlsplit(url)
        new_netloc = '.'.join([prefix] + parts.netloc.split('.'))
        # Rebuild without a fragment, matching the original reassembly.
        return urlunsplit(
            (parts.scheme, new_netloc, parts.path, parts.query, ''))
def inject_api_version_header_if_needed(model, params, **kwargs):
    """Send x-amz-api-version on endpoint-discovery operations."""
    if model.is_endpoint_discovery_operation:
        params['headers']['x-amz-api-version'] = (
            model.service_model.api_version)
def remove_lex_v2_start_conversation(class_attributes, **kwargs):
    """Drop start_conversation: it requires h2, which is currently
    unsupported in Python."""
    class_attributes.pop('start_conversation', None)
def add_retry_headers(request, **kwargs):
    """Attach amz-sdk-invocation-id / amz-sdk-request retry telemetry headers.

    No-op when no retry context has been recorded on the request.
    """
    retries_context = request.context.get('retries')
    if not retries_context:
        return
    headers = request.headers
    headers['amz-sdk-invocation-id'] = retries_context['invocation-id']
    parts = []
    # Emit the keys in this fixed order, skipping any that are absent.
    for key in ('ttl', 'attempt', 'max'):
        if key in retries_context:
            parts.append('%s=%s' % (key, retries_context[key]))
    headers['amz-sdk-request'] = '; '.join(parts)
# This is a list of (event_name, handler).
# When a Session is created, everything in this list will be
# automatically registered with that Session.
# Entries are either (event, handler) or (event, handler, register_mode).
BUILTIN_HANDLERS = [
    ('choose-service-name', handle_service_name_alias),
    ('getattr.mturk.list_hi_ts_for_qualification_type',
     ClientMethodAlias('list_hits_for_qualification_type')),
    ('before-parameter-build.s3.UploadPart',
     convert_body_to_file_like_object, REGISTER_LAST),
    ('before-parameter-build.s3.PutObject',
     convert_body_to_file_like_object, REGISTER_LAST),
    ('creating-client-class', add_generate_presigned_url),
    ('creating-client-class.s3', add_generate_presigned_post),
    ('creating-client-class.iot-data', check_openssl_supports_tls_version_1_2),
    ('creating-client-class.lex-runtime-v2', remove_lex_v2_start_conversation),
    ('after-call.iam', json_decode_policies),
    ('after-call.ec2.GetConsoleOutput', decode_console_output),
    ('after-call.cloudformation.GetTemplate', json_decode_template_body),
    ('after-call.s3.GetBucketLocation', parse_get_bucket_location),
    ('before-parameter-build', generate_idempotent_uuid),
    ('before-parameter-build.s3', validate_bucket_name),
    ('before-parameter-build.s3.ListObjects',
     set_list_objects_encoding_type_url),
    ('before-parameter-build.s3.ListObjectsV2',
     set_list_objects_encoding_type_url),
    ('before-parameter-build.s3.ListObjectVersions',
     set_list_objects_encoding_type_url),
    ('before-parameter-build.s3.CopyObject',
     handle_copy_source_param),
    ('before-parameter-build.s3.UploadPartCopy',
     handle_copy_source_param),
    ('before-parameter-build.s3.CopyObject', validate_ascii_metadata),
    ('before-parameter-build.s3.PutObject', validate_ascii_metadata),
    ('before-parameter-build.s3.CreateMultipartUpload',
     validate_ascii_metadata),
    ('docs.*.s3.CopyObject.complete-section', document_copy_source_form),
    ('docs.*.s3.UploadPartCopy.complete-section', document_copy_source_form),
    ('before-call', add_recursion_detection_header),
    ('before-call.s3', add_expect_header),
    ('before-call.glacier', add_glacier_version),
    ('before-call.apigateway', add_accept_header),
    ('before-call.s3.PutObject', conditionally_calculate_md5),
    ('before-call.s3.UploadPart', conditionally_calculate_md5),
    ('before-call.s3.DeleteObjects', escape_xml_payload),
    ('before-call.s3.PutBucketLifecycleConfiguration', escape_xml_payload),
    ('before-call.glacier.UploadArchive', add_glacier_checksums),
    ('before-call.glacier.UploadMultipartPart', add_glacier_checksums),
    ('before-call.ec2.CopySnapshot', inject_presigned_url_ec2),
    ('request-created', add_retry_headers),
    ('request-created.machinelearning.Predict', switch_host_machinelearning),
    ('needs-retry.s3.UploadPartCopy', check_for_200_error, REGISTER_FIRST),
    ('needs-retry.s3.CopyObject', check_for_200_error, REGISTER_FIRST),
    ('needs-retry.s3.CompleteMultipartUpload', check_for_200_error,
     REGISTER_FIRST),
    ('choose-signer.cognito-identity.GetId', disable_signing),
    ('choose-signer.cognito-identity.GetOpenIdToken', disable_signing),
    ('choose-signer.cognito-identity.UnlinkIdentity', disable_signing),
    ('choose-signer.cognito-identity.GetCredentialsForIdentity',
     disable_signing),
    ('choose-signer.sts.AssumeRoleWithSAML', disable_signing),
    ('choose-signer.sts.AssumeRoleWithWebIdentity', disable_signing),
    ('choose-signer', set_operation_specific_signer),
    ('before-parameter-build.s3.HeadObject', sse_md5),
    ('before-parameter-build.s3.GetObject', sse_md5),
    ('before-parameter-build.s3.PutObject', sse_md5),
    ('before-parameter-build.s3.CopyObject', sse_md5),
    ('before-parameter-build.s3.CopyObject', copy_source_sse_md5),
    ('before-parameter-build.s3.CreateMultipartUpload', sse_md5),
    ('before-parameter-build.s3.UploadPart', sse_md5),
    ('before-parameter-build.s3.UploadPartCopy', sse_md5),
    ('before-parameter-build.s3.UploadPartCopy', copy_source_sse_md5),
    ('before-parameter-build.ec2.RunInstances', base64_encode_user_data),
    ('before-parameter-build.autoscaling.CreateLaunchConfiguration',
     base64_encode_user_data),
    ('before-parameter-build.route53', fix_route53_ids),
    ('before-parameter-build.glacier', inject_account_id),
    ('after-call.s3.ListObjects', decode_list_object),
    ('after-call.s3.ListObjectsV2', decode_list_object_v2),
    ('after-call.s3.ListObjectVersions', decode_list_object_versions),
    # Cloudsearchdomain search operation will be sent by HTTP POST
    ('request-created.cloudsearchdomain.Search',
     change_get_to_post),
    # Glacier documentation customizations
    # FIX: the two string fragments previously concatenated into
    # '..."-" bydefault if no value is not specified.' (missing space and
    # a double negative); the message now reads correctly.
    ('docs.*.glacier.*.complete-section',
     AutoPopulatedParam('accountId', 'Note: this parameter is set to "-" by '
                        'default if no value is specified.')
     .document_auto_populated_param),
    ('docs.*.glacier.UploadArchive.complete-section',
     AutoPopulatedParam('checksum').document_auto_populated_param),
    ('docs.*.glacier.UploadMultipartPart.complete-section',
     AutoPopulatedParam('checksum').document_auto_populated_param),
    ('docs.request-params.glacier.CompleteMultipartUpload.complete-section',
     document_glacier_tree_hash_checksum()),
    # Cloudformation documentation customizations
    ('docs.*.cloudformation.GetTemplate.complete-section',
     document_cloudformation_get_template_return_type),
    # UserData base64 encoding documentation customizations
    ('docs.*.ec2.RunInstances.complete-section',
     document_base64_encoding('UserData')),
    ('docs.*.autoscaling.CreateLaunchConfiguration.complete-section',
     document_base64_encoding('UserData')),
    # EC2 CopySnapshot documentation customizations
    ('docs.*.ec2.CopySnapshot.complete-section',
     AutoPopulatedParam('PresignedUrl').document_auto_populated_param),
    ('docs.*.ec2.CopySnapshot.complete-section',
     AutoPopulatedParam('DestinationRegion').document_auto_populated_param),
    # S3 SSE documentation modifications
    ('docs.*.s3.*.complete-section',
     AutoPopulatedParam('SSECustomerKeyMD5').document_auto_populated_param),
    # S3 SSE Copy Source documentation modifications
    ('docs.*.s3.*.complete-section',
     AutoPopulatedParam('CopySourceSSECustomerKeyMD5').document_auto_populated_param),
    # Add base64 information to Lambda
    ('docs.*.lambda.UpdateFunctionCode.complete-section',
     document_base64_encoding('ZipFile')),
    # The following S3 operations cannot actually accept a ContentMD5
    ('docs.*.s3.*.complete-section',
     HideParamFromOperations(
         's3', 'ContentMD5',
         ['DeleteObjects', 'PutBucketAcl', 'PutBucketCors',
          'PutBucketLifecycle', 'PutBucketLogging', 'PutBucketNotification',
          'PutBucketPolicy', 'PutBucketReplication', 'PutBucketRequestPayment',
          'PutBucketTagging', 'PutBucketVersioning', 'PutBucketWebsite',
          'PutObjectAcl']).hide_param),
    #############
    # RDS
    #############
    ('creating-client-class.rds', add_generate_db_auth_token),
    ('before-call.rds.CopyDBClusterSnapshot',
     inject_presigned_url_rds),
    ('before-call.rds.CreateDBCluster',
     inject_presigned_url_rds),
    ('before-call.rds.CopyDBSnapshot',
     inject_presigned_url_rds),
    ('before-call.rds.CreateDBInstanceReadReplica',
     inject_presigned_url_rds),
    ('before-call.rds.StartDBInstanceAutomatedBackupsReplication',
     inject_presigned_url_rds),
    # RDS PresignedUrl documentation customizations
    ('docs.*.rds.CopyDBClusterSnapshot.complete-section',
     AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
    ('docs.*.rds.CreateDBCluster.complete-section',
     AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
    ('docs.*.rds.CopyDBSnapshot.complete-section',
     AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
    ('docs.*.rds.CreateDBInstanceReadReplica.complete-section',
     AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
    ('docs.*.rds.StartDBInstanceAutomatedBackupsReplication.complete-section',
     AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
    #############
    # Neptune
    #############
    ('before-call.neptune.CopyDBClusterSnapshot',
     inject_presigned_url_rds),
    ('before-call.neptune.CreateDBCluster',
     inject_presigned_url_rds),
    # Neptune PresignedUrl documentation customizations
    ('docs.*.neptune.CopyDBClusterSnapshot.complete-section',
     AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
    ('docs.*.neptune.CreateDBCluster.complete-section',
     AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
    #############
    # DocDB
    #############
    ('before-call.docdb.CopyDBClusterSnapshot',
     inject_presigned_url_rds),
    ('before-call.docdb.CreateDBCluster',
     inject_presigned_url_rds),
    # DocDB PresignedUrl documentation customizations
    ('docs.*.docdb.CopyDBClusterSnapshot.complete-section',
     AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
    ('docs.*.docdb.CreateDBCluster.complete-section',
     AutoPopulatedParam('PreSignedUrl').document_auto_populated_param),
    ###########
    # SMS Voice
    ###########
    ('docs.title.sms-voice',
     DeprecatedServiceDocumenter('pinpoint-sms-voice').inject_deprecation_notice),
    ('before-call', inject_api_version_header_if_needed),
]
_add_parameter_aliases(BUILTIN_HANDLERS)
#Copyright (c) 2017 Andre Santos
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from collections import namedtuple
from math import pi, sqrt
from threading import Thread, Lock
import rospy
from geometry_msgs.msg import Twist
from kobuki_msgs.msg import BumperEvent
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion
# Full turn in radians; used to normalize angle differences.
PI2 = 2 * pi
LINEAR = 0.2 # m/s - default forward speed
ANGULAR = pi/6 # rad/s - default turning speed
def angle_distance(alpha, beta):
    """Return the signed shortest angular distance from ``beta`` to ``alpha``.

    Both inputs are in radians; the result is in ``(-pi, pi]`` with the sign
    giving the direction of the shortest rotation.
    """
    full_turn = 2 * pi
    diff = alpha - beta
    # The raw difference already points the "short way" when it lies in
    # [0, pi] or [-2*pi, -pi]; otherwise the short way is the opposite sign.
    if (0.0 <= diff <= pi) or (-full_turn <= diff <= -pi):
        sign = 1
    else:
        sign = -1
    wrapped = abs(diff) % full_turn
    magnitude = full_turn - wrapped if wrapped > pi else wrapped
    return magnitude * sign
###############################################################################
# Robot State
###############################################################################
# Immutable snapshot of odometry deltas accumulated and bumper flags latched
# since the previous poll (produced and reset by Robot.get_state).
State = namedtuple("State", ["translation", "rotation",
                             "bump_center", "bump_left", "bump_right"])
class Robot(object):
    """Thread-safe accumulator of odometry progress and bumper events.

    Subscriber callbacks feed data in through set_odometry()/set_bumper();
    the publisher thread drains an atomic snapshot via get_state().
    """
    def __init__(self):
        self.translation = 0.0
        self.rotation = 0.0
        self.bump_left = False
        self.bump_right = False
        self.bump_center = False
        self.x = 0.0
        self.y = 0.0
        self.a = 0.0
        self.lock = Lock()
    # Thread: subscriber
    def set_odometry(self, x, y, a):
        """Record a new pose; accumulate deltas unless a bumper is latched."""
        with self.lock:
            bumped = self.bump_center or self.bump_left or self.bump_right
            if not bumped:
                dx = x - self.x
                dy = y - self.y
                self.translation += sqrt(dx ** 2 + dy ** 2)
                self.rotation += abs(angle_distance(a, self.a))
            self.x = x
            self.y = y
            self.a = a
    # Thread: subscriber
    def set_bumper(self, center, left, right):
        """Latch any pressed bumper flag (flags only clear in get_state)."""
        with self.lock:
            self.bump_center |= bool(center)
            self.bump_left |= bool(left)
            self.bump_right |= bool(right)
    # Thread: publisher
    def get_state(self):
        """Return a State snapshot and reset all accumulators and flags."""
        with self.lock:
            snapshot = State(self.translation, self.rotation,
                             self.bump_center, self.bump_left, self.bump_right)
            self.translation = self.rotation = 0.0
            self.bump_center = self.bump_left = self.bump_right = False
        return snapshot
###############################################################################
# Publisher
###############################################################################
class Publisher(object):
    """Drives the robot by publishing Twist commands on the "cmd_vel" topic.

    Runs in its own daemon thread (start()/spin()). Motion requests are
    distance-based: andar() walks forward N meters, rodar() turns |N| radians
    with the sign selecting direction. Progress is measured against odometry
    deltas polled from the shared Robot object. User callbacks (init,
    bump_*, walk_done, rotate_done) default to skip(), which pops the next
    queued command or terminates.
    """
    def __init__(self, robot, callbacks):
        self.robot = robot
        self.to_walk = 0.0      # remaining distance to travel [m]
        self.to_rotate = 0.0    # remaining angle to turn [rad]
        # Countdown of loop cycles publishing "stop" before a newly issued
        # command takes effect (set to 3 by andar()/rodar()).
        self.change_cmd = 0
        self.commands = []      # queue of (bound method, argument) pairs
        self.contador = 0       # user-visible counter (conta()/desconta())
        self.thread = None
        self.twist = None       # current outgoing velocity command
        self.shutdown = False
        self.stop_msg = Twist()
        self.stop_msg.linear.x = 0
        self.stop_msg.linear.y = 0
        self.stop_msg.linear.z = 0
        self.stop_msg.angular.x = 0
        self.stop_msg.angular.y = 0
        self.stop_msg.angular.z = 0
        # Missing callbacks fall back to skip(), which advances the queue.
        self.init = callbacks.get("init") or self.skip
        self.bump_center = callbacks.get("bump_center") or self.skip
        self.bump_left = callbacks.get("bump_left") or self.skip
        self.bump_right = callbacks.get("bump_right") or self.skip
        self.walk_done = callbacks.get("walk_done") or self.skip
        self.rotate_done = callbacks.get("rotate_done") or self.skip
    def start(self):
        """Launch the spin() loop in a daemon thread."""
        self.thread = Thread(target = self.spin)
        self.thread.daemon = True
        self.thread.start()
    def spin(self):
        """Main 15 Hz loop: publish velocity, track progress, fire callbacks."""
        rate = rospy.Rate(15)
        cmd_vel = rospy.Publisher("cmd_vel", Twist, queue_size = 10)
        self.set_twist(0.0, 0.0)
        self.init(self)
        while not self.shutdown:
            state = self.robot.get_state()
            if self.change_cmd:
                # Publish stop for a few cycles so the base settles before
                # the next command's twist is sent.
                self.change_cmd -= 1
                cmd_vel.publish(self.stop_msg)
            else:
                if self.to_walk > 0.0:
                    self.to_walk -= state.translation
                    if self.to_walk <= 0.0:
                        self.to_walk = 0.0
                        self.set_twist(0.0, 0.0)
                        self.walk_done(self)
                    # walk_done may have issued a new command; honor its
                    # stop window immediately.
                    if self.change_cmd:
                        cmd_vel.publish(self.stop_msg)
                    else:
                        cmd_vel.publish(self.twist)
                if self.to_rotate > 0.0:
                    self.to_rotate -= state.rotation
                    if self.to_rotate <= 0.0:
                        self.to_rotate = 0.0
                        self.set_twist(0.0, 0.0)
                        self.rotate_done(self)
                    if self.change_cmd:
                        cmd_vel.publish(self.stop_msg)
                    else:
                        cmd_vel.publish(self.twist)
                # Bumper callbacks fire at most one per cycle, center first.
                if state.bump_center:
                    self.bump_center(self)
                elif state.bump_left:
                    self.bump_left(self)
                elif state.bump_right:
                    self.bump_right(self)
            rate.sleep()
        cmd_vel.unregister()
    def set_twist(self, vx, wz):
        """Set the outgoing velocity command: vx [m/s] forward, wz [rad/s] yaw."""
        if vx == 0.0 and wz == 0.0:
            self.twist = self.stop_msg
        else:
            self.twist = Twist()
            self.twist.linear.x = vx
            self.twist.linear.y = 0
            self.twist.linear.z = 0
            self.twist.angular.x = 0
            self.twist.angular.y = 0
            self.twist.angular.z = wz
    def skip(self, robot):
        """Default callback: run the next queued command, or terminate."""
        if self.commands:
            cmd, val = self.commands.pop(0)
            cmd(val)
        else:
            self.terminar()
    def andar(self, meters):
        """Walk forward the given distance in meters ("andar" = walk)."""
        self.change_cmd = 3
        self.to_rotate = 0.0
        if meters > 0:
            self.to_walk = meters
            self.set_twist(LINEAR, 0.0)
    def rodar(self, radians):
        """Rotate by the given signed angle in radians ("rodar" = rotate)."""
        self.change_cmd = 3
        self.to_walk = 0.0
        if radians > 0:
            self.to_rotate = radians
            self.set_twist(0.0, ANGULAR)
        elif radians < 0:
            self.to_rotate = -radians
            self.set_twist(0.0, -ANGULAR)
    def executar_depois(self, cmd, value):
        """Queue command ``cmd`` (method name) with ``value`` to run later
        ("executar depois" = run afterwards)."""
        self.commands.append((getattr(self, cmd), value))
    def cancelar_comando(self):
        """Cancel the current motion and start the next queued command, if any
        ("cancelar comando" = cancel command)."""
        if self.to_walk > 0.0 or self.to_rotate > 0.0:
            self.to_walk = 0.0
            self.to_rotate = 0.0
            self.set_twist(0.0, 0.0)
            if self.commands:
                cmd, val = self.commands.pop(0)
                cmd(val)
    def conta(self):
        """Increment the user counter ("conta" = count)."""
        self.contador += 1
    def desconta(self):
        """Decrement the user counter ("desconta" = count down)."""
        self.contador -= 1
    def terminar(self):
        """Stop all motion and request loop shutdown ("terminar" = finish)."""
        self.to_walk = 0.0
        self.to_rotate = 0.0
        self.set_twist(0.0, 0.0)
        self.shutdown = True
###############################################################################
# Robot Controller
###############################################################################
class RobotController(object):
    """Wires ROS topics to the shared Robot state and the Publisher loop.

    The first odometry message seeds the pose and starts the publisher
    thread; every later message is routed to Robot.set_odometry.
    """
    def __init__(self, callbacks):
        self.robot = Robot()
        self.publisher = Publisher(self.robot, callbacks)
        self.odom = None
        self.bump = None
        # Swapped to Robot.set_odometry after the first odometry message.
        self.odom_callback = self._on_first_odom
    def run(self):
        """Spin the ROS node until shutdown, then tear everything down."""
        rospy.init_node("turtlebot")
        self.odom = rospy.Subscriber("odom", Odometry, self._on_odom)
        self.bump = rospy.Subscriber("events/bumper", BumperEvent, self._on_bump)
        try:
            rospy.spin()
        except rospy.ROSInterruptException:
            pass
        finally:
            self.odom.unregister()
            self.odom = None
            self.bump.unregister()
            self.bump = None
            self.publisher.shutdown = True
            self.publisher.thread.join()
            self.publisher.thread = None
    def _on_odom(self, msg):
        """Extract (x, y, yaw) from an Odometry message and forward it."""
        position = msg.pose.pose.position
        q = msg.pose.pose.orientation
        _, _, yaw = euler_from_quaternion((q.x, q.y, q.z, q.w))
        self.odom_callback(position.x, position.y, yaw)
    def _on_first_odom(self, x, y, a):
        """Seed the robot pose, start publishing, then switch callbacks."""
        self.robot.x = x
        self.robot.y = y
        self.robot.a = a
        self.publisher.start()
        self.odom_callback = self.robot.set_odometry
    def _on_bump(self, msg):
        """Latch the pressed bumper (center/left/right) on the robot state."""
        if msg.state != BumperEvent.PRESSED:
            return
        if msg.bumper == BumperEvent.CENTER:
            self.robot.set_bumper(True, False, False)
        elif msg.bumper == BumperEvent.LEFT:
            self.robot.set_bumper(False, True, False)
        elif msg.bumper == BumperEvent.RIGHT:
            self.robot.set_bumper(False, False, True)
|
|
from functools import partial
import sys
import textwrap
from .vendor import six
from .context import Context
from .loader import Loader
from .parser import Parser, Context as ParserContext, Argument
from .executor import Executor
from .exceptions import Failure, CollectionNotFound, ParseError
from .util import debug, pty_size
from ._version import __version__
def task_name_to_key(x):
    """Sort key ordering task names by namespace depth, then alphabetically."""
    depth = x.count('.')
    return (depth, x)
# Sorter for task names: shallow (fewest dots) first, then alphabetical.
sort_names = partial(sorted, key=task_name_to_key)
# Indentation used by help/listing output below.
indent_num = 2
indent = " " * indent_num
def print_help(tuples):
    """Print a two-column (flag spec, wrapped description) help listing.

    :param tuples: iterable of ``(flag_spec, help_str)`` pairs.

    Fix: previously crashed with ``ValueError`` (``max()`` of an empty
    sequence) when given no tuples, and ``TextWrapper`` raised if a very
    narrow terminal drove the description width to zero or below.
    """
    tuples = list(tuples)
    if not tuples:
        # Nothing to show; keep the trailing blank line for consistency.
        print('')
        return
    padding = 3
    # Calculate column sizes: don't wrap flag specs, give what's left over
    # to the descriptions.
    flag_width = max(len(x[0]) for x in tuples)
    # Clamp to >= 1 so TextWrapper never receives an invalid width.
    desc_width = max(pty_size()[0] - flag_width - indent_num - padding - 1, 1)
    wrapper = textwrap.TextWrapper(width=desc_width)
    for flag_spec, help_str in tuples:
        # Wrap descriptions/help text
        help_chunks = wrapper.wrap(help_str)
        # Print flag spec + padding
        flag_padding = flag_width - len(flag_spec)
        spec = ''.join((
            indent,
            flag_spec,
            flag_padding * ' ',
            padding * ' '
        ))
        # Print help text as needed
        if help_chunks:
            print(spec + help_chunks[0])
            for chunk in help_chunks[1:]:
                print((' ' * len(spec)) + chunk)
        else:
            print(spec)
    print('')
def parse_gracefully(parser, argv):
    """
    Run ``parser.parse_argv(argv)``, turning ``ParseError`` into a clean exit.

    Instead of a traceback, the error's message is printed to the user and
    the program exits. On success, the result of ``parse_argv`` is returned.
    """
    try:
        result = parser.parse_argv(argv)
    except ParseError as e:
        sys.exit(str(e))
    return result
def parse(argv, collection=None):
    """
    Parse ``argv`` list-of-strings into useful core & per-task structures.

    :param argv: list of CLI tokens (program name already stripped).
    :param collection: optional pre-built task collection; when given, the
        filesystem loading step is skipped entirely.
    :returns:
        Three-tuple of ``args`` (core, non-task `.Argument` objects), ``collection``
        (compiled `.Collection` of tasks, using defaults or core arguments
        affecting collection generation) and ``tasks`` (a list of
        `~.parser.context.Context` objects representing the requested task
        executions).

    .. note::
        Calls ``sys.exit`` directly for ``--version``, ``--help`` (core and
        per-task) and ``--list``, so it may never return to the caller.
    """
    # Initial/core parsing (core options can affect the rest of the parsing)
    initial_context = ParserContext(args=(
        # TODO: make '--collection' a list-building arg, not a string
        Argument(
            names=('collection', 'c'),
            help="Specify collection name to load. May be given >1 time."
        ),
        Argument(
            names=('root', 'r'),
            help="Change root directory used for finding task modules."
        ),
        Argument(
            names=('help', 'h'),
            optional=True,
            help="Show core or per-task help and exit."
        ),
        Argument(
            names=('version', 'V'),
            kind=bool,
            default=False,
            help="Show version and exit."
        ),
        Argument(
            names=('list', 'l'),
            kind=bool,
            default=False,
            help="List available tasks."
        ),
        Argument(
            names=('no-dedupe',),
            kind=bool,
            default=False,
            help="Disable task deduplication."
        ),
        Argument(
            names=('echo', 'e'),
            kind=bool,
            default=False,
            help="Echo executed commands before running.",
        ),
        Argument(
            names=('warn-only', 'w'),
            kind=bool,
            default=False,
            help="Warn, instead of failing, when shell commands fail.",
        ),
        Argument(
            names=('pty', 'p'),
            kind=bool,
            default=False,
            help="Use a pty when executing shell commands.",
        ),
        Argument(
            names=('hide', 'H'),
            help="Set default value of run()'s 'hide' kwarg.",
        )
    ))
    # 'core' ends up with an .unparsed attribute containing what was left over.
    debug("Parsing initial context (core args)")
    parser = Parser(initial=initial_context, ignore_unknown=True)
    core = parse_gracefully(parser, argv)
    debug("After core-args pass, leftover argv: %r" % (core.unparsed,))
    args = core[0].args
    # Print version & exit if necessary
    if args.version.value:
        print("Invoke %s" % __version__)
        sys.exit(0)
    # Core (no value given) --help output
    # TODO: if this wants to display context sensitive help (e.g. a combo help
    # and available tasks listing; or core flags modified by plugins/task
    # modules) it will have to move farther down.
    # NOTE: '== True' is deliberate: a bare --help yields True, while
    # '--help <task>' yields the task name (str) and is handled further down.
    if args.help.value == True:
        print("Usage: inv[oke] [--core-opts] task1 [--task1-opts] ... taskN [--taskN-opts]")
        print("")
        print("Core options:")
        print_help(initial_context.help_tuples())
        sys.exit(0)
    # Load collection (default or specified) and parse leftovers
    # (Skip loading if somebody gave us an explicit task collection.)
    if not collection:
        debug("No collection given, loading from %r" % args.root.value)
        loader = Loader(root=args.root.value)
        collection = loader.load_collection(args.collection.value)
    parser = Parser(contexts=collection.to_contexts())
    debug("Parsing actual tasks against collection %r" % collection)
    tasks = parse_gracefully(parser, core.unparsed)
    # Per-task help. Use the parser's contexts dict as that's the easiest way
    # to obtain Context objects here - which are what help output needs.
    name = args.help.value
    if name in parser.contexts:
        # Setup
        ctx = parser.contexts[name]
        tuples = ctx.help_tuples()
        docstring = collection[name].__doc__
        header = "Usage: inv[oke] [--core-opts] %s %%s[other tasks here ...]" % name
        print(header % ("[--options] " if tuples else ""))
        print("")
        print("Docstring:")
        if docstring:
            # Really wish textwrap worked better for this.
            doclines = docstring.lstrip().splitlines()
            for line in doclines:
                print(indent + textwrap.dedent(line))
            # Print trailing blank line if docstring didn't end with one
            if textwrap.dedent(doclines[-1]):
                print("")
        else:
            print(indent + "none")
            print("")
        print("Options:")
        if tuples:
            print_help(tuples)
        else:
            print(indent + "none")
            print("")
        sys.exit(0)
    # Print discovered tasks if necessary
    if args.list.value:
        print("Available tasks:\n")
        # Sort in depth, then alpha, order
        task_names = collection.task_names
        names = sort_names(task_names.keys())
        for primary in names:
            aliases = sort_names(task_names[primary])
            out = primary
            if aliases:
                out += " (%s)" % ', '.join(aliases)
            print("  %s" % out)
        print("")
        sys.exit(0)
    # Return to caller so they can handle the results
    return args, collection, tasks
def derive_opts(args):
run = {}
if args['warn-only'].value:
run['warn'] = True
if args.pty.value:
run['pty'] = True
if args.hide.value:
run['hide'] = args.hide.value
if args.echo.value:
run['echo'] = True
return {'run': run}
def dispatch(argv):
    """Parse ``argv``, then execute every requested task via an `.Executor`.

    Exits the process with the failing task's exit code when a `.Failure`
    is raised; otherwise returns the list of per-task results. May also
    exit early inside parse() (--help/--version/--list).
    """
    args, collection, tasks = parse(argv)
    results = []
    executor = Executor(collection, Context(**derive_opts(args)))
    # Take action based on 'core' options and the 'tasks' found
    for context in tasks:
        kwargs = {}
        for _, arg in six.iteritems(context.args):
            # Use the arg obj's internal name - not what was necessarily given
            # on the CLI. (E.g. --my-option vs --my_option for
            # mytask(my_option=xxx) requires this.)
            # TODO: store 'given' name somewhere in case somebody wants to see
            # it when handling args.
            kwargs[arg.name] = arg.value
        try:
            # TODO: allow swapping out of Executor subclasses based on core
            # config options
            results.append(executor.execute(
                name=context.name,
                kwargs=kwargs,
                dedupe=not args['no-dedupe']
            ))
        except Failure as f:
            sys.exit(f.result.exited)
    return results
def main():
    """CLI entry point: hand ``sys.argv`` (minus the program name) to dispatch."""
    cli_args = sys.argv[1:]
    debug("Base argv from sys: %r" % (cli_args,))
    dispatch(cli_args)
|
|
from collections import OrderedDict, defaultdict, namedtuple, Counter
from collections.abc import Iterable
from copy import deepcopy
from numbers import Real
from pathlib import Path
import re
import warnings
from xml.etree import ElementTree as ET
import numpy as np
import openmc
import openmc.data
import openmc.checkvalue as cv
from ._xml import clean_indentation, reorder_attributes
from .mixin import IDManagerMixin
# Units for density supported by OpenMC ('sum' derives density from nuclide
# amounts; 'macro' applies only to multi-group calculations)
DENSITY_UNITS = ('g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum',
                 'macro')
# Lightweight record for one nuclide entry: (name, percent, percent_type).
NuclideTuple = namedtuple('NuclideTuple', ['name', 'percent', 'percent_type'])
class Material(IDManagerMixin):
"""A material composed of a collection of nuclides/elements.
To create a material, one should create an instance of this class, add
nuclides or elements with :meth:`Material.add_nuclide` or
`Material.add_element`, respectively, and set the total material density
with `Material.set_density()`. The material can then be assigned to a cell
using the :attr:`Cell.fill` attribute.
Parameters
----------
material_id : int, optional
Unique identifier for the material. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the material. If not specified, the name will be the empty
string.
temperature : float, optional
Temperature of the material in Kelvin. If not specified, the material
inherits the default temperature applied to the model.
Attributes
----------
id : int
Unique identifier for the material
temperature : float
Temperature of the material in Kelvin.
density : float
Density of the material (units defined separately)
density_units : str
Units used for `density`. Can be one of 'g/cm3', 'g/cc', 'kg/m3',
'atom/b-cm', 'atom/cm3', 'sum', or 'macro'. The 'macro' unit only
applies in the case of a multi-group calculation.
depletable : bool
Indicate whether the material is depletable.
nuclides : list of namedtuple
List in which each item is a namedtuple consisting of a nuclide string,
the percent density, and the percent type ('ao' or 'wo'). The namedtuple
has field names ``name``, ``percent``, and ``percent_type``.
isotropic : list of str
Nuclides for which elastic scattering should be treated as though it
were isotropic in the laboratory system.
average_molar_mass : float
The average molar mass of nuclides in the material in units of grams per
mol. For example, UO2 with 3 nuclides will have an average molar mass
of 270 / 3 = 90 g / mol.
volume : float
Volume of the material in cm^3. This can either be set manually or
calculated in a stochastic volume calculation and added via the
:meth:`Material.add_volume_information` method.
paths : list of str
The paths traversed through the CSG tree to reach each material
instance. This property is initialized by calling the
:meth:`Geometry.determine_paths` method.
num_instances : int
The number of instances of this material throughout the geometry. This
property is initialized by calling the :meth:`Geometry.determine_paths`
method.
fissionable_mass : float
Mass of fissionable nuclides in the material in [g]. Requires that the
:attr:`volume` attribute is set.
"""
next_id = 1
used_ids = set()
    def __init__(self, material_id=None, name='', temperature=None):
        # Initialize class attributes
        self.id = material_id
        self.name = name
        self.temperature = temperature
        self._density = None
        self._density_units = 'sum'   # default: density derived from amounts
        self._depletable = False
        self._paths = None            # filled by Geometry.determine_paths()
        self._num_instances = None    # filled by Geometry.determine_paths()
        self._volume = None
        self._atoms = {}              # filled by add_volume_information()
        self._isotropic = []
        # A list of tuples (nuclide, percent, percent type)
        self._nuclides = []
        # The single instance of Macroscopic data present in this material
        # (only one is allowed, hence this is different than _nuclides, etc)
        self._macroscopic = None
        # If specified, a list of table names
        self._sab = []
    def __repr__(self):
        """Return a multi-line, human-readable summary of the material."""
        string = 'Material\n'
        string += '{: <16}=\t{}\n'.format('\tID', self._id)
        string += '{: <16}=\t{}\n'.format('\tName', self._name)
        string += '{: <16}=\t{}\n'.format('\tTemperature', self._temperature)
        string += '{: <16}=\t{}'.format('\tDensity', self._density)
        string += f' [{self._density_units}]\n'
        string += '{: <16}\n'.format('\tS(a,b) Tables')
        for sab in self._sab:
            string += '{: <16}=\t{}\n'.format('\tS(a,b)', sab)
        string += '{: <16}\n'.format('\tNuclides')
        for nuclide, percent, percent_type in self._nuclides:
            string += '{: <16}'.format('\t{}'.format(nuclide))
            string += '=\t{: <12} [{}]\n'.format(percent, percent_type)
        # Macroscopic section only appears when such data was added.
        if self._macroscopic is not None:
            string += '{: <16}\n'.format('\tMacroscopic Data')
            string += '{: <16}'.format('\t{}'.format(self._macroscopic))
        return string
    @property
    def name(self):
        """str: Name of the material ('' when unset)."""
        return self._name
    @property
    def temperature(self):
        """float or None: Temperature of the material in Kelvin."""
        return self._temperature
    @property
    def density(self):
        """float or None: Density value (units given by density_units)."""
        return self._density
    @property
    def density_units(self):
        """str: Units for density; one of DENSITY_UNITS."""
        return self._density_units
    @property
    def depletable(self):
        """bool: Whether the material is treated as depletable."""
        return self._depletable
    @property
    def paths(self):
        """list of str: CSG paths to each instance; requires
        Geometry.determine_paths() to have been called."""
        if self._paths is None:
            raise ValueError('Material instance paths have not been determined. '
                             'Call the Geometry.determine_paths() method.')
        return self._paths
    @property
    def num_instances(self):
        """int: Number of instances of this material; requires
        Geometry.determine_paths() to have been called."""
        if self._num_instances is None:
            raise ValueError(
                'Number of material instances have not been determined. Call '
                'the Geometry.determine_paths() method.')
        return self._num_instances
    @property
    def nuclides(self):
        """list of NuclideTuple: (name, percent, percent_type) entries."""
        return self._nuclides
    @property
    def isotropic(self):
        """list of str: Nuclides treated with isotropic (lab) scattering."""
        return self._isotropic
@property
def average_molar_mass(self):
# Using the sum of specified atomic or weight amounts as a basis, sum
# the mass and moles of the material
mass = 0.
moles = 0.
for nuc in self.nuclides:
if nuc.percent_type == 'ao':
mass += nuc.percent * openmc.data.atomic_mass(nuc.name)
moles += nuc.percent
else:
moles += nuc.percent / openmc.data.atomic_mass(nuc.name)
mass += nuc.percent
# Compute and return the molar mass
return mass / moles
    @property
    def volume(self):
        """float or None: Material volume in cm^3 (set manually or via
        add_volume_information)."""
        return self._volume
    @name.setter
    def name(self, name):
        # None is coerced to the empty string so .name is always a str.
        if name is not None:
            cv.check_type(f'name for Material ID="{self._id}"',
                          name, str)
            self._name = name
        else:
            self._name = ''
    @temperature.setter
    def temperature(self, temperature):
        # None is allowed: the material then inherits the model default.
        cv.check_type(f'Temperature for Material ID="{self._id}"',
                      temperature, (Real, type(None)))
        self._temperature = temperature
    @depletable.setter
    def depletable(self, depletable):
        cv.check_type(f'Depletable flag for Material ID="{self._id}"',
                      depletable, bool)
        self._depletable = depletable
    @volume.setter
    def volume(self, volume):
        # None is accepted silently to allow clearing the volume.
        if volume is not None:
            cv.check_type('material volume', volume, Real)
        self._volume = volume
    @isotropic.setter
    def isotropic(self, isotropic):
        cv.check_iterable_type('Isotropic scattering nuclides', isotropic,
                               str)
        self._isotropic = list(isotropic)
    @property
    def fissionable_mass(self):
        """float: Mass in [g] of heavy nuclides in the material.

        Requires :attr:`volume` to be set. NOTE(review): counts every
        nuclide with Z >= 90 (thorium and above); actinium (Z=89) is
        excluded by this threshold.
        """
        if self.volume is None:
            raise ValueError("Volume must be set in order to determine mass.")
        density = 0.0
        # Assumes get_nuclide_atom_densities() values are (nuclide,
        # atom/b-cm) pairs -- TODO confirm against its definition (not
        # visible in this file section).
        for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
            Z = openmc.data.zam(nuc)[0]
            if Z >= 90:
                # atom/b-cm -> atom/cm3 via 1e24, then to g/cm3 via molar
                # mass over Avogadro's number.
                density += 1e24 * atoms_per_cc * openmc.data.atomic_mass(nuc) \
                / openmc.data.AVOGADRO
        return density*self.volume
@classmethod
def from_hdf5(cls, group):
"""Create material from HDF5 group
Parameters
----------
group : h5py.Group
Group in HDF5 file
Returns
-------
openmc.Material
Material instance
"""
mat_id = int(group.name.split('/')[-1].lstrip('material '))
name = group['name'][()].decode() if 'name' in group else ''
density = group['atom_density'][()]
if 'nuclide_densities' in group:
nuc_densities = group['nuclide_densities'][()]
# Create the Material
material = cls(mat_id, name)
material.depletable = bool(group.attrs['depletable'])
if 'volume' in group.attrs:
material.volume = group.attrs['volume']
if "temperature" in group.attrs:
material.temperature = group.attrs["temperature"]
# Read the names of the S(a,b) tables for this Material and add them
if 'sab_names' in group:
sab_tables = group['sab_names'][()]
for sab_table in sab_tables:
name = sab_table.decode()
material.add_s_alpha_beta(name)
# Set the Material's density to atom/b-cm as used by OpenMC
material.set_density(density=density, units='atom/b-cm')
if 'nuclides' in group:
nuclides = group['nuclides'][()]
# Add all nuclides to the Material
for fullname, density in zip(nuclides, nuc_densities):
name = fullname.decode().strip()
material.add_nuclide(name, percent=density, percent_type='ao')
if 'macroscopics' in group:
macroscopics = group['macroscopics'][()]
# Add all macroscopics to the Material
for fullname in macroscopics:
name = fullname.decode().strip()
material.add_macroscopic(name)
return material
def add_volume_information(self, volume_calc):
"""Add volume information to a material.
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
if volume_calc.domain_type == 'material':
if self.id in volume_calc.volumes:
self._volume = volume_calc.volumes[self.id].n
self._atoms = volume_calc.atoms[self.id]
else:
raise ValueError('No volume information found for material ID={}.'
.format(self.id))
else:
raise ValueError('No volume information found for material ID={}.'
.format(self.id))
def set_density(self, units, density=None):
"""Set the density of the material
Parameters
----------
units : {'g/cm3', 'g/cc', 'kg/m3', 'atom/b-cm', 'atom/cm3', 'sum', 'macro'}
Physical units of density.
density : float, optional
Value of the density. Must be specified unless units is given as
'sum'.
"""
cv.check_value('density units', units, DENSITY_UNITS)
self._density_units = units
if units == 'sum':
if density is not None:
msg = 'Density "{}" for Material ID="{}" is ignored ' \
'because the unit is "sum"'.format(density, self.id)
warnings.warn(msg)
else:
if density is None:
msg = 'Unable to set the density for Material ID="{}" ' \
'because a density value must be given when not using ' \
'"sum" unit'.format(self.id)
raise ValueError(msg)
cv.check_type('the density for Material ID="{}"'.format(self.id),
density, Real)
self._density = density
    def add_nuclide(self, nuclide, percent, percent_type='ao'):
        """Add a nuclide to the material

        Parameters
        ----------
        nuclide : str
            Nuclide to add, e.g., 'Mo95'
        percent : float
            Atom or weight percent
        percent_type : {'ao', 'wo'}
            'ao' for atom percent and 'wo' for weight percent

        Notes
        -----
        Adding an actinide (Z >= 89) automatically marks the material as
        depletable. An unrecognized nuclide name only warns; the entry is
        still appended.

        """
        cv.check_type('nuclide', nuclide, str)
        cv.check_type('percent', percent, Real)
        cv.check_value('percent type', percent_type, {'ao', 'wo'})
        # Nuclides and macroscopic data are mutually exclusive.
        if self._macroscopic is not None:
            msg = 'Unable to add a Nuclide to Material ID="{}" as a ' \
                  'macroscopic data-set has already been added'.format(self._id)
            raise ValueError(msg)
        # If nuclide name doesn't look valid, give a warning
        try:
            Z, _, _ = openmc.data.zam(nuclide)
        except ValueError as e:
            warnings.warn(str(e))
        else:
            # For actinides, have the material be depletable by default
            if Z >= 89:
                self.depletable = True
        self._nuclides.append(NuclideTuple(nuclide, percent, percent_type))
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the material
Parameters
----------
nuclide : str
Nuclide to remove
"""
cv.check_type('nuclide', nuclide, str)
# If the Material contains the Nuclide, delete it
for nuc in reversed(self.nuclides):
if nuclide == nuc.name:
self.nuclides.remove(nuc)
    def add_macroscopic(self, macroscopic):
        """Add a macroscopic to the material. This will also set the
        density of the material to 1.0, unless it has been otherwise set,
        as a default for Macroscopic cross sections.

        Parameters
        ----------
        macroscopic : str
            Macroscopic to add

        """
        # Ensure no nuclides, elements, or sab are added since these would be
        # incompatible with macroscopics
        if self._nuclides or self._sab:
            msg = 'Unable to add a Macroscopic data set to Material ID="{}" ' \
                  'with a macroscopic value "{}" as an incompatible data ' \
                  'member (i.e., nuclide or S(a,b) table) ' \
                  'has already been added'.format(self._id, macroscopic)
            raise ValueError(msg)
        if not isinstance(macroscopic, str):
            msg = 'Unable to add a Macroscopic to Material ID="{}" with a ' \
                  'non-string value "{}"'.format(self._id, macroscopic)
            raise ValueError(msg)
        # At most one macroscopic data set is allowed per material.
        if self._macroscopic is None:
            self._macroscopic = macroscopic
        else:
            msg = 'Unable to add a Macroscopic to Material ID="{}". ' \
                  'Only one Macroscopic allowed per ' \
                  'Material.'.format(self._id)
            raise ValueError(msg)
        # Generally speaking, the density for a macroscopic object will
        # be 1.0. Therefore, lets set density to 1.0 so that the user
        # doesnt need to set it unless its needed.
        # Of course, if the user has already set a value of density,
        # then we will not override it.
        if self._density is None:
            self.set_density('macro', 1.0)
def remove_macroscopic(self, macroscopic):
"""Remove a macroscopic from the material
Parameters
----------
macroscopic : str
Macroscopic to remove
"""
if not isinstance(macroscopic, str):
msg = 'Unable to remove a Macroscopic "{}" in Material ID="{}" ' \
'since it is not a string'.format(self._id, macroscopic)
raise ValueError(msg)
# If the Material contains the Macroscopic, delete it
if macroscopic == self._macroscopic:
self._macroscopic = None
    def add_element(self, element, percent, percent_type='ao', enrichment=None,
                    enrichment_target=None, enrichment_type=None):
        """Add a natural element to the material

        Parameters
        ----------
        element : str
            Element to add, e.g., 'Zr' or 'Zirconium'
        percent : float
            Atom or weight percent
        percent_type : {'ao', 'wo'}, optional
            'ao' for atom percent and 'wo' for weight percent. Defaults to atom
            percent.
        enrichment : float, optional
            Enrichment of an enrichment_target nuclide in percent (ao or wo).
            If enrichment_target is not supplied then it is enrichment for U235
            in weight percent. For example, input 4.95 for 4.95 weight percent
            enriched U.
            Default is None (natural composition).
        enrichment_target: str, optional
            Single nuclide name to enrich from a natural composition (e.g., 'O16')

            .. versionadded:: 0.12
        enrichment_type: {'ao', 'wo'}, optional
            'ao' for enrichment as atom percent and 'wo' for weight percent.
            Default is: 'ao' for two-isotope enrichment; 'wo' for U enrichment

            .. versionadded:: 0.12

        Notes
        -----
        General enrichment procedure is allowed only for elements composed of
        two isotopes. If `enrichment_target` is given without `enrichment`
        natural composition is added to the material.

        """
        cv.check_type('nuclide', element, str)
        cv.check_type('percent', percent, Real)
        cv.check_value('percent type', percent_type, {'ao', 'wo'})
        # Make sure element name is just that
        if not element.isalpha():
            raise ValueError("Element name should be given by the "
                             "element's symbol or name, e.g., 'Zr', 'zirconium'")
        # Allow for element identifier to be given as a symbol or name
        if len(element) > 2:
            # Full element name given, e.g. 'zirconium'; map to its symbol.
            el = element.lower()
            element = openmc.data.ELEMENT_SYMBOL.get(el)
            if element is None:
                msg = 'Element name "{}" not recognised'.format(el)
                raise ValueError(msg)
        else:
            if element[0].islower():
                msg = 'Element name "{}" should start with an uppercase ' \
                      'letter'.format(element)
                raise ValueError(msg)
            if len(element) == 2 and element[1].isupper():
                msg = 'Element name "{}" should end with a lowercase ' \
                      'letter'.format(element)
                raise ValueError(msg)
            # skips the first entry of ATOMIC_SYMBOL which is n for neutron
            if element not in list(openmc.data.ATOMIC_SYMBOL.values())[1:]:
                msg = 'Element name "{}" not recognised'.format(element)
                raise ValueError(msg)
        # Elements and macroscopic data are mutually exclusive.
        if self._macroscopic is not None:
            msg = 'Unable to add an Element to Material ID="{}" as a ' \
                  'macroscopic data-set has already been added'.format(self._id)
            raise ValueError(msg)
        # Legacy uranium-only enrichment path (no explicit target nuclide).
        if enrichment is not None and enrichment_target is None:
            if not isinstance(enrichment, Real):
                msg = 'Unable to add an Element to Material ID="{}" with a ' \
                      'non-floating point enrichment value "{}"'\
                      .format(self._id, enrichment)
                raise ValueError(msg)
            elif element != 'U':
                msg = 'Unable to use enrichment for element {} which is not ' \
                      'uranium for Material ID="{}"'.format(element, self._id)
                raise ValueError(msg)
            # Check that the enrichment is in the valid range
            cv.check_less_than('enrichment', enrichment, 100./1.008)
            cv.check_greater_than('enrichment', enrichment, 0., equality=True)
            if enrichment > 5.0:
                msg = 'A uranium enrichment of {} was given for Material ID='\
                      '"{}". OpenMC assumes the U234/U235 mass ratio is '\
                      'constant at 0.008, which is only valid at low ' \
                      'enrichments. Consider setting the isotopic ' \
                      'composition manually for enrichments over 5%.'.\
                      format(enrichment, self._id)
                warnings.warn(msg)
        # Add naturally-occurring isotopes
        element = openmc.Element(element)
        for nuclide in element.expand(percent,
                                      percent_type,
                                      enrichment,
                                      enrichment_target,
                                      enrichment_type):
            self.add_nuclide(*nuclide)
def add_elements_from_formula(self, formula, percent_type='ao', enrichment=None,
enrichment_target=None, enrichment_type=None):
"""Add a elements from a chemical formula to the material.
.. versionadded:: 0.12
Parameters
----------
formula : str
Formula to add, e.g., 'C2O', 'C6H12O6', or (NH4)2SO4.
Note this is case sensitive, elements must start with an uppercase
character. Multiplier numbers must be integers.
percent_type : {'ao', 'wo'}, optional
'ao' for atom percent and 'wo' for weight percent. Defaults to atom
percent.
enrichment : float, optional
Enrichment of an enrichment_target nuclide in percent (ao or wo).
If enrichment_target is not supplied then it is enrichment for U235
in weight percent. For example, input 4.95 for 4.95 weight percent
enriched U. Default is None (natural composition).
enrichment_target : str, optional
Single nuclide name to enrich from a natural composition (e.g., 'O16')
enrichment_type : {'ao', 'wo'}, optional
'ao' for enrichment as atom percent and 'wo' for weight percent.
Default is: 'ao' for two-isotope enrichment; 'wo' for U enrichment
Notes
-----
General enrichment procedure is allowed only for elements composed of
two isotopes. If `enrichment_target` is given without `enrichment`
natural composition is added to the material.
"""
cv.check_type('formula', formula, str)
if '.' in formula:
msg = 'Non-integer multiplier values are not accepted. The ' \
'input formula {} contains a "." character.'.format(formula)
raise ValueError(msg)
# Tokenizes the formula and check validity of tokens
tokens = re.findall(r"([A-Z][a-z]*)(\d*)|(\()|(\))(\d*)", formula)
for row in tokens:
for token in row:
if token.isalpha():
if token == "n" or token not in openmc.data.ATOMIC_NUMBER:
msg = 'Formula entry {} not an element symbol.' \
.format(token)
raise ValueError(msg)
elif token not in ['(', ')', ''] and not token.isdigit():
msg = 'Formula must be made from a sequence of ' \
'element symbols, integers, and backets. ' \
'{} is not an allowable entry.'.format(token)
raise ValueError(msg)
# Checks that the number of opening and closing brackets are equal
if formula.count('(') != formula.count(')'):
msg = 'Number of opening and closing brackets is not equal ' \
'in the input formula {}.'.format(formula)
raise ValueError(msg)
# Checks that every part of the original formula has been tokenized
for row in tokens:
for token in row:
formula = formula.replace(token, '', 1)
if len(formula) != 0:
msg = 'Part of formula was not successfully parsed as an ' \
'element symbol, bracket or integer. {} was not parsed.' \
.format(formula)
raise ValueError(msg)
# Works through the tokens building a stack
mat_stack = [Counter()]
for symbol, multi1, opening_bracket, closing_bracket, multi2 in tokens:
if symbol:
mat_stack[-1][symbol] += int(multi1 or 1)
if opening_bracket:
mat_stack.append(Counter())
if closing_bracket:
stack_top = mat_stack.pop()
for symbol, value in stack_top.items():
mat_stack[-1][symbol] += int(multi2 or 1) * value
# Normalizing percentages
percents = mat_stack[0].values()
norm_percents = [float(i) / sum(percents) for i in percents]
elements = mat_stack[0].keys()
# Adds each element and percent to the material
for element, percent in zip(elements, norm_percents):
if enrichment_target is not None and element == re.sub(r'\d+$', '', enrichment_target):
self.add_element(element, percent, percent_type, enrichment,
enrichment_target, enrichment_type)
elif enrichment is not None and enrichment_target is None and element == 'U':
self.add_element(element, percent, percent_type, enrichment)
else:
self.add_element(element, percent, percent_type)
    def add_s_alpha_beta(self, name, fraction=1.0):
        r"""Add an :math:`S(\alpha,\beta)` table to the material

        Parameters
        ----------
        name : str
            Name of the :math:`S(\alpha,\beta)` table
        fraction : float
            The fraction of relevant nuclei that are affected by the
            :math:`S(\alpha,\beta)` table. For example, if the material is a
            block of carbon that is 60% graphite and 40% amorphous then add a
            graphite :math:`S(\alpha,\beta)` table with fraction=0.6.

        Raises
        ------
        ValueError
            If a macroscopic data-set has already been added, or if *name*
            is not a string, or if *fraction* is outside [0, 1].
        """
        # S(a,b) data is incompatible with macroscopic (multi-group) data
        if self._macroscopic is not None:
            msg = 'Unable to add an S(a,b) table to Material ID="{}" as a ' \
                  'macroscopic data-set has already been added'.format(self._id)
            raise ValueError(msg)
        if not isinstance(name, str):
            msg = 'Unable to add an S(a,b) table to Material ID="{}" with a ' \
                  'non-string table name "{}"'.format(self._id, name)
            raise ValueError(msg)
        # The fraction must be a real number in the closed interval [0, 1]
        cv.check_type('S(a,b) fraction', fraction, Real)
        cv.check_greater_than('S(a,b) fraction', fraction, 0.0, True)
        cv.check_less_than('S(a,b) fraction', fraction, 1.0, True)
        # Normalize the table name to the GND convention, warning on rename
        new_name = openmc.data.get_thermal_name(name)
        if new_name != name:
            msg = 'OpenMC S(a,b) tables follow the GND naming convention. ' \
                  'Table "{}" is being renamed as "{}".'.format(name, new_name)
            warnings.warn(msg)
        self._sab.append((new_name, fraction))
def make_isotropic_in_lab(self):
self.isotropic = [x.name for x in self._nuclides]
def get_elements(self):
"""Returns all elements in the material
.. versionadded:: 0.12
Returns
-------
elements : list of str
List of element names
"""
return sorted({re.split(r'(\d+)', i)[0] for i in self.get_nuclides()})
def get_nuclides(self):
"""Returns all nuclides in the material
Returns
-------
nuclides : list of str
List of nuclide names
"""
return [x.name for x in self._nuclides]
def get_nuclide_densities(self):
"""Returns all nuclides in the material and their densities
Returns
-------
nuclides : dict
Dictionary whose keys are nuclide names and values are 3-tuples of
(nuclide, density percent, density percent type)
"""
# keep ordered dictionary for testing purposes
nuclides = OrderedDict()
for nuclide in self._nuclides:
nuclides[nuclide.name] = nuclide
return nuclides
    def get_nuclide_atom_densities(self):
        """Returns all nuclides in the material and their atomic densities in
        units of atom/b-cm

        Returns
        -------
        nuclides : dict
            Dictionary whose keys are nuclide names and values are tuples of
            (nuclide, density in atom/b-cm)
        """
        # Sign convention used below: a positive `density` is an atom density
        # (atom/b-cm); a negative `density` marks a mass density that must be
        # converted with the average molar mass at the end.
        sum_density = False
        if self.density_units == 'sum':
            # Density is implied by the sum of the nuclide densities
            sum_density = True
            density = 0.
        elif self.density_units == 'macro':
            density = self.density
        elif self.density_units == 'g/cc' or self.density_units == 'g/cm3':
            density = -self.density
        elif self.density_units == 'kg/m3':
            # 1 kg/m3 == 1e-3 g/cm3
            density = -0.001 * self.density
        elif self.density_units == 'atom/b-cm':
            density = self.density
        elif self.density_units == 'atom/cm3' or self.density_units == 'atom/cc':
            # 1 atom/cm3 == 1e-24 atom/b-cm
            density = 1.E-24 * self.density
        # For ease of processing split out nuc, nuc_density,
        # and nuc_density_type into separate arrays
        nucs = []
        nuc_densities = []
        nuc_density_types = []
        for nuclide in self.nuclides:
            nucs.append(nuclide.name)
            nuc_densities.append(nuclide.percent)
            nuc_density_types.append(nuclide.percent_type)
        nucs = np.array(nucs)
        nuc_densities = np.array(nuc_densities)
        nuc_density_types = np.array(nuc_density_types)
        if sum_density:
            density = np.sum(nuc_densities)
        percent_in_atom = np.all(nuc_density_types == 'ao')
        density_in_atom = density > 0.
        sum_percent = 0.
        # Convert the weight amounts to atomic amounts
        if not percent_in_atom:
            for n, nuc in enumerate(nucs):
                nuc_densities[n] *= self.average_molar_mass / \
                    openmc.data.atomic_mass(nuc)
        # Now that we have the atomic amounts, lets finish calculating densities
        sum_percent = np.sum(nuc_densities)
        nuc_densities = nuc_densities / sum_percent
        # Convert the mass density to an atom density
        if not density_in_atom:
            # g/cm3 -> atom/b-cm via Avogadro's number and 1e-24 cm2/b
            density = -density / self.average_molar_mass * 1.E-24 \
                * openmc.data.AVOGADRO
        nuc_densities = density * nuc_densities
        nuclides = OrderedDict()
        for n, nuc in enumerate(nucs):
            nuclides[nuc] = (nuc, nuc_densities[n])
        return nuclides
def get_mass_density(self, nuclide=None):
"""Return mass density of one or all nuclides
Parameters
----------
nuclides : str, optional
Nuclide for which density is desired. If not specified, the density
for the entire material is given.
Returns
-------
float
Density of the nuclide/material in [g/cm^3]
"""
mass_density = 0.0
for nuc, atoms_per_cc in self.get_nuclide_atom_densities().values():
if nuclide is None or nuclide == nuc:
density_i = 1e24 * atoms_per_cc * openmc.data.atomic_mass(nuc) \
/ openmc.data.AVOGADRO
mass_density += density_i
return mass_density
def get_mass(self, nuclide=None):
"""Return mass of one or all nuclides.
Note that this method requires that the :attr:`Material.volume` has
already been set.
Parameters
----------
nuclides : str, optional
Nuclide for which mass is desired. If not specified, the density
for the entire material is given.
Returns
-------
float
Mass of the nuclide/material in [g]
"""
if self.volume is None:
raise ValueError("Volume must be set in order to determine mass.")
return self.volume*self.get_mass_density(nuclide)
def clone(self, memo=None):
"""Create a copy of this material with a new unique ID.
Parameters
----------
memo : dict or None
A nested dictionary of previously cloned objects. This parameter
is used internally and should not be specified by the user.
Returns
-------
clone : openmc.Material
The clone of this material
"""
if memo is None:
memo = {}
# If no nemoize'd clone exists, instantiate one
if self not in memo:
# Temporarily remove paths -- this is done so that when the clone is
# made, it doesn't create a copy of the paths (which are specific to
# an instance)
paths = self._paths
self._paths = None
clone = deepcopy(self)
clone.id = None
clone._num_instances = None
# Restore paths on original instance
self._paths = paths
# Memoize the clone
memo[self] = clone
return memo[self]
def _get_nuclide_xml(self, nuclide):
xml_element = ET.Element("nuclide")
xml_element.set("name", nuclide.name)
if nuclide.percent_type == 'ao':
xml_element.set("ao", str(nuclide.percent))
else:
xml_element.set("wo", str(nuclide.percent))
return xml_element
def _get_macroscopic_xml(self, macroscopic):
xml_element = ET.Element("macroscopic")
xml_element.set("name", macroscopic)
return xml_element
def _get_nuclides_xml(self, nuclides):
xml_elements = []
for nuclide in nuclides:
xml_elements.append(self._get_nuclide_xml(nuclide))
return xml_elements
    def to_xml_element(self, cross_sections=None):
        """Return XML representation of the material

        Parameters
        ----------
        cross_sections : str
            Path to an XML cross sections listing file.
            NOTE(review): this parameter is not used anywhere in the body --
            confirm whether callers rely on it before removing.

        Returns
        -------
        element : xml.etree.ElementTree.Element
            XML element containing material data

        Raises
        ------
        ValueError
            If no density has been set and the units are not 'sum'
        """
        # Create Material XML element
        element = ET.Element("material")
        element.set("id", str(self._id))
        if len(self._name) > 0:
            element.set("name", str(self._name))
        if self._depletable:
            element.set("depletable", "true")
        # NOTE(review): a volume of exactly 0 is treated as unset here
        if self._volume:
            element.set("volume", str(self._volume))
        # Create temperature XML subelement
        if self.temperature is not None:
            element.set("temperature", str(self.temperature))
        # Create density XML subelement; 'sum' means the density is implied by
        # the sum of the nuclide densities, so no value attribute is written
        if self._density is not None or self._density_units == 'sum':
            subelement = ET.SubElement(element, "density")
            if self._density_units != 'sum':
                subelement.set("value", str(self._density))
            subelement.set("units", self._density_units)
        else:
            raise ValueError('Density has not been set for material {}!'
                             .format(self.id))
        if self._macroscopic is None:
            # Create nuclide XML subelements
            subelements = self._get_nuclides_xml(self._nuclides)
            for subelement in subelements:
                element.append(subelement)
        else:
            # Create macroscopic XML subelements
            subelement = self._get_macroscopic_xml(self._macroscopic)
            element.append(subelement)
        # S(a,b) tables; a fraction of exactly 1.0 is the default and is not
        # written out
        if self._sab:
            for sab in self._sab:
                subelement = ET.SubElement(element, "sab")
                subelement.set("name", sab[0])
                if sab[1] != 1.0:
                    subelement.set("fraction", str(sab[1]))
        # Isotropic-in-lab scattering nuclides, if any
        if self._isotropic:
            subelement = ET.SubElement(element, "isotropic")
            subelement.text = ' '.join(self._isotropic)
        return element
    @classmethod
    def mix_materials(cls, materials, fracs, percent_type='ao', name=None):
        """Mix materials together based on atom, weight, or volume fractions

        .. versionadded:: 0.12

        Parameters
        ----------
        materials : Iterable of openmc.Material
            Materials to combine
        fracs : Iterable of float
            Fractions of each material to be combined
        percent_type : {'ao', 'wo', 'vo'}
            Type of percentage, must be one of 'ao', 'wo', or 'vo', to signify atom
            percent (molar percent), weight percent, or volume percent,
            optional. Defaults to 'ao'
        name : str
            The name for the new material, optional. Defaults to concatenated
            names of input materials with percentages indicated inside
            parentheses.

        Returns
        -------
        openmc.Material
            Mixture of the materials

        Raises
        ------
        ValueError
            If the fractions do not sum to 1 and percent_type is 'ao' or 'wo'
        NotImplementedError
            If any input material carries an S(a,b) table
        """
        cv.check_type('materials', materials, Iterable, Material)
        cv.check_type('fracs', fracs, Iterable, Real)
        cv.check_value('percent type', percent_type, {'ao', 'wo', 'vo'})
        fracs = np.asarray(fracs)
        void_frac = 1. - np.sum(fracs)
        # Warn that fractions don't add to 1, set remainder to void, or raise
        # an error if percent_type isn't 'vo'
        if not np.isclose(void_frac, 0.):
            if percent_type in ('ao', 'wo'):
                msg = ('A non-zero void fraction is not acceptable for '
                       'percent_type: {}'.format(percent_type))
                raise ValueError(msg)
            else:
                msg = ('Warning: sum of fractions do not add to 1, void '
                       'fraction set to {}'.format(void_frac))
                warnings.warn(msg)
        # Calculate appropriate weights which are how many cc's of each
        # material are found in 1cc of the composite material
        amms = np.asarray([mat.average_molar_mass for mat in materials])
        mass_dens = np.asarray([mat.get_mass_density() for mat in materials])
        if percent_type == 'ao':
            # Atom fractions -> volume fractions via molar mass over density
            wgts = fracs * amms / mass_dens
            wgts /= np.sum(wgts)
        elif percent_type == 'wo':
            # Weight fractions -> volume fractions via density
            wgts = fracs / mass_dens
            wgts /= np.sum(wgts)
        elif percent_type == 'vo':
            wgts = fracs
        # If any of the involved materials contain S(a,b) tables raise an error
        sab_names = set(sab[0] for mat in materials for sab in mat._sab)
        if sab_names:
            msg = ('Currently we do not support mixing materials containing '
                   'S(a,b) tables')
            raise NotImplementedError(msg)
        # Add nuclide densities weighted by appropriate fractions
        nuclides_per_cc = defaultdict(float)
        mass_per_cc = defaultdict(float)
        for mat, wgt in zip(materials, wgts):
            for nuc, atoms_per_bcm in mat.get_nuclide_atom_densities().values():
                # atom/b-cm -> atom/cm^3 (factor 1e24) scaled by volume weight
                nuc_per_cc = wgt*1.e24*atoms_per_bcm
                nuclides_per_cc[nuc] += nuc_per_cc
                mass_per_cc[nuc] += nuc_per_cc*openmc.data.atomic_mass(nuc) / \
                                    openmc.data.AVOGADRO
        # Create the new material with the desired name
        if name is None:
            name = '-'.join(['{}({})'.format(m.name, f) for m, f in
                             zip(materials, fracs)])
        new_mat = openmc.Material(name=name)
        # Compute atom fractions of nuclides and add them to the new material
        tot_nuclides_per_cc = np.sum([dens for dens in nuclides_per_cc.values()])
        for nuc, atom_dens in nuclides_per_cc.items():
            new_mat.add_nuclide(nuc, atom_dens/tot_nuclides_per_cc, 'ao')
        # Compute mass density for the new material and set it
        new_density = np.sum([dens for dens in mass_per_cc.values()])
        new_mat.set_density('g/cm3', new_density)
        # If any of the involved materials is depletable, the new material is
        # depletable
        new_mat.depletable = any(mat.depletable for mat in materials)
        return new_mat
@classmethod
def from_xml_element(cls, elem):
"""Generate material from an XML element
Parameters
----------
elem : xml.etree.ElementTree.Element
XML element
Returns
-------
openmc.Material
Material generated from XML element
"""
mat_id = int(elem.get('id'))
mat = cls(mat_id)
mat.name = elem.get('name')
if "temperature" in elem.attrib:
mat.temperature = float(elem.get("temperature"))
if 'volume' in elem.attrib:
mat.volume = float(elem.get('volume'))
mat.depletable = bool(elem.get('depletable'))
# Get each nuclide
for nuclide in elem.findall('nuclide'):
name = nuclide.attrib['name']
if 'ao' in nuclide.attrib:
mat.add_nuclide(name, float(nuclide.attrib['ao']))
elif 'wo' in nuclide.attrib:
mat.add_nuclide(name, float(nuclide.attrib['wo']), 'wo')
# Get each S(a,b) table
for sab in elem.findall('sab'):
fraction = float(sab.get('fraction', 1.0))
mat.add_s_alpha_beta(sab.get('name'), fraction)
# Get total material density
density = elem.find('density')
units = density.get('units')
if units == 'sum':
mat.set_density(units)
else:
value = float(density.get('value'))
mat.set_density(units, value)
# Check for isotropic scattering nuclides
isotropic = elem.find('isotropic')
if isotropic is not None:
mat.isotropic = isotropic.text.split()
return mat
class Materials(cv.CheckedList):
    """Collection of Materials used for an OpenMC simulation.

    This class corresponds directly to the materials.xml input file. It can be
    thought of as a normal Python list where each member is a
    :class:`Material`. It behaves like a list as the following example
    demonstrates:

    >>> fuel = openmc.Material()
    >>> clad = openmc.Material()
    >>> water = openmc.Material()
    >>> m = openmc.Materials([fuel])
    >>> m.append(water)
    >>> m += [clad]

    Parameters
    ----------
    materials : Iterable of openmc.Material
        Materials to add to the collection
    cross_sections : str, optional
        Indicates the path to an XML cross section listing file (usually named
        cross_sections.xml). If it is not set, the
        :envvar:`OPENMC_CROSS_SECTIONS` environment variable will be used for
        continuous-energy calculations and
        :envvar:`OPENMC_MG_CROSS_SECTIONS` will be used for multi-group
        calculations to find the path to the HDF5 cross section file.
    """

    def __init__(self, materials=None, cross_sections=None):
        super().__init__(Material, 'materials collection')
        self._cross_sections = None
        # The docstring advertises a cross_sections parameter, but it was
        # previously not accepted by the constructor; accept it here (the
        # default of None keeps existing callers working).
        if cross_sections is not None:
            self.cross_sections = cross_sections
        if materials is not None:
            self += materials

    @property
    def cross_sections(self):
        # Path to the cross_sections.xml listing file, or None if unset
        return self._cross_sections

    @cross_sections.setter
    def cross_sections(self, cross_sections):
        cv.check_type('cross sections', cross_sections, str)
        self._cross_sections = cross_sections

    def append(self, material):
        """Append material to collection

        Parameters
        ----------
        material : openmc.Material
            Material to append
        """
        super().append(material)

    def insert(self, index, material):
        """Insert material before index

        Parameters
        ----------
        index : int
            Index in list
        material : openmc.Material
            Material to insert
        """
        super().insert(index, material)

    def make_isotropic_in_lab(self):
        """Make every nuclide of every material isotropic in the lab system."""
        for material in self:
            material.make_isotropic_in_lab()

    def export_to_xml(self, path='materials.xml'):
        """Export material collection to an XML file.

        Parameters
        ----------
        path : str
            Path to file to write. Defaults to 'materials.xml'.
        """
        # Check if path is a directory
        p = Path(path)
        if p.is_dir():
            p /= 'materials.xml'

        # Write materials to the file one-at-a-time. This significantly reduces
        # memory demand over allocating a complete ElementTree and writing it in
        # one go.
        with open(str(p), 'w', encoding='utf-8',
                  errors='xmlcharrefreplace') as fh:

            # Write the header and the opening tag for the root element.
            fh.write("<?xml version='1.0' encoding='utf-8'?>\n")
            fh.write('<materials>\n')

            # Write the <cross_sections> element.
            if self._cross_sections is not None:
                element = ET.Element('cross_sections')
                element.text = str(self._cross_sections)
                clean_indentation(element, level=1)
                element.tail = element.tail.strip(' ')
                fh.write(' ')
                reorder_attributes(element)  # TODO: Remove when support is Python 3.8+
                ET.ElementTree(element).write(fh, encoding='unicode')

            # Write the <material> elements, sorted by ID so that output is
            # reproducible.
            for material in sorted(self, key=lambda x: x.id):
                element = material.to_xml_element(self.cross_sections)
                clean_indentation(element, level=1)
                element.tail = element.tail.strip(' ')
                fh.write(' ')
                reorder_attributes(element)  # TODO: Remove when support is Python 3.8+
                ET.ElementTree(element).write(fh, encoding='unicode')

            # Write the closing tag for the root element.
            fh.write('</materials>\n')

    @classmethod
    def from_xml(cls, path='materials.xml'):
        """Generate materials collection from XML file

        Parameters
        ----------
        path : str, optional
            Path to materials XML file

        Returns
        -------
        openmc.Materials
            Materials collection
        """
        tree = ET.parse(path)
        root = tree.getroot()

        # Generate each material
        materials = cls()
        for material in root.findall('material'):
            materials.append(Material.from_xml_element(material))

        # Check for cross sections settings
        xs = tree.find('cross_sections')
        if xs is not None:
            materials.cross_sections = xs.text

        return materials
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.pci import pci_device
from nova.pci import pci_manager
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.pci import pci_fakes
# Template hypervisor-reported PCI device; the variants below override only
# the address and the vendor/product IDs.
fake_pci = {
    'compute_node_id': 1,
    'address': '0000:00:00.1',
    'product_id': 'p',
    'vendor_id': 'v',
    'request_id': None,
    'status': 'available'}
fake_pci_1 = dict(fake_pci, address='0000:00:00.2',
                  product_id='p1', vendor_id='v1')
fake_pci_2 = dict(fake_pci, address='0000:00:00.3')
# Template PCI device row as stored in the database -- a superset of the
# hypervisor view that includes the bookkeeping columns.
fake_db_dev = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 1,
    'compute_node_id': 1,
    'address': '0000:00:00.1',
    'vendor_id': 'v',
    'product_id': 'p',
    'dev_type': 't',
    'status': 'available',
    'dev_id': 'i',
    'label': 'l',
    'instance_uuid': None,
    'extra_info': '{}',
    'request_id': None,
    }
fake_db_dev_1 = dict(fake_db_dev, vendor_id='v1',
                     product_id='p1', id=2,
                     address='0000:00:00.2')
fake_db_dev_2 = dict(fake_db_dev, id=3, address='0000:00:00.3')
fake_db_devs = [fake_db_dev, fake_db_dev_1, fake_db_dev_2]
# Two single-device requests, one per vendor present in the fixtures above
fake_pci_requests = [
    {'count': 1,
     'spec': [{'vendor_id': 'v'}]},
    {'count': 1,
     'spec': [{'vendor_id': 'v1'}]}]
class PciDevTrackerTestCase(test.TestCase):
    """Tests for pci_manager.PciDevTracker.

    Each test starts from a tracker loaded with the three fake DB devices
    (vendors 'v', 'v1', 'v') and exercises device discovery (set_hvdevs),
    claiming/allocation for instances and migrations, persistence (save),
    and usage cleanup.
    """
    def _create_fake_instance(self):
        # Minimal active instance with no PCI devices attached yet
        self.inst = objects.Instance()
        self.inst.uuid = 'fake-inst-uuid'
        self.inst.pci_devices = objects.PciDeviceList()
        self.inst.vm_state = vm_states.ACTIVE
        self.inst.task_state = None
    def _fake_get_pci_devices(self, ctxt, node_id):
        # Stand-in for db.pci_device_get_all_by_node; copy so tests can't
        # mutate the shared fixtures
        return fake_db_devs[:]
    def _fake_pci_device_update(self, ctxt, node_id, address, value):
        # Records how often/with what the tracker persists device updates
        self.update_called += 1
        self.called_values = value
        fake_return = copy.deepcopy(fake_db_dev)
        return fake_return
    def _fake_pci_device_destroy(self, ctxt, node_id, address):
        # Records device deletions issued by tracker.save()
        self.destroy_called += 1
    def _create_pci_requests_object(self, mock_get, requests):
        # Makes InstancePCIRequests.get_by_instance return the given requests
        pci_reqs = []
        for request in requests:
            pci_req_obj = objects.InstancePCIRequest(count=request['count'],
                                                     spec=request['spec'])
            pci_reqs.append(pci_req_obj)
        mock_get.return_value = objects.InstancePCIRequests(requests=pci_reqs)
    def setUp(self):
        super(PciDevTrackerTestCase, self).setUp()
        self.stubs.Set(db, 'pci_device_get_all_by_node',
            self._fake_get_pci_devices)
        # The fake_pci_whitelist must be called before creating the fake
        # devices
        patcher = pci_fakes.fake_pci_whitelist()
        self.addCleanup(patcher.stop)
        self._create_fake_instance()
        self.tracker = pci_manager.PciDevTracker(1)
    def test_pcidev_tracker_create(self):
        # A fresh tracker holds all three devices, all free, nothing stale
        self.assertEqual(len(self.tracker.pci_devs), 3)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 3)
        self.assertEqual(self.tracker.stale.keys(), [])
        self.assertEqual(len(self.tracker.stats.pools), 2)
        self.assertEqual(self.tracker.node_id, 1)
    def test_pcidev_tracker_create_no_nodeid(self):
        # Without a node id there is nothing to load from the DB
        self.tracker = pci_manager.PciDevTracker()
        self.assertEqual(len(self.tracker.pci_devs), 0)
    def test_set_hvdev_new_dev(self):
        # A newly reported device is added to the tracked set
        fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
        fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
                         copy.deepcopy(fake_pci_2), copy.deepcopy(fake_pci_3)]
        self.tracker.set_hvdevs(fake_pci_devs)
        self.assertEqual(len(self.tracker.pci_devs), 4)
        self.assertEqual(set([dev['address'] for
                              dev in self.tracker.pci_devs]),
                         set(['0000:00:00.1', '0000:00:00.2',
                              '0000:00:00.3', '0000:00:00.4']))
        self.assertEqual(set([dev['vendor_id'] for
                              dev in self.tracker.pci_devs]),
                         set(['v', 'v1', 'v2']))
    def test_set_hvdev_changed(self):
        # A changed (free) device is updated in place
        fake_pci_v2 = dict(fake_pci, address='0000:00:00.2', vendor_id='v1')
        fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
                         copy.deepcopy(fake_pci_v2)]
        self.tracker.set_hvdevs(fake_pci_devs)
        self.assertEqual(set([dev['vendor_id'] for
                             dev in self.tracker.pci_devs]),
                         set(['v', 'v1']))
    def test_set_hvdev_remove(self):
        # Devices no longer reported by the hypervisor are marked 'removed'
        self.tracker.set_hvdevs([fake_pci])
        self.assertEqual(len([dev for dev in self.tracker.pci_devs
                              if dev['status'] == 'removed']),
                         2)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_set_hvdev_changed_stal(self, mock_get):
        # A changed device that is currently claimed becomes 'stale' instead
        # of being updated immediately
        self._create_pci_requests_object(mock_get,
            [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
        self.tracker._claim_instance(mock.sentinel.context, self.inst)
        fake_pci_3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v2')
        fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
                         copy.deepcopy(fake_pci_3)]
        self.tracker.set_hvdevs(fake_pci_devs)
        self.assertEqual(len(self.tracker.stale), 1)
        self.assertEqual(self.tracker.stale['0000:00:00.2']['vendor_id'], 'v2')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_update_pci_for_instance_active(self, mock_get):
        # Claiming for an active instance consumes two devices
        self._create_pci_requests_object(mock_get, fake_pci_requests)
        self.tracker.update_pci_for_instance(None, self.inst)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 1)
        self.assertEqual(free_devs[0]['vendor_id'], 'v')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_update_pci_for_instance_fail(self, mock_get):
        # Requesting more devices than available raises
        pci_requests = copy.deepcopy(fake_pci_requests)
        pci_requests[0]['count'] = 4
        self._create_pci_requests_object(mock_get, pci_requests)
        self.assertRaises(exception.PciDeviceRequestFailed,
                          self.tracker.update_pci_for_instance,
                          None,
                          self.inst)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_update_pci_for_instance_deleted(self, mock_get):
        # Deleting the instance frees its devices again
        self._create_pci_requests_object(mock_get, fake_pci_requests)
        self.tracker.update_pci_for_instance(None, self.inst)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 1)
        self.inst.vm_state = vm_states.DELETED
        self.tracker.update_pci_for_instance(None, self.inst)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 3)
        self.assertEqual(set([dev['vendor_id'] for
                              dev in self.tracker.pci_devs]),
                         set(['v', 'v1']))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_update_pci_for_instance_resize_source(self, mock_get):
        # On the resize source node, devices are released once migrated
        self._create_pci_requests_object(mock_get, fake_pci_requests)
        self.tracker.update_pci_for_instance(None, self.inst)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 1)
        self.inst.task_state = task_states.RESIZE_MIGRATED
        self.tracker.update_pci_for_instance(None, self.inst)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 3)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_update_pci_for_instance_resize_dest(self, mock_get):
        # On the destination node, claims are converted to allocations when
        # the resize finishes
        self._create_pci_requests_object(mock_get, fake_pci_requests)
        self.tracker.update_pci_for_migration(None, self.inst)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 1)
        self.assertEqual(len(self.tracker.claims['fake-inst-uuid']), 2)
        self.assertNotIn('fake-inst-uuid', self.tracker.allocations)
        self.inst.task_state = task_states.RESIZE_FINISH
        self.tracker.update_pci_for_instance(None, self.inst)
        self.assertEqual(len(self.tracker.allocations['fake-inst-uuid']), 2)
        self.assertNotIn('fake-inst-uuid', self.tracker.claims)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_update_pci_for_migration_in(self, mock_get):
        # An incoming migration (default sign=1) claims devices
        self._create_pci_requests_object(mock_get, fake_pci_requests)
        self.tracker.update_pci_for_migration(None, self.inst)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 1)
        self.assertEqual(free_devs[0]['vendor_id'], 'v')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_update_pci_for_migration_out(self, mock_get):
        # An outgoing migration (sign=-1) releases the previous claim
        self._create_pci_requests_object(mock_get, fake_pci_requests)
        self.tracker.update_pci_for_migration(None, self.inst)
        self.tracker.update_pci_for_migration(None, self.inst, sign=-1)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 3)
        self.assertEqual(set([dev['vendor_id'] for
                              dev in self.tracker.pci_devs]),
                         set(['v', 'v1']))
    def test_save(self):
        # save() writes every tracked device back through pci_device_update
        self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update)
        ctxt = context.get_admin_context()
        fake_pci_v3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v3')
        fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
                         copy.deepcopy(fake_pci_v3)]
        self.tracker.set_hvdevs(fake_pci_devs)
        self.update_called = 0
        self.tracker.save(ctxt)
        self.assertEqual(self.update_called, 3)
    def test_save_removed(self):
        # A removed device is destroyed in the DB and dropped from tracking
        self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update)
        self.stubs.Set(db, "pci_device_destroy", self._fake_pci_device_destroy)
        self.destroy_called = 0
        ctxt = context.get_admin_context()
        self.assertEqual(len(self.tracker.pci_devs), 3)
        dev = self.tracker.pci_devs[0]
        self.update_called = 0
        pci_device.remove(dev)
        self.tracker.save(ctxt)
        self.assertEqual(len(self.tracker.pci_devs), 2)
        self.assertEqual(self.destroy_called, 1)
    def test_set_compute_node_id(self):
        # Assigning a node id propagates to every tracked device
        self.tracker = pci_manager.PciDevTracker()
        fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
                         copy.deepcopy(fake_pci_2)]
        self.tracker.set_hvdevs(fake_pci_devs)
        self.tracker.set_compute_node_id(1)
        self.assertEqual(self.tracker.node_id, 1)
        self.assertEqual(self.tracker.pci_devs[0].compute_node_id, 1)
        fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
        fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
                         copy.deepcopy(fake_pci_3), copy.deepcopy(fake_pci_3)]
        self.tracker.set_hvdevs(fake_pci_devs)
        for dev in self.tracker.pci_devs:
            self.assertEqual(dev.compute_node_id, 1)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_clean_usage(self, mock_get):
        # Usage held by instances/migrations/orphans that no longer exist is
        # released; usage of surviving instances is kept
        inst_2 = copy.copy(self.inst)
        inst_2.uuid = 'uuid5'
        migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
        orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
        self._create_pci_requests_object(mock_get,
            [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
        self.tracker.update_pci_for_instance(None, self.inst)
        self._create_pci_requests_object(mock_get,
            [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
        self.tracker.update_pci_for_instance(None, inst_2)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 1)
        self.assertEqual(free_devs[0]['vendor_id'], 'v')
        self.tracker.clean_usage([self.inst], [migr], [orph])
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 2)
        self.assertEqual(
            set([dev['vendor_id'] for dev in free_devs]),
            set(['v', 'v1']))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_clean_usage_claims(self, mock_get):
        # Same as test_clean_usage but the second instance only holds a
        # migration claim rather than an allocation
        inst_2 = copy.copy(self.inst)
        inst_2.uuid = 'uuid5'
        migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
        orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
        self._create_pci_requests_object(mock_get,
            [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
        self.tracker.update_pci_for_instance(None, self.inst)
        self._create_pci_requests_object(mock_get,
            [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
        self.tracker.update_pci_for_migration(None, inst_2)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 1)
        self.tracker.clean_usage([self.inst], [migr], [orph])
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(len(free_devs), 2)
        self.assertEqual(
            set([dev['vendor_id'] for dev in free_devs]),
            set(['v', 'v1']))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
    def test_clean_usage_no_request_match_no_claims(self, mock_get):
        # Tests the case that there is no match for the request so the
        # claims mapping is set to None for the instance when the tracker
        # calls clean_usage.
        self._create_pci_requests_object(mock_get, [])
        self.tracker.update_pci_for_migration(None, instance=self.inst, sign=1)
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(3, len(free_devs))
        self.tracker.clean_usage([], [], [])
        free_devs = self.tracker.pci_stats.get_free_devs()
        self.assertEqual(3, len(free_devs))
        self.assertEqual(
            set([dev['address'] for dev in free_devs]),
            set(['0000:00:00.1', '0000:00:00.2', '0000:00:00.3']))
class PciGetInstanceDevs(test.TestCase):
    def test_get_devs_object(self):
        # Verify get_instance_pci_devs() lazily loads the 'pci_devices'
        # attribute via Instance.obj_load_attr when the instance was fetched
        # without it (expected_attrs=[]).
        def _fake_obj_load_attr(foo, attrname):
            if attrname == 'pci_devices':
                self.load_attr_called = True
                foo.pci_devices = objects.PciDeviceList()
        inst = fakes.stub_instance(id='1')
        ctxt = context.get_admin_context()
        # mox record phase: instance_get must be called exactly like this.
        self.mox.StubOutWithMock(db, 'instance_get')
        db.instance_get(ctxt, '1', columns_to_join=[]
                        ).AndReturn(inst)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_id(ctxt, '1', expected_attrs=[])
        self.stubs.Set(objects.Instance, 'obj_load_attr', _fake_obj_load_attr)
        self.load_attr_called = False
        pci_manager.get_instance_pci_devs(inst)
        self.assertEqual(self.load_attr_called, True)
|
|
""" Generic Unification algorithm for expression trees with lists of children
This implementation is a direct translation of
Artificial Intelligence: A Modern Approach by Stuart Russel and Peter Norvig
Second edition, section 9.2, page 276
It is modified in the following ways:
1. We allow associative and commutative Compound expressions. This results in
combinatorial blowup.
2. We explore the tree lazily.
3. We provide generic interfaces to symbolic algebra libraries in Python.
A more traditional version can be found here
http://aima.cs.berkeley.edu/python/logic.html
"""
from __future__ import print_function, division
from sympy.utilities.iterables import kbins
class Compound(object):
    """ Interior node of an expression tree: an operator plus its children.

    Plays the same role for non-Atoms that SymPy.Basic does.
    """
    def __init__(self, op, args):
        self.op = op
        self.args = args

    def __eq__(self, other):
        if type(other) is not type(self):
            return False
        return (self.op, self.args) == (other.op, other.args)

    def __hash__(self):
        return hash((type(self), self.op, self.args))

    def __str__(self):
        joined = ', '.join(str(child) for child in self.args)
        return "%s[%s]" % (self.op, joined)
class Variable(object):
    """ A wild token: matches any expression subtree during unification. """
    def __init__(self, arg):
        self.arg = arg

    def __eq__(self, other):
        if type(other) is not type(self):
            return False
        return other.arg == self.arg

    def __hash__(self):
        return hash((type(self), self.arg))

    def __str__(self):
        return "Variable({0})".format(self.arg)
class CondVariable(object):
    """ A wild token that only matches when an extra predicate approves.

    arg - the wild token
    valid - an additional constraining function applied to a candidate match
    """
    def __init__(self, arg, valid):
        self.arg = arg
        self.valid = valid

    def __eq__(self, other):
        if type(other) is not type(self):
            return False
        return (other.arg, other.valid) == (self.arg, self.valid)

    def __hash__(self):
        return hash((type(self), self.arg, self.valid))

    def __str__(self):
        return "CondVariable({0})".format(self.arg)
def unify(x, y, s=None):
    """ Unify two expressions
    inputs:
    x, y - expression trees containing leaves, Compounds and Variables
    s - a mapping of variables to subtrees
    outputs:
    lazy sequence of mappings {Variable: subtree}
    Example
    =======
    >>> from sympy.unify.core import unify, Compound, Variable
    >>> expr = Compound("Add", ("x", "y"))
    >>> pattern = Compound("Add", ("x", Variable("a")))
    >>> next(unify(expr, pattern, {}))
    {Variable(a): 'y'}
    """
    s = s or {}
    if x == y:
        # Identical subtrees unify trivially under the current substitution.
        yield s
    elif isinstance(x, (Variable, CondVariable)):
        for match in unify_var(x, y, s):
            yield match
    elif isinstance(y, (Variable, CondVariable)):
        for match in unify_var(y, x, s):
            yield match
    elif isinstance(x, Compound) and isinstance(y, Compound):
        # Unify operators first, then argument lists under each resulting
        # substitution.
        for sop in unify(x.op, y.op, s):
            if is_associative(x) and is_associative(y):
                # Associative ops: regroup the longer argument list into
                # len(shorter) sublists in every possible way -- this is the
                # combinatorial blowup mentioned in the module docstring.
                a, b = (x, y) if len(x.args) < len(y.args) else (y, x)
                for aaargs, bbargs in allcombinations(a.args, b.args, 'associative'):
                    # unpack() collapses single-argument regroupings back to
                    # the bare argument.
                    aa = [unpack(Compound(a.op, arg)) for arg in aaargs]
                    bb = [unpack(Compound(b.op, arg)) for arg in bbargs]
                    for match in unify(aa, bb, sop):
                        yield match
            elif len(x.args) == len(y.args):
                for match in unify(x.args, y.args, sop):
                    yield match
    elif is_args(x) and is_args(y) and len(x) == len(y):
        # Plain lists/tuples: unify element-wise, threading the substitution
        # from the head through the tail.
        if len(x) == 0:
            yield s
        else:
            for shead in unify(x[0], y[0], s):
                for match in unify(x[1:], y[1:], shead):
                    yield match
def unify_var(var, x, s):
    """ Unify the wild token ``var`` against subtree ``x`` under substitution ``s``

    Yields extended substitution mappings.  If ``var`` is already bound in
    ``s``, its binding is unified against ``x``; otherwise ``var`` is bound
    to ``x`` (for a CondVariable only if its ``valid`` predicate accepts
    ``x``).
    """
    if var in s:
        for match in unify(s[var], x, s):
            yield match
    elif isinstance(var, Variable):
        yield assoc(s, var, x)
    elif isinstance(var, CondVariable) and var.valid(x):
        # Bug fix: this branch was missing, so an unbound CondVariable could
        # never match anything and its `valid` predicate was never consulted
        # (matches upstream sympy.unify.core behavior).
        yield assoc(s, var, x)
def occur_check(var, x):
    """ Does ``var`` occur anywhere inside the subtree owned by ``x``? """
    if var == x:
        return True
    if isinstance(x, Compound):
        return occur_check(var, x.args)
    if is_args(x):
        return _any(occur_check(var, element) for element in x)
    return False
def _any(iterable):
for i in iterable:
if i:
return True
return False
def assoc(d, key, val):
    """ Return a copy of mapping ``d`` with ``key`` bound to ``val``

    The original mapping is left untouched.
    """
    updated = d.copy()
    updated[key] = val
    return updated
def is_args(x):
    """ Is ``x`` exactly a ``list`` or ``tuple`` (subclasses excluded)? """
    return type(x) in (list, tuple)
def unpack(x):
    """ Collapse a Compound holding exactly one argument to that argument """
    is_singleton = isinstance(x, Compound) and len(x.args) == 1
    return x.args[0] if is_singleton else x
def is_associative(x):
    """ Is ``x`` a Compound whose operator is treated as associative? """
    if not isinstance(x, Compound):
        return False
    return x.op in ('Add', 'Mul', 'CAdd', 'CMul')
def is_commutative(x):
    """ Is ``x`` a Compound whose operator is treated as commutative? """
    if not isinstance(x, Compound):
        return False
    return x.op in ('CAdd', 'CMul')
def allcombinations(A, B, ordered):
    """
    Restructure A and B to have the same number of elements
    ordered must be either 'commutative' or 'associative'
    A and B can be rearranged so that the larger of the two lists is
    reorganized into smaller sublists.
    >>> from sympy.unify.core import allcombinations
    >>> for x in allcombinations((1, 2, 3), (5, 6), 'associative'): print(x)
    (((1,), (2, 3)), ((5,), (6,)))
    (((1, 2), (3,)), ((5,), (6,)))
    >>> for x in allcombinations((1, 2, 3), (5, 6), 'commutative'): print(x)
    (((1,), (2, 3)), ((5,), (6,)))
    (((1, 2), (3,)), ((5,), (6,)))
    (((1,), (3, 2)), ((5,), (6,)))
    (((1, 3), (2,)), ((5,), (6,)))
    (((2,), (1, 3)), ((5,), (6,)))
    (((2, 1), (3,)), ((5,), (6,)))
    (((2,), (3, 1)), ((5,), (6,)))
    (((2, 3), (1,)), ((5,), (6,)))
    (((3,), (1, 2)), ((5,), (6,)))
    (((3, 1), (2,)), ((5,), (6,)))
    (((3,), (2, 1)), ((5,), (6,)))
    (((3, 2), (1,)), ((5,), (6,)))
    """
    # Bug fix: the ``ordered`` argument was previously ignored (kbins was
    # always called with None), so 'commutative' produced the same output as
    # 'associative', contradicting the doctest.  Translate to kbins' flags:
    # None keeps bin contents in order; 11 also varies order (commutative).
    if ordered == 'commutative':
        ordered = 11
    if ordered == 'associative':
        ordered = None
    sm, bg = (A, B) if len(A) < len(B) else (B, A)
    for part in kbins(list(range(len(bg))), len(sm), ordered=ordered):
        # Regroup the longer sequence according to this index partition;
        # yield tuples (not lists) as the doctest shows.
        groups = tuple(tuple(bg[i] for i in ind) for ind in part)
        if bg == B:
            yield tuple((a,) for a in A), groups
        else:
            yield groups, tuple((b,) for b in B)
def core_partition(it, part):
    """ Partition a tuple/list into pieces defined by indices

    The result (and each piece) has the same type as the input.
    >>> from sympy.unify.core import core_partition
    >>> core_partition((10, 20, 30, 40), [[0, 1, 2], [3]])
    ((10, 20, 30), (40,))
    """
    # Bug fix: previously always returned a list of lists, contradicting the
    # doctest (which also imported the wrong name, ``partition``).  Preserve
    # the input container type, as upstream sympy does.
    return type(it)(type(it)(it[i] for i in ind) for ind in part)
def index(it, ind):
    """ Fancy indexing into an indexable iterable (tuple, list)

    The result has the same type as ``it``.
    >>> from sympy.unify.core import index
    >>> index([10, 20, 30], (1, 2, 0))
    [20, 30, 10]
    """
    # Preserve the container type (tuple in, tuple out), matching upstream
    # sympy; previously a list was always returned.
    return type(it)([it[i] for i in ind])
|
|
'''
Read and write data to a Lego Mindstorm NXT brick using serial bluetooth
connection. You'll need to modify __init__ for unix style serial port
identification in order to use this on Linux.
Blue enables raw byte transfer
TypeBlue utilizes NXT mailbox number for type identification.
Usage:
1. Enable a bluetooth serial port to accept connection requests from NXT.
2. Find and connect computer from NXT bluetooth menu. Note serial port
number; store in comport_num.
3. From python try this code, note the try finally and make sure the connection
is established so that you are not waiting all the time for timeouts! It is
a real pain getting the comport back from a dropped connection.
import blueNXT
try:
b = blueNXT.TypeBlue(comport_num)
b.put('Hello NXT!')
b.putall(False, True, 1, 'two')
b.get()
finally:
b.close()
4. Write an interface to remote control your robots and share!
'''
__author__ = 'Justin Shaw'
import sys
import serial
import struct
import time
class Blue:
    '''
    A bluetooth connection to a Lego NXT brick
    Message framing used by put()/get():
    [2-byte size][2 fixed bytes][box - 1][payload size][payload][NUL]
    where size counts everything after the 2-byte size field.
    '''
    # Fixed 2-byte field following the size prefix (2432 == 0x0980).
    huh = struct.pack('h', 2432) # don't really know what this is
    def __init__(self, comport=9, filename=None, mode='r', timeout=10):
        '''
        comport - integer com number for serial port
        filename and mode are for debug
        '''
        # With a filename we read/write a plain file instead of a serial
        # port, which lets the framing code be exercised offline.
        if filename is None:
            self.s = serial.Serial('COM%d' % comport, timeout=timeout)
        else:
            self.s = open(filename, mode)
    def get(self):
        '''
        Return (payload, box)
        Get next message from NXT, return un-molested payload i.e. bytes,
        plus the 1-based mailbox number it arrived in.  TypeBlue.get()
        decodes ints and bools from the raw payload.
        '''
        # First two bytes: native-endian short giving the remaining size.
        sz = self.s.read(2)
        payload = None
        box = None
        if len(sz) == 2:
            sz = struct.unpack('h', sz)[0]
            # print 'sz', sz
            # Sanity bound on message size before trusting it.
            if 0 < sz < 1000:
                msg = self.s.read(sz)
                # print 'msg', msg
                dat = msg[:4]
                # for c in dat:
                # print ord(c)
                # print struct.unpack('h', msg[:2])
                # Third header byte is the 0-based mailbox number.
                box = ord(dat[2]) + 1
                # Strip the 4-byte header and the trailing NUL.
                payload = msg[4:-1]
        return payload, box
    def put(self, payload, box=1):
        '''
        Send a raw message to NXT
        payload -- bytes to send
        box -- 1 to 10, which mail box on NXT to place message in
        '''
        # sz msg----> 0
        # 0123456789 ... n
        # NUL-terminate the payload (the NXT expects it).
        payload += chr(0)
        pl_sz = len(payload)
        # size covers the 4 header bytes after the size field plus payload.
        sz = pl_sz + 4
        header = struct.pack('h2sbb', sz, self.huh, box - 1, pl_sz)
        out = struct.pack('6s%ds' % pl_sz, header, payload)
        # print 'out', out
        # Debug slice of the header bytes (unused at runtime).
        dat = out[2:6]
        # for c in dat:
        # print ord(c)
        # print
        # self.s.write('\x11\x00\x80\t\x00\r<0123456789>\x00')
        self.s.write(out)
    def __del__(self):
        # Best effort on interpreter teardown: the port may already be
        # closed or may never have been opened.
        try:
            self.close()
        except:
            pass
    def close(self):
        # Close the underlying serial port (or debug file).
        self.s.close()
class TypeBlue(Blue):
    '''
    Use mailbox number for type information:
    1 -- string
    2 -- int
    3 -- bool
    else -- string
    '''
    def get(self):
        '''
        Get a message off port. Determine type from box number:
        1 -- string
        2 -- int
        3 -- bool
        '''
        msg, box = Blue.get(self)
        if box == 2:
            # 4-byte native int payload
            out = struct.unpack('i', msg)[0]
        elif box == 3:
            # single byte; double negation coerces to a real bool
            out = not not(ord(msg))
        else:
            out = msg
        return out
    def put(self, val):
        '''
        Put a message on port. Use box to indicate type:
        1 -- string
        2 -- int
        3 -- bool
        Any other type is stringified and sent as a string (box 1).
        '''
        if type(val) == type(''):
            msg = val
            box = 1
        elif type(val) == type(0):
            msg = struct.pack('i', val)
            box = 2
        elif type(val) == type(False):
            msg = struct.pack('b', not not val)
            box = 3
        else:
            # Bug fix: any other type (e.g. float) previously raised
            # UnboundLocalError on `msg`; fall back to the string behavior
            # promised by the class docstring ("else -- string").
            msg = str(val)
            box = 1
        return Blue.put(self, msg, box)
    def putall(self, *vals):
        '''
        Send several values to NXT, one message each.
        '''
        for v in vals:
            self.put(v)
def Blue__test__():
    '''
    Test that the formats are consistent by reading and writing
    to a file. No real bluetooth required.
    Requires pre-existing fixture files 'text.dat' and 'num.dat' in the
    working directory; writes a scratch file named 'junk'.
    '''
    # read: 'text.dat' must contain ten framed '<0123456789>' messages
    b = Blue(filename='text.dat')
    target = '<0123456789>'
    for i in range(10):
        msg, box = b.get()
        assert msg == target, '%s != %s' % (msg, target)
    # write: round-trip one message through file 'junk', box 2
    b = Blue(filename='junk', mode='wb')
    b.put(target, 2)
    b = Blue(filename='junk')
    got, box = b.get()
    assert box == 2
    assert got == target, '%s != %s' % (got, target)
    # NOTE(review): this handle is never used below -- looks like a leftover
    # from a numeric round-trip test.
    b = Blue(filename='num.dat')
    # type: round-trip str/int/bool through TypeBlue's mailbox typing
    b = TypeBlue(filename='junk', mode='wb')
    b.put(target)
    b.put(1)
    b.put(False)
    b = TypeBlue(filename='junk')
    got = b.get()
    assert got == target
    got = b.get()
    assert got == 1
    got = b.get()
    assert got == False
def tblue():
    '''
    Real bluetooth test against a live NXT: sends a string, an int and a
    bool every quarter second for 20 iterations.
    '''
    b = None
    try:
        # Bug fix: Blue.__init__ formats the port as 'COM%d' % comport, so
        # it needs the integer 10 -- passing the string 'COM10' raised
        # TypeError before a connection was even attempted.
        b = TypeBlue(10)
        for i in range(20):
            ## only uncomment these if you have the NXT code sending data!
            # print b.get()
            # print b.get()
            # print b.get()
            # b.put(42)
            # b.put(False)
            b.put('HERE % d' % i)
            b.put(i)
            if i < 10:
                b.put(False)
            else:
                b.put(True)
            time.sleep(.25)
    finally:
        # Bug fix: guard against the constructor having failed, in which
        # case `del b` would raise NameError and mask the original error.
        if b is not None:
            del b
# tblue()
# Blue__test__()
|
|
import logging
import sys
import os
import json
from collections import namedtuple, OrderedDict
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from plottingscripts.plotting.scatter import plot_scatter_plot
from aslib_scenario.aslib_scenario import ASlibScenario
from asapy.perf_analysis.perf_analysis import PerformanceAnalysis
from asapy.feature_analysis.feature_analysis import FeatureAnalysis
from asapy.out_builder.html_builder import HTMLBuilder
__author__ = "Marius Lindauer"
__copyright__ = "Copyright 2016, ML4AAD"
__license__ = "MIT"
__email__ = "lindauer@cs.uni-freiburg.de"
class ASAPy(object):
def __init__(self,
output_dn: str=".",
plot_log_perf: bool=False):
'''
Constructor
Arguments
---------
output_dn:str
output directory name
'''
self.logger = logging.getLogger("ASAPy")
self.scenario = None
self.output_dn = output_dn
self.plot_log_perf = plot_log_perf
if not os.path.isdir(self.output_dn):
os.mkdir(self.output_dn)
    def read_scenario_ASlib(self, scenario_dn: str):
        '''
        Read scenario from ASlib format
        Arguments
        ---------
        scenario_dn: str
            Scenario directory name
        '''
        # Replaces any previously loaded scenario wholesale.
        self.scenario = ASlibScenario()
        self.scenario.read_scenario(dn=scenario_dn)
    def read_scenario_CSV(self, csv_data: namedtuple):
        '''
        Read scenario from CSV files
        Arguments
        ---------
        csv_data: namedtuple
            namedtuple with the following fields: "perf_csv", "feat_csv", "obj", "cutoff", "maximize", "cv_csv"
            "cv_csv" can be None
        '''
        # Replaces any previously loaded scenario wholesale.
        self.scenario = ASlibScenario()
        self.scenario.read_from_csv(perf_fn=csv_data.perf_csv,
                                    feat_fn=csv_data.feat_csv,
                                    objective=csv_data.obj,
                                    runtime_cutoff=csv_data.cutoff,
                                    maximize=csv_data.maximize,
                                    cv_fn=csv_data.cv_csv)
def get_default_config(self):
'''
get default configuration which enables all plots
Returns
-------
dict
'''
config = {"Performance Analysis": {"Status bar plot": True,
"Box plot": True,
"Violin plot": True,
"CDF plot": True,
"Scatter plots": True,
"Correlation plot": True,
"Contribution of algorithms": True,
"Critical Distance Diagram": True,
"Footprints": True,
"Instance Hardness": True,
"Baselines": True
},
"Feature Analysis": {"Status Bar Plot": True,
"Violin and box plots": True,
"Correlation plot": True,
"Feature importance": True,
"Clustering": True,
"CDF plot on feature costs": True
}
}
return config
def print_config(self):
'''
generate template for config file
'''
print(json.dumps(self.get_default_config(), indent=2))
def load_config(self, fn: str):
'''
load config from file
Arguments
---------
fn: str
file name with config in json format
Returns
-------
config: dict
'''
with open(fn) as fp:
config = json.load(fp)
return config
def main(self, config: dict, max_algos: int=20,
only_fold:int=None):
'''
main method
Arguments
---------
config: dict
configuration that enables or disables plots
max_algos: int
maximum number of algos to consider;
if more are available, we take the n best algorithm on average performance
only_fold: int
use only the given <only_fold> cv-fold data for analyze
'''
if only_fold:
self.logger.info("Using only test data from %d cv-split" %(only_fold))
_, self.scenario = self.scenario.get_split(only_fold)
n_prev_algos = None
if self.scenario is None:
raise ValueError(
"Please first read in Scenario data; use scenario input or csv input")
if self.scenario.performance_type[0] == "solution_quality" and self.scenario.maximize[0]:
# revoke inverting the performance as done in the scenario reader
self.scenario.performance_data *= -1
self.logger.info("Revoke * -1 on performance data")
if len(self.scenario.algorithms) > max_algos:
n_prev_algos = len(self.scenario.algorithms)
self.logger.warning(
"We reduce the algorithms to the greedy selected VBS-improving algorithms (at most %d)" % (max_algos))
pa = PerformanceAnalysis(output_dn=self.output_dn,
scenario=self.scenario)
algos_score = pa.reduce_algos(max_algos=max_algos)
best_algos = [a[0] for a in algos_score]
self.scenario.algorithms = best_algos
self.scenario.performance_data = self.scenario.performance_data[
best_algos]
self.scenario.runstatus_data = self.scenario.runstatus_data[
best_algos]
data = OrderedDict()
# meta data
meta_data_df = self.get_meta_data()
data["Meta Data"] = {
"table": meta_data_df.to_html(header=False)
}
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# performance analysis
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
if config.get("Performance Analysis"):
pa = PerformanceAnalysis(output_dn=self.output_dn,
scenario=self.scenario)
data["Performance Analysis"] = OrderedDict()
if n_prev_algos is not None:
data["Performance Analysis"][
"tooltip"] = "To provide a clear overview, we reduced the number of algorithms (%d) to the greedily selected most VBS-improving %d algorithms." % (n_prev_algos, len(self.scenario.algorithms))
if config["Performance Analysis"].get("Baselines"):
baseline_table = pa.get_baselines()
data["Performance Analysis"]["Baselines"] = {"tooltip": "Baslines: Best Single Algorithm (i.e., best algorithm on average across instances), Virtual Best Solver (aka Oracle, i.e., average across best performance per instance; theoretical best algorithm selector)",
"table": baseline_table}
if config["Performance Analysis"].get("Status bar plot"):
status_plot = pa.get_bar_status_plot()
data["Performance Analysis"]["Status bar plot"] = {"tooltip": "Stacked bar plots for runstatus of each algorithm",
"figure": status_plot}
# get box plot
if config["Performance Analysis"].get("Box plot"):
box_plot = pa.get_box_plots(plot_log_perf=self.plot_log_perf)
data["Performance Analysis"]["Box plot"] = {"tooltip": "Box plots to show the performance distribution of each algorithm",
"figure": box_plot}
# get violin plot
if config["Performance Analysis"].get("Violin plot"):
violion_plot = pa.get_violin_plots(
plot_log_perf=self.plot_log_perf)
data["Performance Analysis"]["Violin plot"] = {"tooltip": "Violin plots to show the probablity density of each algorithm's performance. Also showing the median (middle line) and min/max value.",
"figure": violion_plot}
# get cdf plot
if config["Performance Analysis"].get("CDF plot"):
cdf_plot = pa.get_cdf_plots(plot_log_perf=self.plot_log_perf)
data["Performance Analysis"]["CDF plot"] = {"tooltip": "Cumulative Distribution function (CDF) plots. At each point x (e.g., running time cutoff), how many of the instances (in percentage) can be solved. Better algorithms have a higher curve for minimization problems.",
"figure": cdf_plot}
# get cd diagram
if config["Performance Analysis"].get("Critical Distance Diagram"):
cd_plot = pa.get_cd_diagram()
data["Performance Analysis"]["Critical Distance Diagram"] = {"tooltip": "Critical Distance (CD) diagram based on a Nemenyi two tailed test using average rankings. CD (top left) shows the critical distance. Distances larger than CD corresponds to a statistical significant difference in the ranking. We show only the best 20 ranked algorithms.",
"figure": cd_plot}
# generate scatter plots
if config["Performance Analysis"].get("Scatter plots"):
scatter_plots = pa.scatter_plots(
plot_log_perf=self.plot_log_perf)
data["Performance Analysis"]["Scatter plots"] = OrderedDict({
"tooltip": "Scatter plot to compare the performance of two algorithms on all instances -- each dot represents one instance."})
scatter_plots = sorted(
scatter_plots, key=lambda x: x[0] + x[1])
for plot_tuple in scatter_plots:
key = "%s vs %s" % (plot_tuple[0], plot_tuple[1])
data["Performance Analysis"]["Scatter plots"][
key] = {"figure": plot_tuple[2]}
# generate correlation plot
if config["Performance Analysis"].get("Correlation plot"):
correlation_plot = pa.correlation_plot()
data["Performance Analysis"]["Correlation plot"] = {"tooltip": "Correlation based on Spearman Correlation Coefficient between all algorithms and clustered with Wards hierarchical clustering approach. Darker fields corresponds to a larger correlation between the algorithms. See [Xu et al SAT 2012]",
"figure": correlation_plot}
# get contribution values
if config["Performance Analysis"].get("Contribution of algorithms"):
avg_fn, marg_fn, shap_fn = pa.get_contribution_values()
data["Performance Analysis"]["Contribution of algorithms"] = OrderedDict({
"tooltip": "Contribution of each algorithm"})
data["Performance Analysis"]["Contribution of algorithms"][
"Average Performance"] = {"figure": avg_fn}
data["Performance Analysis"]["Contribution of algorithms"]["Marginal Contribution"] = {"figure": marg_fn,
"tooltip": "Marginal contribution to the virtual best solver (VBS, aka oracle) (i.e., how much decreases the VBS performance by removing the algorithm; higher value correspond to more importance). See [Xu et al SAT 2012]"}
data["Performance Analysis"]["Contribution of algorithms"]["Shapley Values"] = {"figure": shap_fn,
"tooltip": "Shapley values (i.e., marginal contribution across all possible subsets of portfolios; again higher values corresponds to more importance; see [Frechette et al AAAI'16]. For running time scenarios, the metric is cutoff - running time; for non-running time, the metric is (worst performance across all instances and algorithms)- performance."}
portfolio_table = pa.get_greedy_portfolio_constr()
data["Performance Analysis"]["Contribution of algorithms"]["Portfolio Construction"] = {
"tooltip": "Starting with the Single Best Solver, iteratively add algorithms to the portfolio by optimizing VBS",
"table" : portfolio_table
}
# generate footprint plots
if config["Performance Analysis"].get("Footprints"):
footprints_plots = pa.get_footprints()
data["Performance Analysis"]["Footprints"] = OrderedDict({
"tooltip": "Footprints of algorithms (instances red marked if the algorithm is at most 5% away from oracle performance) in 2-d PCA feature space. Inspired by [Smith-Miles et al. Computers & OR 2014]"})
footprints_plots = sorted(footprints_plots, key=lambda x: x[0])
for plot_tuple in footprints_plots:
key = "%s" % (plot_tuple[0])
data["Performance Analysis"]["Footprints"][
key] = {"html": plot_tuple[1], "figure": plot_tuple[2]}
# generate instance hardness plot
if config["Performance Analysis"].get("Instance Hardness"):
hardness_plot = pa.instance_hardness()
data["Performance Analysis"]["Instance Hardness"] = {"tooltip": "Projecting instances into 2d PCA feature space; the color encodes the number of algorithms that perform within 5% of the oracle performance.",
"figure": hardness_plot[1],
"html": hardness_plot[0]}
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# feature analysis
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
if config.get("Feature Analysis"):
data["Feature Analysis"] = OrderedDict()
fa = FeatureAnalysis(output_dn=self.output_dn,
scenario=self.scenario)
if config["Feature Analysis"].get("Status Bar Plot"):
status_plot = fa.get_bar_status_plot()
data["Feature Analysis"]["Status Bar Plot"] = {"tooltip": "Stacked bar plots for runstatus of each feature groupe",
"figure": status_plot}
# box and violin plots
if config["Feature Analysis"].get("Violin and box plots"):
name_plots = fa.get_box_violin_plots()
data["Feature Analysis"]["Violin and box plots"] = OrderedDict({
"tooltip": "Violin and Box plots to show the distribution of each instance feature. We removed NaN from the data."})
for plot_tuple in name_plots:
key = "%s" % (plot_tuple[0])
data["Feature Analysis"]["Violin and box plots"][
key] = {"figure": plot_tuple[1]}
# correlation plot
if config["Feature Analysis"].get("Correlation plot"):
correlation_plot = fa.correlation_plot()
data["Feature Analysis"]["Correlation plot"] = {"tooltip": "Correlation based on Pearson product-moment correlation coefficients between all features and clustered with Wards hierarchical clustering approach. Darker fields corresponds to a larger correlation between the features.",
"figure": correlation_plot}
# feature importance
if config["Feature Analysis"].get("Feature importance"):
importance_plot = fa.feature_importance()
data["Feature Analysis"]["Feature importance"] = {"tooltip": "Using the approach of SATZilla'11, we train a cost-sensitive random forest for each pair of algorithms and average the feature importance (using gini as splitting criterion) across all forests. We show the median, 25th and 75th percentiles across all random forests of the 15 most important features.",
"figure": importance_plot}
# cluster instances in feature space
if config["Feature Analysis"].get("Clustering"):
cluster_plot = fa.cluster_instances()
data["Feature Analysis"]["Clustering"] = {"tooltip": "Clustering instances in 2d; the color encodes the cluster assigned to each cluster. Similar to ISAC, we use a k-means to cluster the instances in the feature space. As pre-processing, we use standard scaling and a PCA to 2 dimensions. To guess the number of clusters, we use the silhouette score on the range of 2 to 12 in the number of clusters",
"figure": cluster_plot}
# get cdf plot
if self.scenario.feature_cost_data is not None and config["Feature Analysis"].get("CDF plot on feature costs"):
cdf_plot = fa.get_feature_cost_cdf_plot()
data["Feature Analysis"]["CDF plot on feature costs"] = {"tooltip": "Cumulative Distribution function (CDF) plots. At each point x (e.g., running time cutoff), for how many of the instances (in percentage) have we computed the instance features. Faster feature computation steps have a higher curve. Missing values are imputed with the maximal value (or running time cutoff).",
"figure": cdf_plot}
self.create_html(data=data)
    def create_html(self, data: OrderedDict):
        '''
        create html report from the nested section -> content mapping
        produced by main(); files are written into self.output_dn
        '''
        html_builder = HTMLBuilder(output_dn=self.output_dn,
                                   scenario_name=self.scenario.scenario)
        html_builder.generate_html(data)
def get_meta_data(self):
'''
read meta data from self.scenario and generate a pandas.Dataframe with it
'''
data = []
data.append(
("Number of instances", len(self.scenario.instances)))
data.append(
("Number of algorithms", len(self.scenario.algorithms)))
data.append(
("Performance measure", self.scenario.performance_measure[0]))
data.append(("Performance type", self.scenario.performance_type[0]))
data.append(("Maximize?", str(self.scenario.maximize[0])))
if self.scenario.algorithm_cutoff_time:
data.append(
("Running time cutoff (algorithm)", str(self.scenario.algorithm_cutoff_time)))
if self.scenario.algorithm_cutoff_memory:
data.append(
("Memory cutoff (algorithm)", str(self.scenario.algorithm_cutoff_memory)))
if self.scenario.features_cutoff_time:
data.append(
("Running time cutoff (features)", str(self.scenario.features_cutoff_time)))
if self.scenario.features_cutoff_memory:
data.append(
("Memory cutoff (Features)", str(self.scenario.features_cutoff_memory)))
data.append(
("# Deterministic features", len(self.scenario.features_deterministic)))
data.append(
("# Stochastic features", len(self.scenario.features_stochastic)))
data.append(("# Feature groups", len(self.scenario.feature_steps)))
data.append(
("# Deterministic algorithms", len(self.scenario.algortihms_deterministics)))
data.append(
("# Stochastic algorithms", len(self.scenario.algorithms_stochastic)))
if self.scenario.feature_cost_data is not None:
data.append(("Feature costs provided?", "True"))
else:
data.append(("Feature costs provided?", "False"))
meta_data = pd.DataFrame(data=list(map(lambda x: x[1], data)), index=list(
map(lambda x: x[0], data)), columns=[""])
return meta_data
|
|
import datetime
import json
import re
import colorsys
import difflib
import logging
import tagging
import auxiliary.tag_suggestions
from actstream import action
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import (HttpResponse, HttpResponseRedirect, Http404,
HttpResponseForbidden)
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy, ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import DetailView, ListView
from tagging.models import TaggedItem, Tag
import models
from models import Committee, CommitteeMeeting, Topic
from ok_tag.views import BaseTagMemberListView
from auxiliary.mixins import GetMoreView
from forms import EditTopicForm, LinksFormset
from hashnav import method_decorator as hashnav_method_decorator
from knesset.utils import clean_string
from laws.models import Bill, PrivateProposal
from links.models import Link
from mks.models import Member
from mks.utils import get_all_mk_names
from mmm.models import Document
from lobbyists.models import Lobbyist
logger = logging.getLogger("open-knesset.committees.views")
class CommitteeListView(ListView):
    """List all visible, non-plenum committees plus a topics summary."""
    context_object_name = 'committees'
    # the plenum has dedicated views; explicitly hidden committees excluded
    queryset = Committee.objects.exclude(type='plenum').exclude(hide=True)
    # paginate_by = 20
    # number of topics rendered inline; the rest are fetched via
    # TopicsMoreView ("More" AJAX endpoint)
    INITIAL_TOPICS = 10
    def get_context_data(self, **kwargs):
        context = super(CommitteeListView, self).get_context_data(**kwargs)
        context["topics"] = Topic.objects.summary()[:self.INITIAL_TOPICS]
        # flag telling the template whether a "more" link is needed
        context["topics_more"] = Topic.objects.summary().count() > self.INITIAL_TOPICS
        context['tags_cloud'] = Tag.objects.cloud_for_model(CommitteeMeeting)
        context["INITIAL_TOPICS"] = self.INITIAL_TOPICS
        return context
class TopicsMoreView(GetMoreView):
    """Get partially rendered member actions content for AJAX calls to 'More'"""
    # each AJAX call returns the next page of 20 topic summaries
    paginate_by = 20
    template_name = 'committees/_topics_summary.html'
    def get_queryset(self):
        return Topic.objects.summary()
class CommitteeDetailView(DetailView):
    model = Committee
    # cache key pattern for the expensive per-committee context
    view_cache_key = 'committee_detail_%d'
    # max items shown inline per list (meetings, documents, ...)
    SEE_ALL_THRESHOLD = 10
    def get_context_data(self, *args, **kwargs):
        context = super(CommitteeDetailView, self).get_context_data(**kwargs)
        cm = context['object']
        cm.sorted_mmm_documents = cm.mmm_documents.order_by('-publication_date')[:self.SEE_ALL_THRESHOLD]
        # NOTE(review): the cache is read here but never written back
        # (cache.set below is commented out), so _build_context_data
        # currently runs on every request.
        cached_context = cache.get(self.view_cache_key % cm.id, {})
        if not cached_context:
            self._build_context_data(cached_context, cm)
            # cache.set('committee_detail_%d' % cm.id, cached_context,
            #           settings.LONG_CACHE_TIME)
        context.update(cached_context)
        context['annotations'] = cm.annotations.order_by('-timestamp')
        context['topics'] = cm.topic_set.summary()[:5]
        return context
    def _build_context_data(self, cached_context, cm):
        # Populates `cached_context` in place with the heavyweight queries:
        # people lists plus recent/future/unpublished meeting lists, each
        # truncated to SEE_ALL_THRESHOLD with a "more available" flag.
        cached_context['chairpersons'] = cm.chairpersons.all()
        cached_context['replacements'] = cm.replacements.all()
        cached_context['members'] = cm.members_by_presence()
        recent_meetings, more_meetings_available = cm.recent_meetings(limit=self.SEE_ALL_THRESHOLD)
        cached_context['meetings_list'] = recent_meetings
        cached_context['more_meetings_available'] = more_meetings_available
        future_meetings, more_future_meetings_available = cm.future_meetings(limit=self.SEE_ALL_THRESHOLD)
        cached_context['future_meetings_list'] = future_meetings
        cached_context['more_future_meetings_available'] = more_future_meetings_available
        cur_date = datetime.datetime.now()
        not_yet_published_meetings, more_unpublished_available = cm.protocol_not_yet_published_meetings(
            end_date=cur_date, limit=self.SEE_ALL_THRESHOLD)
        cached_context['protocol_not_yet_published_list'] = not_yet_published_meetings
        cached_context['more_unpublished_available'] = more_unpublished_available
class MeetingDetailView(DetailView):
model = CommitteeMeeting
    def get_queryset(self):
        # select_related avoids an extra query per meeting when the
        # committee is dereferenced below and in templates
        return super(MeetingDetailView, self).get_queryset().select_related('committee')
    def get_context_data(self, *args, **kwargs):
        context = super(MeetingDetailView, self).get_context_data(*args, **kwargs)
        cm = context['object']
        # Assign each distinct speaker a color: hue spread evenly over the
        # speaker list, higher saturation (0.5 vs 0.3) for known MKs;
        # value 255 makes hsv_to_rgb return components in the 0..255 range.
        colors = {}
        speakers = cm.parts.order_by('speaker__mk').values_list('header', 'speaker__mk').distinct()
        n = speakers.count()
        for (i, (p, mk)) in enumerate(speakers):
            (r, g, b) = colorsys.hsv_to_rgb(float(i) / n, 0.5 if mk else 0.3, 255)
            colors[p] = 'rgb(%i, %i, %i)' % (r, g, b)
        context['title'] = _('%(committee)s meeting on %(date)s') % {'committee': cm.committee.name,
                                                                     'date': cm.date_string}
        context['description'] = _('%(committee)s meeting on %(date)s on topic %(topic)s') \
            % {'committee': cm.committee.name,
               'date': cm.date_string,
               'topic': cm.topics}
        # strip double quotes so the description is safe in meta attributes
        context['description'] = clean_string(context['description']).replace('"', '')
        page = self.request.GET.get('page', None)
        if page:
            context['description'] += _(' page %(page)s') % {'page': page}
        context['colors'] = colors
        # per-part character counts, serialized for the client-side
        # protocol viewer (presumably for pagination -- confirm in JS)
        parts_lengths = {}
        for part in cm.parts.all():
            parts_lengths[part.id] = len(part.body)
        context['parts_lengths'] = json.dumps(parts_lengths)
        context['paginate_by'] = models.COMMITTEE_PROTOCOL_PAGINATE_BY
        if cm.committee.type == 'plenum':
            # plenum pages show plain attendance without presence stats
            context['members'] = cm.mks_attended.order_by('name')
            context['hide_member_presence'] = True
        else:
            # get meeting members with presence calculation
            meeting_members_ids = set(m.id for m in cm.mks_attended.all())
            context['members'] = cm.committee.members_by_presence(ids=meeting_members_ids)
            context['hide_member_presence'] = False
        # feed the tag suggester with the topics plus full protocol text
        meeting_text = [cm.topics] + [part.body for part in cm.parts.all()]
        context['tag_suggestions'] = auxiliary.tag_suggestions.extract_suggested_tags(cm.tags,
                                                                                      meeting_text)
        context['mentioned_lobbyists'] = cm.main_lobbyists_mentioned
        context['mentioned_lobbyist_corporations'] = cm.main_lobbyist_corporations_mentioned
        return context
@hashnav_method_decorator(login_required)
def post(self, request, **kwargs):
cm = get_object_or_404(CommitteeMeeting, pk=kwargs['pk'])
request = self.request
user_input_type = request.POST.get('user_input_type')
if user_input_type == 'bill':
bill_id = request.POST.get('bill_id')
if bill_id.isdigit():
bill = get_object_or_404(Bill, pk=bill_id)
else: # not a number, maybe its p/1234
m = re.findall('\d+', bill_id)
if len(m) != 1:
raise ValueError("didn't find exactly 1 number in bill_id=%s" % bill_id)
pp = PrivateProposal.objects.get(proposal_id=m[0])
bill = pp.bill
if bill.stage in ['1', '2', '-2',
'3']: # this bill is in early stage, so cm must be one of the first meetings
bill.first_committee_meetings.add(cm)
else: # this bill is in later stages
v = bill.first_vote # look for first vote
if v and v.time.date() < cm.date: # and check if the cm is after it,
bill.second_committee_meetings.add(
cm) # if so, this is a second committee meeting
else: # otherwise, assume its first cms.
bill.first_committee_meetings.add(cm)
bill.update_stage()
action.send(request.user, verb='added-bill-to-cm',
description=cm,
target=bill,
timestamp=datetime.datetime.now())
if user_input_type == 'mk':
mk_names = Member.objects.values_list('name', flat=True)
mk_name = difflib.get_close_matches(request.POST.get('mk_name'),
mk_names)[0]
mk = Member.objects.get(name=mk_name)
cm.mks_attended.add(mk)
cm.save() # just to signal, so the attended Action gets created.
action.send(request.user,
verb='added-mk-to-cm',
description=cm,
target=mk,
timestamp=datetime.datetime.now())
if user_input_type == 'remove-mk':
mk_names = Member.objects.values_list('name', flat=True)
mk_name = difflib.get_close_matches(request.POST.get('mk_name'),
mk_names)[0]
mk = Member.objects.get(name=mk_name)
cm.mks_attended.remove(mk)
cm.save() # just to signal, so the attended Action gets created.
action.send(request.user,
verb='removed-mk-to-cm',
description=cm,
target=mk,
timestamp=datetime.datetime.now())
if user_input_type == 'add-lobbyist':
l = Lobbyist.objects.get(person__name=request.POST.get(
'lobbyist_name'))
cm.lobbyists_mentioned.add(l)
if user_input_type == 'remove-lobbyist':
l = Lobbyist.objects.get(person__name=request.POST.get(
'lobbyist_name'))
cm.lobbyists_mentioned.remove(l)
if user_input_type == "protocol":
if not cm.protocol_text: # don't override existing protocols
cm.protocol_text = request.POST.get('protocol_text')
cm.save()
cm.create_protocol_parts()
mks, mk_names = get_all_mk_names()
cm.find_attending_members(mks, mk_names)
return HttpResponseRedirect(".")
# Dummy gettext calls so `makemessages` extracts these action verbs for
# translation; the verbs themselves are emitted via action.send() above.
# NOTE(review): the verb sent above is 'removed-mk-to-cm' but the marker here
# is 'removed-mk-from-cm' -- confirm which spelling is intended.
_('added-bill-to-cm')
_('added-mk-to-cm')
_('removed-mk-from-cm')
class TopicListView(ListView):
    """Public topics, optionally narrowed to a single committee."""
    model = Topic
    context_object_name = 'topics'

    def get_queryset(self):
        topics = Topic.objects.get_public()
        committee_id = self.kwargs.get("committee_id")
        if committee_id is not None:
            topics = topics.filter(committees__id=committee_id)
        return topics

    def get_context_data(self, **kwargs):
        ctx = super(TopicListView, self).get_context_data(**kwargs)
        committee_id = self.kwargs.get("committee_id", False)
        # Falsy committee_id is passed through unchanged (template treats it
        # as "no committee"), mirroring the short-circuit `and` idiom.
        ctx["committee"] = (Committee.objects.get(pk=committee_id)
                            if committee_id else committee_id)
        return ctx
class TopicDetailView(DetailView):
    """Single-topic page; flags whether the current user watches it."""
    model = Topic
    context_object_name = 'topic'

    @method_decorator(ensure_csrf_cookie)
    def dispatch(self, *args, **kwargs):
        return super(TopicDetailView, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        ctx = super(TopicDetailView, self).get_context_data(**kwargs)
        current_topic = ctx['object']
        is_watched = False
        if self.request.user.is_authenticated():
            profile = self.request.user.profiles.get()
            is_watched = current_topic in profile.topics
        ctx['watched_object'] = is_watched
        return ctx
@login_required
def edit_topic(request, committee_id, topic_id=None):
    """Create a new topic for a committee, or edit an existing one.

    GET renders an (optionally pre-filled) form; POST validates and saves,
    then redirects to the topic page. Editing an existing topic requires
    ``Topic.can_edit`` permission.

    FIX: an invalid POST previously fell through both method branches and
    returned ``None`` (an HTTP 500 in Django); it now re-renders the bound
    forms with their validation errors.
    """
    if request.method == 'POST':
        if topic_id:
            t = Topic.objects.get(pk=topic_id)
            if not t.can_edit(request.user):
                return HttpResponseForbidden()
        else:
            t = None
        edit_form = EditTopicForm(data=request.POST, instance=t)
        links_formset = LinksFormset(request.POST)
        if edit_form.is_valid() and links_formset.is_valid():
            topic = edit_form.save(commit=False)
            if topic_id:
                topic.id = topic_id
            else:  # new topic
                topic.creator = request.user
            topic.save()
            edit_form.save_m2m()
            links = links_formset.save(commit=False)
            ct = ContentType.objects.get_for_model(topic)
            for link in links:
                # Generic relation: point each link at the saved topic.
                link.content_type = ct
                link.object_pk = topic.id
                link.save()
            messages.add_message(request, messages.INFO, 'Topic has been updated')
            return HttpResponseRedirect(
                reverse('topic-detail', args=[topic.id]))
        # invalid forms: fall through and re-render them with errors
    else:
        if topic_id:  # editing existing topic
            t = Topic.objects.get(pk=topic_id)
            if not t.can_edit(request.user):
                return HttpResponseForbidden()
            edit_form = EditTopicForm(instance=t)
            ct = ContentType.objects.get_for_model(t)
            links_formset = LinksFormset(queryset=Link.objects.filter(
                content_type=ct, object_pk=t.id))
        else:  # create new topic for given committee
            c = Committee.objects.get(pk=committee_id)
            edit_form = EditTopicForm(initial={'committees': [c]})
            links_formset = LinksFormset(queryset=Link.objects.none())
    return render_to_response('committees/edit_topic.html',
                              context_instance=RequestContext(request,
                                                              {'edit_form': edit_form,
                                                               'links_formset': links_formset,
                                                               }))
@login_required
def delete_topic(request, pk):
    """Soft-delete a topic on POST, or show a confirmation page on GET.

    Users without edit permission get a 404.
    """
    topic = get_object_or_404(Topic, pk=pk)
    if not topic.can_edit(request.user):
        raise Http404
    if request.method == 'POST':
        # Soft delete: mark the status, never remove the row.
        topic.status = models.TOPIC_DELETED
        topic.save()
        first_committee = topic.committees.all()[0]
        return HttpResponseRedirect(reverse('committee-detail',
                                            args=[first_committee.id]))
    return render_to_response('committees/delete_topic.html',
                              {'topic': topic},
                              RequestContext(request)
                              )
class MeetingsListView(ListView):
    """Paginated meetings list, optionally restricted to one committee."""
    allow_empty = False
    paginate_by = 20

    def get_context_data(self, *args, **kwargs):
        ctx = super(MeetingsListView, self).get_context_data(**kwargs)
        committee_id = self.kwargs.get('committee_id')
        if committee_id:
            # allow_empty = False guarantees at least one item here.
            committee = ctx['object_list'][0].committee
            display_name = (_('Knesset Plenum') if committee.type == 'plenum'
                            else committee.name)
            ctx['title'] = _('All meetings by %(committee)s') % {
                'committee': display_name}
            ctx['committee'] = committee
        else:
            ctx['title'] = _('Parliamentary committees meetings')
        ctx['committee_id'] = committee_id
        ctx['none'] = _('No %(object_type)s found') % {
            'object_type': CommitteeMeeting._meta.verbose_name_plural}
        return ctx

    def get_queryset(self):
        committee_id = self.kwargs.get('committee_id', None)
        meetings = CommitteeMeeting.objects.filter_and_order(**dict(self.request.GET))
        if committee_id:
            meetings = meetings.filter(committee__id=committee_id)
        return meetings
class UnpublishedProtocolslistView(ListView):
    """Meetings of one committee whose protocols are not yet published."""
    allow_empty = False
    paginate_by = 20
    template_name = 'committees/committee_full_events_list.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super(UnpublishedProtocolslistView, self).get_context_data(**kwargs)
        committee_id = self.kwargs.get('committee_id')
        if not committee_id:
            raise Http404('missing committee_id')
        committee = Committee.objects.get(pk=committee_id)
        display_name = (_('Knesset Plenum') if committee.type == 'plenum'
                        else committee.name)
        ctx['title'] = _('All unpublished protocols by %(committee)s') % {
            'committee': display_name}
        ctx['committee'] = committee
        ctx['committee_id'] = committee_id
        ctx['none'] = _('No %(object_type)s found') % {
            'object_type': CommitteeMeeting._meta.verbose_name_plural}
        return ctx

    def get_queryset(self):
        committee = Committee.objects.get(pk=self.kwargs.get('committee_id'))
        return committee.protocol_not_yet_published_meetings(
            end_date=datetime.datetime.now(), do_limit=False)
class FutureMeetingslistView(ListView):
    """Upcoming (future) meetings of one committee."""
    allow_empty = False
    paginate_by = 20
    template_name = 'committees/committee_full_events_list.html'

    def get_context_data(self, *args, **kwargs):
        ctx = super(FutureMeetingslistView, self).get_context_data(**kwargs)
        committee_id = self.kwargs.get('committee_id')
        if not committee_id:
            raise Http404('missing committee_id')
        committee = Committee.objects.get(pk=committee_id)
        display_name = (_('Knesset Plenum') if committee.type == 'plenum'
                        else committee.name)
        ctx['title'] = _('All future meetings by %(committee)s') % {
            'committee': display_name}
        ctx['committee'] = committee
        ctx['committee_id'] = committee_id
        ctx['none'] = _('No %(object_type)s found') % {
            'object_type': CommitteeMeeting._meta.verbose_name_plural}
        return ctx

    def get_queryset(self):
        committee = Committee.objects.get(pk=self.kwargs.get('committee_id'))
        return committee.future_meetings(do_limit=False)
def parse_date(date_string):
    """Parse an ISO ``YYYY-MM-DD`` string into a ``datetime.date``.

    Raises ValueError for malformed input (propagated from ``strptime``).
    """
    parsed = datetime.datetime.strptime(date_string, '%Y-%m-%d')
    return parsed.date()
def meeting_list_by_date(request, *args, **kwargs):
    """List committee meetings held on a specific date.

    URL kwargs: ``date`` (YYYY-MM-DD, required) and an optional
    ``committee_id`` to narrow the list to one committee.
    Raises Http404 for a malformed/missing date or unknown committee.
    """
    committee_id = kwargs.get('committee_id')
    date_string = kwargs.get('date')
    try:
        date = parse_date(date_string)
    except (TypeError, ValueError):
        # FIX: was a bare `except:` which also swallowed KeyboardInterrupt /
        # SystemExit; strptime raises ValueError (TypeError when date is None).
        raise Http404()
    context = {}
    if committee_id:
        committee = Committee.objects.filter(pk=committee_id)[:1]
        if not committee:  # someone tried this with a non-existent committee
            raise Http404()
        else:
            committee = committee[0]
        context['committee'] = committee
        qs = CommitteeMeeting.objects.filter(committee_id=committee_id)
        context['title'] = _(
            'Meetings by %(committee)s on date %(date)s') % {
                'committee': committee, 'date': date}
        context['committee_id'] = committee_id
    else:
        context['title'] = _(
            'Parliamentary committees meetings on date %(date)s') % {
                'date': date}
        qs = CommitteeMeeting.objects.all()
    qs = qs.filter(date=date)
    context['object_list'] = qs
    context['none'] = _('No %(object_type)s found') % {
        'object_type': CommitteeMeeting._meta.verbose_name_plural}
    return render_to_response("committees/committeemeeting_list.html",
                              context,
                              context_instance=RequestContext(request))
class MeetingTagListView(BaseTagMemberListView):
    """Committee meetings carrying a given tag, with an MK tag cloud."""
    template_name = 'committees/committeemeeting_list_by_tag.html'
    url_to_reverse = 'committeemeeting-tag'

    def get_queryset(self):
        return TaggedItem.objects.get_by_model(CommitteeMeeting,
                                               self.tag_instance)

    def get_mks_cloud(self):
        """Cloud of MKs weighted by how many tagged meetings each attended."""
        attendance = {}
        for meeting in TaggedItem.objects.get_by_model(
                CommitteeMeeting, self.tag_instance):
            for member in meeting.mks_attended.all():
                attendance[member] = attendance.get(member, 0) + 1
        members = attendance.keys()
        for member in members:
            # calculate_cloud reads a `count` attribute off each item.
            member.count = attendance[member]
        return tagging.utils.calculate_cloud(members)

    def get_context_data(self, *args, **kwargs):
        ctx = super(MeetingTagListView, self).get_context_data(*args,
                                                               **kwargs)
        ctx['title'] = ugettext_lazy(
            'Committee Meetings tagged %(tag)s') % {
                'tag': self.tag_instance.name}
        ctx['members'] = self.get_mks_cloud()
        return ctx
# TODO: This has be replaced by the class based view above for Django 1.5.
# Remove once working
#
# def meeting_tag(request, tag):
# tag_instance = get_tag(tag)
# if tag_instance is None:
# raise Http404(_('No Tag found matching "%s".') % tag)
#
# extra_context = {'tag':tag_instance}
# extra_context['tag_url'] = reverse('committeemeeting-tag',args=[tag_instance])
# extra_context['title'] = ugettext_lazy('Committee Meetings tagged %(tag)s') % {'tag': tag}
# qs = CommitteeMeeting
# queryset = TaggedItem.objects.get_by_model(qs, tag_instance)
# mks = [cm.mks_attended.all() for cm in
# TaggedItem.objects.get_by_model(CommitteeMeeting, tag_instance)]
# d = {}
# for mk in mks:
# for p in mk:
# d[p] = d.get(p,0)+1
# # now d is a dict: MK -> number of meetings in this tag
# mks = d.keys()
# for mk in mks:
# mk.count = d[mk]
# mks = tagging.utils.calculate_cloud(mks)
# extra_context['members'] = mks
# return generic.list_detail.object_list(request, queryset,
# template_name='committees/committeemeeting_list_by_tag.html', extra_context=extra_context)
def delete_topic_rating(request, object_id):
    """Delete the current user's rating vote on a topic (POST only).

    FIX: non-POST requests previously fell through and returned ``None``,
    which Django turns into an HTTP 500; reject them explicitly instead.
    """
    if request.method != 'POST':
        return HttpResponse('POST required.', status=405)
    topic = get_object_or_404(Topic, pk=object_id)
    topic.rating.delete(request.user, request.META['REMOTE_ADDR'])
    return HttpResponse('Vote deleted.')
class CommitteeMMMDocuments(ListView):
    """MMM (research-center) documents requested by a committee, optionally
    filtered to a single publication date."""
    paginate_by = 20
    allow_empty = True
    template_name = 'committees/committee_mmm_documents.html'

    def get_queryset(self):
        # Stash the committee id on self; get_context_data needs it and
        # Django calls get_queryset first.
        self.c_id = self.kwargs.get('committee_id')
        date = self.kwargs.get('date', None)
        if date:
            # FIX: removed a no-op `try/except: raise` wrapper; a malformed
            # date still propagates ValueError exactly as before.
            date = parse_date(date)
            documents = Document.objects.filter(req_committee__id=self.c_id,
                                                publication_date=date).order_by(
                '-publication_date')
        else:
            documents = Document.objects.filter(req_committee__id=self.c_id).order_by(
                '-publication_date')
        return documents

    def get_context_data(self, **kwargs):
        context = super(CommitteeMMMDocuments, self).get_context_data(**kwargs)
        committee = Committee.objects.get(id=self.c_id)
        context['committee'] = committee.name
        context['committee_id'] = self.c_id
        context['committee_url'] = committee.get_absolute_url()
        return context
|
|
# Copyright 2014 - Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
cfg.CONF.import_group('keystone_authtoken',
'keystonemiddleware.auth_token')
import keystoneclient.exceptions as kc_exception # noqa
from magnum.common import exception
from magnum.common import magnum_keystoneclient
from magnum.tests import base
from magnum.tests import utils
@mock.patch('keystoneclient.v3.client.Client')
class KeystoneClientTest(base.BaseTestCase):
    """Test cases for magnum.common.magnum_keystoneclient."""

    def setUp(self):
        # Every test starts from a dummy context with a v2.0 auth_uri; the
        # client under test is expected to rewrite it to the /v3 endpoint.
        super(KeystoneClientTest, self).setUp()
        dummy_url = 'http://server.test:5000/v2.0'
        self.ctx = utils.dummy_context()
        self.ctx.auth_url = dummy_url
        self.ctx.auth_token = 'abcd1234'
        self.ctx.auth_token_info = None
        cfg.CONF.set_override('auth_uri', dummy_url,
                              group='keystone_authtoken')
        cfg.CONF.set_override('admin_user', 'magnum',
                              group='keystone_authtoken')
        cfg.CONF.set_override('admin_password', 'verybadpass',
                              group='keystone_authtoken')
        cfg.CONF.set_override('admin_tenant_name', 'service',
                              group='keystone_authtoken')

    def test_init_v3_token(self, mock_ks):
        """Test creating the client, token auth."""
        self.ctx.project_id = None
        self.ctx.trust_id = None
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        magnum_ks_client.client  # property access triggers lazy client init
        self.assertIsNotNone(magnum_ks_client._client)
        mock_ks.assert_called_once_with(token='abcd1234', project_id=None,
                                        auth_url='http://server.test:5000/v3',
                                        endpoint='http://server.test:5000/v3')
        mock_ks.return_value.authenticate.assert_called_once_with()

    def test_init_v3_bad_nocreds(self, mock_ks):
        """Test creating the client, no credentials."""
        self.ctx.auth_token = None
        self.ctx.trust_id = None
        self.ctx.username = None
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertRaises(exception.AuthorizationFailure,
                          magnum_ks_client._v3_client_init)

    def test_init_trust_token_access(self, mock_ks):
        """Test creating the client, token auth."""
        self.ctx.project_id = 'abcd1234'
        self.ctx.trust_id = None
        self.ctx.auth_token_info = {'access': {'token': {'id': 'placeholder'}}}
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        magnum_ks_client.client
        self.assertIsNotNone(magnum_ks_client._client)
        mock_ks.assert_called_once_with(auth_ref={'version': 'v2.0',
                                                  'token': {
                                                      'id': 'abcd1234'}},
                                        endpoint='http://server.test:5000/v3',
                                        auth_url='http://server.test:5000/v3')

    def test_init_trust_token_token(self, mock_ks):
        """Test client init from v3-style token info in auth_token_info."""
        self.ctx.project_id = None
        self.ctx.trust_id = None
        self.ctx.auth_token_info = {'token': {}}
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        magnum_ks_client.client
        self.assertIsNotNone(magnum_ks_client._client)
        mock_ks.assert_called_once_with(auth_ref={'auth_token': 'abcd1234',
                                                  'version': 'v3'},
                                        endpoint='http://server.test:5000/v3',
                                        auth_url='http://server.test:5000/v3')

    def test_init_trust_token_none(self, mock_ks):
        """Test unrecognised auth_token_info raises AuthorizationFailure."""
        self.ctx.project_id = None
        self.ctx.trust_id = None
        self.ctx.auth_token_info = {'not_this': 'urg'}
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertRaises(exception.AuthorizationFailure,
                          magnum_ks_client._v3_client_init)

    def test_create_trust_context_trust_id(self, mock_ks):
        """Test create_trust_context with existing trust_id."""
        self.ctx.trust_id = 'atrust123'
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        trust_context = magnum_ks_client.create_trust_context()
        self.assertEqual(self.ctx.to_dict(), trust_context.to_dict())
        mock_ks.assert_called_once_with(username='magnum',
                                        auth_url='http://server.test:5000/v3',
                                        password='verybadpass',
                                        endpoint='http://server.test:5000/v3',
                                        trust_id='atrust123')
        mock_ks.return_value.authenticate.assert_called_once_with()

    def test_create_trust_context_trust_create(self, mock_ks):
        """Test create_trust_context when creating a trust."""

        class FakeTrust(object):
            id = 'atrust123'

        cfg.CONF.set_override('trusts_delegated_roles',
                              ['magnum_assembly_update'])
        # user_id is read twice: once for the trustee (admin) and once for
        # the trustor (user); the PropertyMock returns them in that order.
        getter_mock = mock.PropertyMock(side_effect=['1234', '5678'])
        type(mock_ks.return_value.auth_ref).user_id = getter_mock
        mock_ks.return_value.auth_ref.project_id = '42'
        mock_ks.return_value.trusts.create.return_value = FakeTrust()
        self.ctx.trust_id = None
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        trust_context = magnum_ks_client.create_trust_context()
        # admin_client and user client
        expected = [mock.call(username='magnum',
                              project_name='service',
                              password='verybadpass',
                              auth_url='http://server.test:5000/v3',
                              endpoint='http://server.test:5000/v3'),
                    mock.call(token='abcd1234',
                              project_id='test_tenant_id',
                              auth_url='http://server.test:5000/v3',
                              endpoint='http://server.test:5000/v3')]
        self.assertEqual(expected, mock_ks.call_args_list)
        self.assertEqual([mock.call(), mock.call()],
                         mock_ks.return_value.authenticate.call_args_list)
        # trust creation
        self.assertEqual('atrust123', trust_context.trust_id)
        mock_ks.return_value.trusts.create.assert_called_once_with(
            trustor_user='5678',
            trustee_user='1234',
            project='42',
            impersonation=True,
            role_names=['magnum_assembly_update'])

    def test_init_admin_client_denied(self, mock_ks):
        """Test the admin_client property, auth failure path."""
        self.ctx.username = None
        self.ctx.password = None
        self.ctx.trust_id = None
        mock_ks.return_value.authenticate.return_value = False
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)

        # Define wrapper for property or the property raises the exception
        # outside of the assertRaises which fails the test
        def get_admin_client():
            magnum_ks_client.admin_client

        self.assertRaises(exception.AuthorizationFailure,
                          get_admin_client)

    def test_trust_init_fail(self, mock_ks):
        """Test consuming a trust when initializing, error scoping."""
        self.ctx.username = None
        self.ctx.auth_token = None
        self.ctx.trust_id = 'atrust123'
        mock_ks.return_value.auth_ref.trust_scoped = False
        self.assertRaises(exception.AuthorizationFailure,
                          magnum_keystoneclient.KeystoneClientV3, self.ctx)

    def test_trust_init_token(self, mock_ks):
        """Test trust_id takes precedence when token specified."""
        self.ctx.username = None
        self.ctx.trust_id = 'atrust123'
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertIsNotNone(magnum_ks_client._client)
        mock_ks.assert_called_once_with(username='magnum',
                                        auth_url='http://server.test:5000/v3',
                                        password='verybadpass',
                                        endpoint='http://server.test:5000/v3',
                                        trust_id='atrust123')
        mock_ks.return_value.authenticate.assert_called_once_with()

    def test_delete_trust(self, mock_ks):
        """Test delete_trust when deleting trust."""
        mock_ks.return_value.trusts.delete.return_value = None
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertIsNone(magnum_ks_client.delete_trust(trust_id='atrust123'))
        mock_ks.return_value.trusts.delete.assert_called_once_with('atrust123')

    def test_delete_trust_not_found(self, mock_ks):
        """Test delete_trust when trust already deleted."""
        mock_delete = mock_ks.return_value.trusts.delete
        mock_delete.side_effect = kc_exception.NotFound()
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        self.assertIsNone(magnum_ks_client.delete_trust(trust_id='atrust123'))

    @mock.patch.object(magnum_keystoneclient.KeystoneClientV3,
                       '_service_admin_creds')
    def test_client_is_admin(self, mock_admin_creds, mock_ks):
        """Test client is admin when passing an admin_context."""
        self.ctx.is_admin = True
        magnum_ks_client = magnum_keystoneclient.KeystoneClientV3(self.ctx)
        magnum_ks_client.client
        self.assertIsNone(magnum_ks_client._client)
        self.assertIsNotNone(magnum_ks_client._admin_client)
        mock_admin_creds.assert_called_once_with()
|
|
import json
import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.template import Context, Template
from django.utils import importlib
from django.utils.translation import check_for_language, to_locale, get_language
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
    """Activate a language choice and redirect.

    The redirect target comes from the 'next' request parameter, falling back
    to the referer and finally '/' when neither is a safe URL. Because this
    view changes how the user sees the rest of the site, state is modified on
    POST only; a GET simply redirects without touching session or cookies.
    """
    next = request.REQUEST.get('next')
    if not is_safe_url(url=next, host=request.get_host()):
        next = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=next, host=request.get_host()):
            next = '/'
    response = http.HttpResponseRedirect(next)
    if request.method != 'POST':
        return response
    lang_code = request.POST.get('language', None)
    if lang_code and check_for_language(lang_code):
        if hasattr(request, 'session'):
            request.session['django_language'] = lang_code
        else:
            response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
    return response
def get_formats():
    """
    Returns all formats strings required for i18n to work
    """
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
    )
    # Later modules win: settings first, then format modules in reverse order.
    raw = {}
    for source in [settings] + get_format_modules(reverse=True):
        for name in FORMAT_SETTINGS:
            raw[name] = get_format(name)
    formats = {}
    for name, value in raw.items():
        if isinstance(value, (six.string_types, int)):
            formats[name] = smart_text(value)
        elif isinstance(value, (tuple, list)):
            formats[name] = [smart_text(item) for item in value]
    return formats
# Django template for the JavaScript i18n library served by
# javascript_catalog(). Rendered with 'plural' (a JS plural expression),
# 'catalog_str' (JSON translation catalog) and 'formats_str' (JSON format
# strings); when no catalog is given, identity gettext functions are emitted.
# NOTE: the body is a runtime string literal -- do not edit its content.
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
    """Render js_catalog_template with the given translation catalog and
    plural expression and return it as a text/javascript response."""
    template = Template(js_catalog_template)
    # Indent continuation lines of the embedded JSON so it nests under the
    # `django.catalog = ` / `django.formats = ` assignments in the template.
    indent = lambda s: s.replace('\n', '\n ')
    context = Context({
        'catalog_str': indent(json.dumps(
            catalog, sort_keys=True, indent=2)) if catalog else None,
        'formats_str': indent(json.dumps(
            get_formats(), sort_keys=True, indent=2)),
        'plural': plural,
    })
    return http.HttpResponse(template.render(context), 'text/javascript')
def null_javascript_catalog(request, domain=None, packages=None):
    """Serve identity ("do nothing") versions of the JavaScript i18n
    functions, i.e. a catalog-less render of the JS template. The *domain*
    and *packages* arguments are accepted for URLconf compatibility and
    ignored."""
    return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
    """
    Returns the selected language catalog as a javascript library.

    Receives the list of packages to check for translations in the
    packages parameter either from an infodict or as a +-delimited
    string from the request. Default is 'django.conf'.

    Additionally you can override the gettext domain for this view,
    but usually you don't want to do that, as JavaScript messages
    go to the djangojs domain. But this might be needed if you
    deliver your JavaScript source from Django templates.
    """
    default_locale = to_locale(settings.LANGUAGE_CODE)
    locale = to_locale(get_language())

    # An explicit ?language=xx overrides the active language.
    if request.GET and 'language' in request.GET:
        if check_for_language(request.GET['language']):
            locale = to_locale(request.GET['language'])

    if packages is None:
        packages = ['django.conf']
    if isinstance(packages, six.string_types):
        packages = packages.split('+')
    packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]

    t = {}
    paths = []
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # paths of requested packages
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
        paths.append(path)
    # add the filesystem paths listed in the LOCALE_PATHS setting
    paths.extend(list(reversed(settings.LOCALE_PATHS)))
    # first load all english languages files for defaults
    for path in paths:
        try:
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    catalog = gettext_module.translation(domain, path, [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    plural = None
    if '' in t:
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':', 1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
    pdict = {}
    maxcnts = {}
    catalog = {}
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, six.string_types):
            catalog[k] = v
        elif isinstance(k, tuple):
            msgid = k[0]
            cnt = k[1]
            maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
            pdict.setdefault(msgid, {})[cnt] = v
        else:
            raise TypeError(k)
    for k, v in pdict.items():
        # BUG FIX: was `maxcnts[msgid]` -- a stale loop variable from the
        # previous loop -- so every plural entry used the *last* msgid's
        # count. Each entry must be sized by its own max plural index.
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
    return render_javascript_catalog(catalog, plural)
|
|
import sys
import datetime
from tornado import gen
from storm.db import Database, ConnectionPool
from storm import error
import tornado_mysql
from tornado_mysql.pools import Pool
cursor_type = tornado_mysql.cursors.DictCursor
class MySql(Database):
@gen.coroutine
def connect(self, callback=None):
if not self.is_connected:
self.db = Pool(
dict(user=self.connection.user,
passwd=self.connection.password,
db=self.connection.database,
cursorclass=cursor_type),
max_idle_connections=1,
max_open_connections=1)
self.is_connected = True
if callback is None:
raise gen.Return(True)
callback(True)
@gen.coroutine
def close(self, callback=None):
if self.is_connected:
yield self.db.close()
self.is_connected = False
if callback is None:
raise gen.Return(True)
callback(True)
@staticmethod
def _quote(value):
if value is None:
return 'null'
if isinstance(value, datetime.datetime):
return "'%s'" % str(value)
if value == 'NOW()':
return value
# convert python3 byte strings
if sys.version_info >= (3,0,0) and isinstance(value, bytes):
value = value.decode('utf-8')
if isinstance(value, float):
return str(value)
try:
value = str(int(value))
except:
value = "'%s'" % tornado_mysql.converters.escape_string(value)
return value
@gen.coroutine
def select_one(self, table, **kwargs):
yield self.connect()
where_bits = []
for key in kwargs:
where_bits.append("`%s` = %s" % (key, MySql._quote(kwargs[key])))
sql = "SELECT * FROM `%s` WHERE BINARY %s" % (table, ' AND BINARY '.join(where_bits))
cur = yield self.db.execute(sql)
result = cur.fetchone()
if result is None:
raise error.StormNotFoundError("Object of type: %s not found with args: %s" % (table, kwargs))
callback = kwargs.get('callback')
if callback is None:
raise gen.Return(result)
callback(result)
@gen.coroutine
def select_multiple(self, table, query, **kwargs):
yield self.connect()
query.bind(':table', table)
page = kwargs.get('page')
if page:
page_size = kwargs.get('page_size', 10)
query.limit = page_size
query.offset = (page - 1) * page_size
raw_sql = query.sql
total_count = 0
tasks = [self.db.execute(raw_sql)]
if page:
tasks.append(self.db.execute(query.count_sql))
cursors = yield tasks
results = [cursors[0].fetchall()]
if len(cursors) > 1:
results.append(cursors[1].fetchall())
data = results[0]
total_count = len(data)
if len(results) == 2:
total_count = results[1][0]['count']
data, filtered_out_count = query.apply_filters(data)
total_count -= filtered_out_count
callback = kwargs.get('callback', None)
if callback is None:
raise gen.Return([data, total_count])
callback([data, total_count])
@gen.coroutine
def insert(self, table, data, callback=None):
yield self.connect()
fields = []
values = []
for key in data:
fields.append(key)
value = MySql._quote(data[key])
values.append(value)
sql = "INSERT INTO `%s` (`%s`) VALUES (%s)" % (table, '`, `'.join(fields), ', '.join(values))
# double escape % sign so we don't get an error if one of the fields
# we are trying to insert has a % in it
sql = sql.replace('%', '%%')
cur = yield self.db.execute(sql)
insert_id = cur.lastrowid
if callback is None:
raise gen.Return(insert_id)
callback(insert_id)
    @gen.coroutine
    def update(self, table, data, changes, primary_key, callback=None):
        """Update the columns listed in *changes* for one row of *table*.

        *data* maps column names to their new values.  *primary_key* is a
        single column name, or a list of names for a compound key, and is
        used to build the WHERE clause.  Resolves to the execute() result,
        or ``False`` when there is nothing to change.
        """
        if len(changes) == 0:
            raise gen.Return(False)
        yield self.connect()
        if 'modified_on' in data:
            # NOTE(review): this mutates the caller's *changes* list and can
            # append 'modified_on' even when it is already listed, producing
            # a duplicate SET pair -- confirm this is intended.
            changes.append('modified_on')
            data['modified_on'] = 'NOW()'
        pairs = []
        compound_primary_key = isinstance(primary_key, list)
        for key in changes:
            # Key columns anchor the WHERE clause below; never rewrite them.
            if compound_primary_key and key in primary_key:
                continue
            if key == primary_key:
                continue
            pairs.append("`%s` = %s" % (key, MySql._quote(data[key])))
        if not compound_primary_key:
            # Normalize to a list so the WHERE loop handles both cases.
            primary_key = [primary_key]
        where_bits = []
        for key in primary_key:
            where_bits.append("`%s` = %s" % (key, MySql._quote(data[key])))
        sql = "UPDATE `%s` SET %s WHERE %s" % (table, ', '.join(pairs), ' AND '.join(where_bits))
        result = yield self.db.execute(sql)
        if callback is None:
            raise gen.Return(result)
        callback(result)
class QueryFilter(object):
    """A single in-memory comparison applied to rows after they are fetched.

    The comparison operator is matched case-insensitively; an operator that
    is not recognised never rejects a row.
    """

    TYPE_EQUAL = '='
    TYPE_NOT_EQUAL = '!='
    TYPE_GREATER_THAN = '>'
    TYPE_GREATER_THAN_OR_EQUAL = '>='
    TYPE_LESS_THAN = '<'
    TYPE_LESS_THAN_OR_EQUAL = '<='
    TYPE_IN = 'in'
    TYPE_NOT_IN = 'not in'

    def __init__(self, key, comparison, value):
        self.key = key
        self.comparison = comparison.lower()
        self.value = value

    def matches(self, row):
        """Return whether *row* passes this filter."""
        checks = {
            self.TYPE_EQUAL: lambda a, b: a == b,
            self.TYPE_NOT_EQUAL: lambda a, b: a != b,
            self.TYPE_GREATER_THAN: lambda a, b: a > b,
            self.TYPE_GREATER_THAN_OR_EQUAL: lambda a, b: a >= b,
            self.TYPE_LESS_THAN: lambda a, b: a < b,
            self.TYPE_LESS_THAN_OR_EQUAL: lambda a, b: a <= b,
            self.TYPE_IN: lambda a, b: a in b,
            self.TYPE_NOT_IN: lambda a, b: a not in b,
        }
        check = checks.get(self.comparison)
        if check is None:
            # Unknown comparison operators are permissive, as before.
            return True
        return check(row[self.key], self.value)
class Query(object):
    """Builder for a raw SQL string with late-bound values, optional
    LIMIT/OFFSET pagination and optional post-fetch in-memory filters.

    Placeholders (e.g. ``:name``) registered through :meth:`bind` are
    replaced by quoted values when :attr:`sql` is read; the ``:table``
    placeholder is special-cased and wrapped in backticks instead.
    """

    def __init__(self, sql):
        self._sql = sql
        self.count_sql = None  # built lazily by the ``sql`` property
        self.to_bind = {}
        self.limit = None
        self.offset = None
        self.filters = []

    def bind(self, key, value):
        """Register *value* for placeholder *key*; chainable."""
        self.to_bind[key] = value
        return self

    def filter(self, key, comparison, value):
        """Add an in-memory ``QueryFilter``; chainable."""
        self.filters.append(QueryFilter(key, comparison, value))
        return self

    def all_filters_allow(self, row):
        """Return True when *row* passes every registered filter."""
        for f in self.filters:
            if not f.matches(row):
                return False
        return True

    def apply_filters(self, data):
        """Filter *data* in memory; return ``(kept_rows, removed_count)``.

        Combining filters with SQL-side pagination would make the reported
        total wrong, so that combination raises ``error.StormError``.  With
        no filters registered, the data passes through untouched.
        """
        # Only reject pagination when there are actually filters to apply.
        # Previously this raised whenever limit/offset was set, breaking
        # plain paginated queries that had no filters at all.
        if self.filters and (self.limit or self.offset):
            raise error.StormError("""You cannot apply filters when using page
            and page_size to limit the mysql data""")
        if len(data) == 0 or len(self.filters) == 0:
            return (data, 0)
        new_data = []
        num_removed = 0
        for row in data:
            if self.all_filters_allow(row):
                new_data.append(row)
                continue
            num_removed += 1
        return (new_data, num_removed)

    @property
    def sql(self):
        """Render the final SQL string.

        Idempotent: ``to_bind`` is no longer mutated, so the property may be
        read more than once and still substitute ``:table`` correctly.
        """
        sql = self._sql
        if ':table' in self.to_bind:
            sql = sql.replace(':table', "`%s`" % self.to_bind[':table'])
        for key in self.to_bind:
            if key == ':table':
                continue  # already handled above; must not be value-quoted
            sql = sql.replace(key, MySql._quote(self.to_bind[key]))
        if self.limit:
            # Derive the COUNT query from everything after the first FROM.
            self.count_sql = "SELECT count(*) count FROM %s" % sql.split(' FROM ')[1]
            sql += " LIMIT %d" % self.limit
        if self.offset:
            sql += " OFFSET %d" % self.offset
        return sql
class ConnectionPool(ConnectionPool):
    """MySQL-backed connection pool.

    NOTE(review): the class intentionally shadows the imported
    ``ConnectionPool`` base it subclasses -- presumably a registration
    pattern used by the surrounding framework; confirm before renaming.
    """

    def __init__(self, connection, count=10, lifetime=3600):
        """Build one shared ``MySql`` facade backed by a tornado_mysql Pool.

        *count* idle connections are kept, recycled after *lifetime*
        seconds, with headroom of 10 extra open connections.
        """
        super(ConnectionPool, self).__init__(connection, count, lifetime)
        db = MySql(self.connection)
        db.db = Pool(
            dict(user=connection.user,
                 passwd=connection.password,
                 db=connection.database,
                 cursorclass=cursor_type),
            max_idle_connections=self.count,
            max_recycle_sec=self.lifetime,
            max_open_connections=self.count+10)
        # The Pool manages real connections, so the facade is always
        # considered connected.
        db.is_connected = True
        self._db = db

    def get_db_class(self):
        """Return the DB facade class this pool hands out."""
        return MySql

    @gen.coroutine
    def get_db(self, callback=None):
        """Hand back the shared MySql facade (callback or coroutine style)."""
        if callback is not None:
            callback(self._db)
            return
        raise gen.Return(self._db)
|
|
import urllib2
import urlparse
from datetime import datetime, timedelta
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import unittest
from django.test.client import Client
from djpubsubhubbub.models import Subscription, SubscriptionManager
from djpubsubhubbub.signals import pre_subscribe, verified, updated
class MockResponse(object):
    """Stand-in for a urllib2 response: a fixed status plus a one-shot body."""

    def __init__(self, status, data=None):
        self.status = status
        self.code = status
        self.data = data

    def info(self):
        """The response acts as its own header/info object."""
        return self

    def read(self):
        """Return the body on the first call; every later call yields ''."""
        if self.data is None:
            return ''
        payload = self.data
        self.data = None
        return payload
class PSHBTestBase(unittest.TestCase):
    """Shared fixture for the PubSubHubbub tests.

    Stubs out ``SubscriptionManager._send_request`` so no network traffic
    happens, and records outgoing requests, canned responses and the
    signals fired during each test.
    """

    def setUp(self):
        self._old_send_request = SubscriptionManager._send_request
        SubscriptionManager._send_request = self._send_request
        self.client = Client()
        self.responses = []  # stack of MockResponse objects, popped LIFO
        self.requests = []   # (url, data, headers) tuples we "sent"
        self.signals = []    # (signal, kwargs) pairs observed
        # Remove attached (external) signals
        self.remove_external_signals()
        # Setup local signals
        for connecter in pre_subscribe, verified, updated:
            def callback(signal=None, **kwargs):
                self.signals.append((signal, kwargs))
            connecter.connect(callback, dispatch_uid=connecter, weak=False)

    def tearDown(self):
        SubscriptionManager._send_request = self._old_send_request
        del self._old_send_request
        # Disconnect every signal hooked up in setUp.  Previously ``updated``
        # was left out of this loop, leaking its listener across tests.
        for signal in pre_subscribe, verified, updated:
            signal.disconnect(dispatch_uid=signal)

    def _send_request(self, url, data, headers={}, debug=False):
        """Test double for SubscriptionManager._send_request.

        Records the call and pops the next canned response off the stack.
        """
        self.requests.append((url, data, headers))
        return self.responses.pop()

    def remove_external_signals(self):
        """Detach project-level listeners named in settings so tests only
        observe their own callbacks."""
        uids = getattr(settings, 'PUBSUBHUBBUB_DISCONNECT_SIGNALS', [])
        for uid in uids:
            for connecter in pre_subscribe, verified, updated:
                connecter.disconnect(dispatch_uid=uid)
class PSHBSubscriptionManagerTest(PSHBTestBase):
    """Exercises ``Subscription.objects.do_action`` against a mocked hub."""

    def test_sync_verify(self):
        """
        If the hub returns a 204 response, the subscription is verified and
        active.
        """
        # Clear out all subscriptions first
        Subscription.objects.all().delete()
        self.responses.append(MockResponse(204))
        sub = Subscription.objects.do_action('topic', 'hub', 'callback', 2000)
        # Exactly one pre_subscribe signal for the newly created subscription.
        self.assertEquals(len(self.signals), 1)
        self.assertEquals(self.signals[0], (pre_subscribe, {'sender': sub,
                                                            'created': True}))
        self.assertEquals(sub.hub, 'hub')
        self.assertEquals(sub.topic, 'topic')
        # NOTE(review): verified/is_subscribed stay False here even though
        # the hub answered 204 -- presumably the view flips them later;
        # confirm against the manager implementation.
        self.assertEquals(sub.verified, False)
        self.assertEquals(sub.is_subscribed, False)
        rough_expires = datetime.now() + timedelta(seconds=2000)
        self.assert_(abs(sub.lease_expires - rough_expires).seconds < 5,
                     'lease more than 5 seconds off')
        # One outgoing request carrying the standard PSHB form fields.
        self.assertEquals(len(self.requests), 1)
        request = self.requests[0]
        self.assertEquals(request[0], 'hub')
        self.assertEquals(request[1]['mode'], 'subscribe')
        self.assertEquals(request[1]['topic'], 'topic')
        self.assertEquals(request[1]['callback'], 'callback')
        self.assertEquals(request[1]['verify'], 'sync')
        self.assertEquals(request[1]['verify_token'], sub.verify_token)
        self.assertEquals(request[1]['lease_seconds'], 2000)

    def test_async_verify(self):
        """
        If the hub returns a 202 response, we should not assume the
        subscription is verified.
        """
        # Clear out all subscriptions first
        Subscription.objects.all().delete()
        self.responses.append(MockResponse(202))
        sub = Subscription.objects.do_action(
            'topic',
            'hub',
            'callback',
            2000,
            verify='async',
        )
        self.assertEquals(len(self.signals), 1)
        self.assertEquals(self.signals[0], (pre_subscribe, {'sender': sub,
                                                            'created': True}))
        self.assertEquals(sub.hub, 'hub')
        self.assertEquals(sub.topic, 'topic')
        self.assertEquals(sub.verified, False)
        self.assertEquals(sub.is_subscribed, False)
        rough_expires = datetime.now() + timedelta(seconds=2000)
        self.assert_(abs(sub.lease_expires - rough_expires).seconds < 5,
                     'lease more than 5 seconds off')
        self.assertEquals(len(self.requests), 1)
        request = self.requests[0]
        self.assertEquals(request[0], 'hub')
        self.assertEquals(request[1]['mode'], 'subscribe')
        self.assertEquals(request[1]['topic'], 'topic')
        self.assertEquals(request[1]['callback'], 'callback')
        # Async mode is reflected in the outgoing 'verify' field.
        self.assertEquals(request[1]['verify'], 'async')
        self.assertEquals(request[1]['verify_token'], sub.verify_token)
        self.assertEquals(request[1]['lease_seconds'], 2000)

    # NOTE(review): method name looks like a typo for
    # "test_lease_seconds_default" -- renaming would not change behavior.
    def test_least_seconds_default(self):
        """
        If the number of seconds to lease the subscription is not specified, it
        should default to 2592000 (30 days).
        """
        self.responses.append(MockResponse(202))
        sub = Subscription.objects.do_action('topic', 'hub', 'callback')
        rough_expires = datetime.now() + timedelta(seconds=2592000)
        self.assert_(abs(sub.lease_expires - rough_expires).seconds < 5,
                     'lease more than 5 seconds off')
        self.assertEquals(len(self.requests), 1)
        request = self.requests[0]
        self.assertEquals(request[1]['lease_seconds'], 2592000)

    def test_error_on_subscribe_raises_URLError(self):
        """
        If a non-202/204 status is returned, raise a URLError.
        """
        self.responses.append(MockResponse(500, 'error data'))
        try:
            Subscription.objects.do_action('topic', 'hub', 'callback')
        except urllib2.URLError, e:
            # The hub's error body is surfaced in the exception reason.
            self.assertEquals(
                e.reason,
                'error with mode "subscribe" to topic on hub:\nerror data')
        else:
            self.fail('subscription did not raise URLError exception')
class PSHBCallbackViewTestCase(PSHBTestBase):
    """GET requests against the callback view (hub verification handshake)."""

    def test_verify(self):
        """
        Getting the callback from the server should verify the subscription.
        """
        sub, _ = Subscription.objects.get_or_create(
            topic='topic',
            hub='hub',
            verified=False)
        verify_token = sub.generate_token('subscribe')
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(sub.pk,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic,
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token})
        # The view must echo the challenge back to the hub verbatim.
        self.assertEquals(response.status_code, 200)
        self.assertEquals(response.content, 'challenge')
        # Re-fetch: the stored subscription is now verified and active.
        sub = Subscription.objects.get(pk=sub.pk)
        self.assertEquals(sub.verified, True)
        self.assertEquals(sub.is_subscribed, True)
        self.assertEquals(len(self.signals), 1)
        self.assertEquals(self.signals[0], (verified, {'sender': sub}))

    def test_404(self):
        """
        Various things should return a 404:
        * invalid primary key in the URL
        * token doesn't start with 'subscribe'
        * subscription doesn't exist
        * token doesn't match the subscription
        """
        sub, _ = Subscription.objects.get_or_create(
            topic='topic',
            hub='hub',
            verified=False)
        verify_token = sub.generate_token('subscribe')
        # Case 1: nonexistent primary key (0) in the callback URL.
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(0,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic,
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token[1:]})
        self.assertEquals(response.status_code, 404)
        self.assertEquals(len(self.signals), 0)
        # Case 2: valid pk, but token missing its first character.
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(sub.pk,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic,
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token[1:]})
        self.assertEquals(response.status_code, 404)
        self.assertEquals(len(self.signals), 0)
        # Case 3: topic does not match the stored subscription.
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(sub.pk,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic + 'extra',
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token})
        self.assertEquals(response.status_code, 404)
        self.assertEquals(len(self.signals), 0)
        # Case 4: token truncated at the end, so it no longer matches.
        response = self.client.get(reverse('pubsubhubbub_callback',
                                           args=(sub.pk,)),
                                   {'hub.mode': 'subscribe',
                                    'hub.topic': sub.topic,
                                    'hub.challenge': 'challenge',
                                    'hub.lease_seconds': 2000,
                                    'hub.verify_token': verify_token[:-5]})
        self.assertEquals(response.status_code, 404)
        self.assertEquals(len(self.signals), 0)
class PSHBUpdateTestCase(PSHBTestBase):
    """POSTing Atom payloads to the callback view.

    Each POST should fire the ``updated`` signal; when the feed's advertised
    hub/self links differ from the stored subscription, the app should
    re-subscribe at the advertised location.
    """

    def test_update(self):
        # this data comes from
        # http://pubsubhubbub.googlecode.com/svn/trunk/pubsubhubbub-core-0.1.html#anchor3
        update_data = """<?xml version="1.0"?>
<atom:feed>
<!-- Normally here would be source, title, etc ... -->
<link rel="hub" href="http://myhub.example.com/endpoint" />
<link rel="self" href="http://publisher.example.com/happycats.xml" />
<updated>2008-08-11T02:15:01Z</updated>
<!-- Example of a full entry. -->
<entry>
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<content>
What a happy cat. Full content goes here.
</content>
</entry>
<!-- Example of an entity that isn't full/is truncated. This is implied
by the lack of a <content> element and a <summary> element instead. -->
<entry >
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<summary>
What a happy cat!
</summary>
</entry>
<!-- Meta-data only; implied by the lack of <content> and
<summary> elements. -->
<entry>
<title>Garfield</title>
<link rel="alternate" href="http://publisher.example.com/happycat24.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
</entry>
<!-- Context entry that's meta-data only and not new. Implied because the
update time on this entry is before the //atom:feed/updated time. -->
<entry>
<title>Nermal</title>
<link rel="alternate" href="http://publisher.example.com/happycat23s.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-07-10T12:28:13Z</updated>
</entry>
</atom:feed>
"""
        # Subscription matches the feed's advertised hub/self links exactly.
        sub, _ = Subscription.objects.get_or_create(
            hub="http://myhub.example.com/endpoint",
            topic="http://publisher.example.com/happycats.xml")
        callback_data = []
        # Capture the (sender, update) payload of the `updated` signal.
        updated.connect(
            lambda sender=None, update=None, **kwargs: callback_data.append(
                (sender, update)),
            weak=False)
        response = self.client.post(reverse('pubsubhubbub_callback',
                                            args=(sub.pk,)),
                                    update_data, 'application/atom+xml')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(callback_data), 1)
        sender, update = callback_data[0]
        self.assertEquals(sender, sub)
        # All four entries are parsed, sharing the same (spec-sample) id.
        self.assertEquals(len(update.entries), 4)
        self.assertEquals(update.entries[0].id,
                          'http://publisher.example.com/happycat25.xml')
        self.assertEquals(update.entries[1].id,
                          'http://publisher.example.com/happycat25.xml')
        self.assertEquals(update.entries[2].id,
                          'http://publisher.example.com/happycat25.xml')
        self.assertEquals(update.entries[3].id,
                          'http://publisher.example.com/happycat25.xml')

    def test_update_with_changed_hub(self):
        # The stored subscription's hub ('hub') differs from the feed's
        # advertised hub link; a re-subscription at the new hub is expected.
        update_data = """<?xml version="1.0"?>
<atom:feed>
<!-- Normally here would be source, title, etc ... -->
<link rel="hub" href="http://myhub.example.com/endpoint" />
<link rel="self" href="http://publisher.example.com/happycats.xml" />
<updated>2008-08-11T02:15:01Z</updated>
<entry>
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<content>
What a happy cat. Full content goes here.
</content>
</entry>
</atom:feed>
"""
        sub, _ = Subscription.objects.get_or_create(
            hub="hub",
            topic="http://publisher.example.com/happycats.xml",
            lease_expires=datetime.now() + timedelta(days=1))
        callback_data = []
        updated.connect(
            lambda sender=None, update=None, **kwargs: callback_data.append(
                (sender, update)),
            weak=False)
        # Canned 204 for the re-subscription request triggered by the change.
        self.responses.append(MockResponse(204))
        response = self.client.post(reverse('pubsubhubbub_callback',
                                            args=(sub.pk,)),
                                    update_data, 'application/atom+xml')
        self.assertEquals(response.status_code, 200)
        # A subscription now exists at the advertised hub/topic pair.
        self.assertEquals(
            Subscription.objects.filter(
                hub='http://myhub.example.com/endpoint',
                topic='http://publisher.example.com/happycats.xml',
            ).count(), 1)
        self.assertEquals(len(self.requests), 1)
        self.assertEquals(self.requests[0][0],
                          'http://myhub.example.com/endpoint')
        callback_url = urlparse.urljoin(
            'http://testserver',
            reverse('pubsubhubbub_callback', args=(sub.pk,)),
        )
        self.assertEquals(self.requests[0][1]['callback'], callback_url)
        # Remaining lease (~1 day) is carried over to the new subscription.
        self.assert_((self.requests[0][1]['lease_seconds'] - 86400) < 5)

    def test_update_with_changed_self(self):
        # The stored topic ('topic') differs from the feed's 'self' link.
        update_data = """<?xml version="1.0"?>
<atom:feed>
<!-- Normally here would be source, title, etc ... -->
<link rel="hub" href="http://myhub.example.com/endpoint" />
<link rel="self" href="http://publisher.example.com/happycats.xml" />
<updated>2008-08-11T02:15:01Z</updated>
<entry>
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<content>
What a happy cat. Full content goes here.
</content>
</entry>
</atom:feed>
"""
        sub, _ = Subscription.objects.get_or_create(
            hub="http://myhub.example.com/endpoint",
            topic="topic",
            lease_expires=datetime.now() + timedelta(days=1))
        callback_data = []
        updated.connect(
            lambda sender=None, update=None, **kwargs: callback_data.append(
                (sender, update)),
            weak=False)
        self.responses.append(MockResponse(204))
        response = self.client.post(reverse('pubsubhubbub_callback',
                                            args=(sub.pk,)),
                                    update_data, 'application/atom+xml')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(
            Subscription.objects.filter(
                hub='http://myhub.example.com/endpoint',
                topic='http://publisher.example.com/happycats.xml',
            ).count(), 1)
        self.assertEquals(len(self.requests), 1)
        self.assertEquals(self.requests[0][0],
                          'http://myhub.example.com/endpoint')
        callback_url = urlparse.urljoin(
            'http://testserver',
            reverse('pubsubhubbub_callback', args=(sub.pk,)),
        )
        self.assertEquals(self.requests[0][1]['callback'], callback_url)
        self.assert_((self.requests[0][1]['lease_seconds'] - 86400) < 5)

    def test_update_with_changed_hub_and_self(self):
        # Both the stored hub and topic differ from the feed's links.
        update_data = """<?xml version="1.0"?>
<atom:feed>
<!-- Normally here would be source, title, etc ... -->
<link rel="hub" href="http://myhub.example.com/endpoint" />
<link rel="self" href="http://publisher.example.com/happycats.xml" />
<updated>2008-08-11T02:15:01Z</updated>
<entry>
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<content>
What a happy cat. Full content goes here.
</content>
</entry>
</atom:feed>
"""
        sub, _ = Subscription.objects.get_or_create(
            hub="hub",
            topic="topic",
            lease_expires=datetime.now() + timedelta(days=1))
        callback_data = []
        updated.connect(
            lambda sender=None, update=None, **kwargs: callback_data.append(
                (sender, update)),
            weak=False)
        self.responses.append(MockResponse(204))
        response = self.client.post(reverse('pubsubhubbub_callback',
                                            args=(sub.pk,)),
                                    update_data, 'application/atom+xml')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(
            Subscription.objects.filter(
                hub='http://myhub.example.com/endpoint',
                topic='http://publisher.example.com/happycats.xml',
            ).count(), 1)
        self.assertEquals(len(self.requests), 1)
        self.assertEquals(self.requests[0][0],
                          'http://myhub.example.com/endpoint')
        callback_url = urlparse.urljoin(
            'http://testserver',
            reverse('pubsubhubbub_callback', args=(sub.pk,)),
        )
        self.assertEquals(self.requests[0][1]['callback'], callback_url)
        self.assert_((self.requests[0][1]['lease_seconds'] - 86400) < 5)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python wrapper around the C extension for the pair counter in
``theory/DDsmu/``. This wrapper is in :py:mod:`Corrfunc.theory.DDsmu`
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__author__ = ('Manodeep Sinha', 'Nick Hand')
__all__ = ('DDsmu', )
def DDsmu(autocorr, nthreads, binfile, mu_max, nmu_bins,
          X1, Y1, Z1, weights1=None, periodic=True, boxsize=None,
          X2=None, Y2=None, Z2=None, weights2=None,
          verbose=False, output_savg=False,
          fast_divide_and_NR_steps=0,
          xbin_refine_factor=2, ybin_refine_factor=2,
          zbin_refine_factor=1, max_cells_per_dim=100,
          copy_particles=True, enable_min_sep_opt=True,
          c_api_timer=False, isa=r'fastest', weight_type=None):
    """
    Calculate the 2-D pair-counts corresponding to the redshift-space
    correlation function, :math:`\\xi(s, \mu)` Pairs which are separated
    by less than the ``s`` bins (specified in ``binfile``) in 3-D, and
    less than ``s*mu_max`` in the Z-dimension are counted.

    If ``weights`` are provided, the mean pair weight is stored in the
    ``"weightavg"`` field of the results array. The raw pair counts in the
    ``"npairs"`` field are not weighted. The weighting scheme depends on
    ``weight_type``.

    .. note:: This module only returns pair counts and not the actual
       correlation function :math:`\\xi(s, \mu)`. See the
       utilities :py:mod:`Corrfunc.utils.convert_3d_counts_to_cf`
       for computing :math:`\\xi(s, \mu)` from the pair counts.

    .. versionadded:: 2.1.0

    Parameters
    ----------
    autocorr: boolean, required
        Boolean flag for auto/cross-correlation. If autocorr is set to 1,
        then the second set of particle positions are not required.

    nthreads: integer
        The number of OpenMP threads to use. Has no effect if OpenMP was not
        enabled during library compilation.

    binfile: string or an list/array of floats
        For string input: filename specifying the ``s`` bins for
        ``DDsmu_mocks``. The file should contain white-space separated values
        of (smin, smax) specifying each ``s`` bin wanted. The bins
        need to be contiguous and sorted in increasing order (smallest bins
        come first).

        For array-like input: A sequence of ``s`` values that provides the
        bin-edges. For example,
        ``np.logspace(np.log10(0.1), np.log10(10.0), 15)`` is a valid
        input specifying **14** (logarithmic) bins between 0.1 and 10.0. This
        array does not need to be sorted.

    mu_max: double. Must be in range (0.0, 1.0]
        A double-precision value for the maximum cosine of the angular
        separation from the line of sight (LOS). Here, LOS is taken to be
        along the Z direction.

        Note: Only pairs with :math:`0 <= \cos(\\theta_{LOS}) < \mu_{max}`
        are counted (no equality).

    nmu_bins: int
        The number of linear ``mu`` bins, with the bins ranging from
        from (0, :math:`\mu_{max}`)

    X1/Y1/Z1 : array-like, real (float/double)
        The array of X/Y/Z positions for the first set of points.
        Calculations are done in the precision of the supplied arrays.

    weights1: array_like, real (float/double), optional
        A scalar, or an array of weights of shape (n_weights, n_positions) or
        (n_positions,). ``weight_type`` specifies how these weights are used;
        results are returned in the ``weightavg`` field. If only one of
        weights1 and weights2 is specified, the other will be set to uniform
        weights.

    periodic : boolean
        Boolean flag to indicate periodic boundary conditions.

    boxsize : double, required if ``periodic=True``
        The side-length of the cube in the cosmological simulation.
        Present to facilitate exact calculations for periodic wrapping.
        If boxsize is 0., then the wrapping is done based on
        the maximum difference within each dimension of the X/Y/Z arrays.

        .. versionchanged:: 2.4.0
           Required if ``periodic=True``.

    X2/Y2/Z2 : array-like, real (float/double)
        Array of XYZ positions for the second set of points. *Must* be the same
        precision as the X1/Y1/Z1 arrays. Only required when ``autocorr==0``.

    weights2: array-like, real (float/double), optional
        Same as weights1, but for the second set of positions

    verbose : boolean (default false)
        Boolean flag to control output of informational messages

    output_savg : boolean (default false)
        Boolean flag to output the average ``s`` for each bin. Code will
        run slower if you set this flag. Also, note, if you are calculating
        in single-precision, ``s`` will suffer from numerical loss of
        precision and can not be trusted. If you need accurate ``s``
        values, then pass in double precision arrays for the particle
        positions.

    fast_divide_and_NR_steps: integer (default 0)
        Replaces the division in ``AVX`` implementation with an approximate
        reciprocal, followed by ``fast_divide_and_NR_steps`` of Newton-Raphson.
        Can improve runtime by ~15-20% on older computers. Value of 0 uses
        the standard division operation.

    (xyz)bin_refine_factor: integer (default (2,2,1) typical values in [1-3])
        Controls the refinement on the cell sizes. Can have up to a 20% impact
        on runtime.

    max_cells_per_dim: integer (default 100, typical values in [50-300])
        Controls the maximum number of cells per dimension. Total number of
        cells can be up to (max_cells_per_dim)^3. Only increase if ``rmax`` is
        too small relative to the boxsize (and increasing helps the runtime).

    copy_particles: boolean (default True)
        Boolean flag to make a copy of the particle positions
        If set to False, the particles will be re-ordered in-place

        .. versionadded:: 2.3.0

    enable_min_sep_opt: boolean (default true)
        Boolean flag to allow optimizations based on min. separation between
        pairs of cells. Here to allow for comparison studies.

        .. versionadded:: 2.3.0

    c_api_timer : boolean (default false)
        Boolean flag to measure actual time spent in the C libraries. Here
        to allow for benchmarking and scaling studies.

    isa: string (default ``fastest``)
        Controls the runtime dispatch for the instruction set to use. Options
        are: [``fastest``, ``avx512f``, ``avx``, ``sse42``, ``fallback``]

        Setting isa to ``fastest`` will pick the fastest available instruction
        set on the current computer. However, if you set ``isa`` to, say,
        ``avx`` and ``avx`` is not available on the computer, then the code
        will revert to using ``fallback`` (even though ``sse42`` might be
        available). Unless you are benchmarking the different instruction
        sets, you should always leave ``isa`` to the default value. And if
        you *are* benchmarking, then the string supplied here gets translated
        into an ``enum`` for the instruction set defined in ``utils/defs.h``.

    weight_type : str, optional
        The type of pair weighting to apply.
        Options: "pair_product", None; Default: None.

    Returns
    --------
    results : A python list
        A python list containing ``nmu_bins`` of [smin, smax, savg, mu_max,
        npairs, weightavg] for each spatial bin specified in the ``binfile``.
        There will be a total of ``nmu_bins`` ranging from [0, ``mu_max``)
        *per* spatial bin. If ``output_savg`` is not set, then ``savg`` will
        be set to 0.0 for all bins; similarly for ``weight_avg``. ``npairs``
        contains the number of pairs in that bin.

    api_time: float, optional
        Only returned if ``c_api_timer`` is set. ``api_time`` measures only
        the time spent within the C library and ignores all python overhead.

    Example
    -------
    >>> from __future__ import print_function
    >>> import numpy as np
    >>> from os.path import dirname, abspath, join as pjoin
    >>> import Corrfunc
    >>> from Corrfunc.theory.DDsmu import DDsmu
    >>> binfile = pjoin(dirname(abspath(Corrfunc.__file__)),
    ...                 "../theory/tests/", "bins")
    >>> N = 10000
    >>> boxsize = 420.0
    >>> nthreads = 4
    >>> autocorr = 1
    >>> mu_max = 1.0
    >>> seed = 42
    >>> nmu_bins = 10
    >>> np.random.seed(seed)
    >>> X = np.random.uniform(0, boxsize, N)
    >>> Y = np.random.uniform(0, boxsize, N)
    >>> Z = np.random.uniform(0, boxsize, N)
    >>> weights = np.ones_like(X)
    >>> results = DDsmu(autocorr, nthreads, binfile, mu_max, nmu_bins,
    ...                 X, Y, Z, weights1=weights, weight_type='pair_product',
    ...                 output_savg=True, boxsize=boxsize, periodic=True)
    >>> for r in results[100:]: print("{0:10.6f} {1:10.6f} {2:10.6f} {3:10.1f}"
    ...                               " {4:10d} {5:10.6f}".format(r['smin'], r['smax'],
    ...                               r['savg'], r['mu_max'], r['npairs'], r['weightavg']))
    ... # doctest: +NORMALIZE_WHITESPACE
    5.788530 8.249250 7.149762 0.1 230 1.000000
    5.788530 8.249250 7.158884 0.2 236 1.000000
    5.788530 8.249250 7.153403 0.3 210 1.000000
    5.788530 8.249250 7.091504 0.4 254 1.000000
    5.788530 8.249250 7.216417 0.5 182 1.000000
    5.788530 8.249250 7.120980 0.6 222 1.000000
    5.788530 8.249250 7.086361 0.7 238 1.000000
    5.788530 8.249250 7.199075 0.8 170 1.000000
    5.788530 8.249250 7.128768 0.9 208 1.000000
    5.788530 8.249250 6.973382 1.0 206 1.000000
    8.249250 11.756000 10.147488 0.1 590 1.000000
    8.249250 11.756000 10.216417 0.2 634 1.000000
    8.249250 11.756000 10.195979 0.3 532 1.000000
    8.249250 11.756000 10.248775 0.4 544 1.000000
    8.249250 11.756000 10.091439 0.5 530 1.000000
    8.249250 11.756000 10.282170 0.6 642 1.000000
    8.249250 11.756000 10.245368 0.7 666 1.000000
    8.249250 11.756000 10.139694 0.8 680 1.000000
    8.249250 11.756000 10.190839 0.9 566 1.000000
    8.249250 11.756000 10.241730 1.0 606 1.000000
    11.756000 16.753600 14.553911 0.1 1736 1.000000
    11.756000 16.753600 14.576144 0.2 1800 1.000000
    11.756000 16.753600 14.595632 0.3 1798 1.000000
    11.756000 16.753600 14.477071 0.4 1820 1.000000
    11.756000 16.753600 14.479887 0.5 1740 1.000000
    11.756000 16.753600 14.492835 0.6 1748 1.000000
    11.756000 16.753600 14.546800 0.7 1720 1.000000
    11.756000 16.753600 14.467235 0.8 1750 1.000000
    11.756000 16.753600 14.541123 0.9 1798 1.000000
    11.756000 16.753600 14.445188 1.0 1826 1.000000
    16.753600 23.875500 20.722545 0.1 5088 1.000000
    16.753600 23.875500 20.730212 0.2 5000 1.000000
    16.753600 23.875500 20.717056 0.3 5166 1.000000
    16.753600 23.875500 20.727119 0.4 5014 1.000000
    16.753600 23.875500 20.654365 0.5 5094 1.000000
    16.753600 23.875500 20.695877 0.6 5082 1.000000
    16.753600 23.875500 20.729774 0.7 4900 1.000000
    16.753600 23.875500 20.718821 0.8 4874 1.000000
    16.753600 23.875500 20.750061 0.9 4946 1.000000
    16.753600 23.875500 20.723266 1.0 5066 1.000000
    """
    try:
        from Corrfunc._countpairs import countpairs_s_mu as DDsmu_extn
    except ImportError:
        msg = "Could not import the C extension for the 3-D "\
              "redshift-space pair counter."
        raise ImportError(msg)

    import numpy as np
    from Corrfunc.utils import translate_isa_string_to_enum,\
        return_file_with_rbins, convert_to_native_endian,\
        sys_pipes, process_weights
    from future.utils import bytes_to_native_str

    # Check if mu_max is scalar
    if not np.isscalar(mu_max):
        msg = "The parameter `mu_max` = {0}, has size = {1}. "\
              "The code is expecting a scalar quantity (and not "\
              "not a list, array)".format(mu_max, np.size(mu_max))
        raise TypeError(msg)

    # Check that mu_max is within (0.0, 1.0]
    if mu_max <= 0.0 or mu_max > 1.0:
        msg = "The parameter `mu_max` = {0}, is the max. of cosine of an "\
              "angle and should be within (0.0, 1.0]".format(mu_max)
        raise ValueError(msg)

    if not autocorr:
        if X2 is None or Y2 is None or Z2 is None:
            msg = "Must pass valid arrays for X2/Y2/Z2 for "\
                  "computing cross-correlation"
            raise ValueError(msg)

    if periodic and boxsize is None:
        raise ValueError("Must specify a boxsize if periodic=True")

    weights1, weights2 = process_weights(weights1, weights2, X1, X2, weight_type, autocorr)

    # Ensure all input arrays are native endian
    X1, Y1, Z1, weights1, X2, Y2, Z2, weights2 = [
        convert_to_native_endian(arr, warn=True) for arr in
        [X1, Y1, Z1, weights1, X2, Y2, Z2, weights2]]

    # Passing None parameters breaks the parsing code, so avoid this
    kwargs = {}
    for k in ['weights1', 'weights2', 'weight_type',
              'X2', 'Y2', 'Z2', 'boxsize']:
        v = locals()[k]
        if v is not None:
            kwargs[k] = v

    integer_isa = translate_isa_string_to_enum(isa)
    sbinfile, delete_after_use = return_file_with_rbins(binfile)
    try:
        with sys_pipes():
            extn_results = DDsmu_extn(autocorr, nthreads,
                                      sbinfile,
                                      mu_max, nmu_bins,
                                      X1, Y1, Z1,
                                      periodic=periodic,
                                      verbose=verbose,
                                      output_savg=output_savg,
                                      fast_divide_and_NR_steps=fast_divide_and_NR_steps,
                                      xbin_refine_factor=xbin_refine_factor,
                                      ybin_refine_factor=ybin_refine_factor,
                                      zbin_refine_factor=zbin_refine_factor,
                                      max_cells_per_dim=max_cells_per_dim,
                                      copy_particles=copy_particles,
                                      enable_min_sep_opt=enable_min_sep_opt,
                                      c_api_timer=c_api_timer,
                                      isa=integer_isa, **kwargs)
    finally:
        # Clean up the temporary bin file on every exit path; previously it
        # leaked whenever the C extension raised.
        if delete_after_use:
            import os
            os.remove(sbinfile)

    if extn_results is None:
        msg = "RuntimeError occurred"
        raise RuntimeError(msg)
    else:
        extn_results, api_time = extn_results

    results_dtype = np.dtype([(bytes_to_native_str(b'smin'), np.float64),
                              (bytes_to_native_str(b'smax'), np.float64),
                              (bytes_to_native_str(b'savg'), np.float64),
                              (bytes_to_native_str(b'mu_max'), np.float64),
                              (bytes_to_native_str(b'npairs'), np.uint64),
                              (bytes_to_native_str(b'weightavg'), np.float64),
                              ])
    results = np.array(extn_results, dtype=results_dtype)

    if not c_api_timer:
        return results
    else:
        return results, api_time
# Run the module doctests (the DDsmu docstring example) when executed
# directly as a script.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import compute_class_weight
from sklearn.utils.fixes import sp_version
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.model_selection import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
# Tiny 2-feature toy dataset shared by the tests below.
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)  # sparse view of the same data
Y1 = [0, 1, 1]  # binary labels for X
Y2 = [2, 1, 0]  # three-class labels for X
iris = load_iris()  # real multiclass dataset used throughout
def check_predictions(clf, X, y):
    """Fit ``clf`` on (X, y) and verify predictions and probabilities."""
    labels = np.unique(y)
    n_labels = labels.shape[0]
    n_points = len(y)

    y_hat = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, labels)

    # The classifier must reproduce the training labels exactly.
    assert_equal(y_hat.shape, (n_points,))
    assert_array_equal(y_hat, y)

    # Probabilities: correct shape, rows sum to 1, argmax matches labels.
    proba = clf.predict_proba(X)
    assert_equal(proba.shape, (n_points, n_labels))
    assert_array_almost_equal(proba.sum(axis=1), np.ones(n_points))
    assert_array_equal(proba.argmax(axis=1), y)
def test_predict_2_classes():
    """Sanity-check binary classification on the simple toy dataset."""
    # Same six configurations as before, expressed as a parameter sweep:
    # default, strong C, and no intercept -- each on dense and sparse data.
    for params in ({}, {'C': 100}, {'fit_intercept': False}):
        for data in (X, X_sp):
            check_predictions(LogisticRegression(random_state=0, **params),
                              data, Y1)
def test_error():
    """Invalid hyper-parameters must raise informative ValueErrors."""
    penalty_msg = "Penalty term must be positive"
    for bad_C in (-1, "test"):
        assert_raise_message(ValueError, penalty_msg,
                             LogisticRegression(C=bad_C).fit, X, Y1)

    checks = (('tol', "Tolerance for stopping criteria must be positive"),
              ('max_iter', "Maximum number of iteration must be positive"))
    for LR in [LogisticRegression, LogisticRegressionCV]:
        for param, msg in checks:
            for bad in (-1, "test"):
                assert_raise_message(ValueError, msg,
                                     LR(**{param: bad}).fit, X, Y1)
def test_predict_3_classes():
    """Sanity-check three-class classification, dense and sparse."""
    for data in (X, X_sp):
        check_predictions(LogisticRegression(C=10), data, Y2)
def test_predict_iris():
    """Multinomial and OvR solvers should score > 0.95 on iris."""
    n_samples, _ = iris.data.shape
    target = iris.target_names[iris.target]

    # One representative estimator per solver / multi_class combination.
    estimators = [
        LogisticRegression(C=len(iris.data)),
        LogisticRegression(C=len(iris.data), solver='lbfgs',
                           multi_class='multinomial'),
        LogisticRegression(C=len(iris.data), solver='newton-cg',
                           multi_class='multinomial'),
        LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
                           multi_class='ovr', random_state=42),
    ]
    for clf in estimators:
        clf.fit(iris.data, target)
        assert_array_equal(np.unique(target), clf.classes_)

        pred = clf.predict(iris.data)
        assert_greater(np.mean(pred == target), .95)

        # Rows of predict_proba are distributions and agree with predict.
        proba = clf.predict_proba(iris.data)
        assert_array_almost_equal(proba.sum(axis=1), np.ones(n_samples))
        pred = iris.target_names[proba.argmax(axis=1)]
        assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
    """A negative C must be rejected by every multinomial solver."""
    for solver in ('lbfgs', 'newton-cg', 'sag'):
        clf = LogisticRegression(C=-1, solver=solver,
                                 multi_class='multinomial')
        assert_raises(ValueError, clf.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
    """Solver / multi_class / penalty / dual combinations are validated."""
    X, y = iris.data, iris.target
    for LR in [LogisticRegression, LogisticRegressionCV]:
        # Unknown solver name.
        msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
               " and sag solvers, got wrong_name")
        assert_raise_message(ValueError, msg,
                             LR(solver="wrong_name").fit, X, y)

        # Unknown multi_class value.
        msg = "multi_class should be either multinomial or ovr, got wrong_name"
        assert_raise_message(ValueError, msg,
                             LR(solver='newton-cg',
                                multi_class="wrong_name").fit, X, y)

        # liblinear has no multinomial backend.
        msg = "Solver liblinear does not support a multinomial backend."
        assert_raise_message(ValueError, msg,
                             LR(solver='liblinear',
                                multi_class='multinomial').fit, X, y)

        # The remaining solvers accept neither l1 penalties nor dual=True.
        for solver in ['newton-cg', 'lbfgs', 'sag']:
            msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
                   solver)
            assert_raise_message(ValueError, msg,
                                 LR(solver=solver, penalty='l1').fit, X, y)
            msg = ("Solver %s supports only dual=False, got dual=True" %
                   solver)
            assert_raise_message(ValueError, msg,
                                 LR(solver=solver, dual=True).fit, X, y)
def test_multinomial_binary():
    """Multinomial LR collapses to a single coef row on a binary problem.

    For every multinomial-capable solver: the fitted model must have
    binary-shaped coefficients/intercept and predict the training set
    perfectly, and the intercept-free variant must still score > 0.9.
    """
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]

    for solver in ['lbfgs', 'newton-cg', 'sag']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial',
                                 random_state=42, max_iter=2000)
        clf.fit(iris.data, target)

        # Binary multinomial models keep a single coefficient row.
        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)

        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 random_state=42, fit_intercept=False)
        mlr.fit(iris.data, target)
        # BUG FIX: the accuracy check below previously re-used ``clf``
        # (already fully verified above), so ``mlr`` was fitted but never
        # asserted on.  Evaluate the intercept-free model instead.
        pred = mlr.classes_[np.argmax(mlr.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)
def test_sparsify():
    """decision_function must agree across sparse/dense coef and input."""
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)
    dense_dense = clf.decision_function(iris.data)

    clf.sparsify()
    assert_true(sp.issparse(clf.coef_))
    sparse_dense = clf.decision_function(iris.data)

    sp_data = sp.coo_matrix(iris.data)
    sparse_sparse = clf.decision_function(sp_data)

    clf.densify()
    dense_sparse = clf.decision_function(sp_data)

    # All four coef/input combinations must give identical scores.
    assert_array_almost_equal(dense_dense, sparse_dense)
    assert_array_almost_equal(dense_dense, sparse_sparse)
    assert_array_almost_equal(dense_dense, dense_sparse)
def test_inconsistent_input():
    """Mismatched sample counts must raise ValueError at fit and predict."""
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0

    clf = LogisticRegression(random_state=0)

    # Wrong dimensions for training data.
    y_wrong = y_[:-1]
    # BUG FIX: this check previously passed the module-level 3-sample ``X``
    # instead of the local 5-sample ``X_``, so the intended 5-vs-4 length
    # mismatch was not the one actually being tested.
    assert_raises(ValueError, clf.fit, X_, y_wrong)

    # Wrong number of features in the test data.
    assert_raises(ValueError, clf.fit(X_, y_).predict,
                  rng.random_sample((3, 12)))
def test_write_parameters():
    """coef_ and intercept_ must be writable after fitting."""
    clf = LogisticRegression(random_state=0)
    clf.fit(X, Y1)
    # Zero out the model in place; the decision function must follow suit.
    clf.coef_[:] = 0
    clf.intercept_[:] = 0
    assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
    """NaN in the input must raise instead of hanging (issue #252)."""
    data = np.array(X, dtype=np.float64)
    data[0, 1] = np.nan
    LogisticRegression(random_state=0).fit(data, Y1)
def test_consistency_path():
    """Each point of logistic_regression_path must match a standalone fit."""
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)
    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
        coefs, Cs, _ = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
            random_state=0)
        # Every C on the path must agree with a direct fit at that C.
        for i, C in enumerate(Cs):
            lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
                                    random_state=0)
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
                                      err_msg="with solver = %s" % solver)
    # test for fit_intercept=True
    # A huge intercept_scaling makes liblinear's intercept penalty
    # negligible, so all four solvers can be compared on the same fit.
    for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
        Cs = [1e3]
        coefs, Cs, _ = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
            intercept_scaling=10000., random_state=0)
        lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
                                intercept_scaling=10000., random_state=0)
        lr.fit(X, y)
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
                                  err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
    """random_state matters for liblinear only when dual=True."""
    X, y = make_classification(n_samples=20)

    def _fit(seed):
        # One deliberately-truncated dual fit so the RNG visibly matters.
        model = LogisticRegression(random_state=seed, dual=True,
                                   max_iter=1, tol=1e-15)
        model.fit(X, y)
        return model

    lr1, lr2, lr3 = _fit(0), _fit(0), _fit(8)
    # Identical seeds give identical (truncated) solutions...
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
    # ...while a different seed gives a measurably different one.
    msg = "Arrays are not almost equal to 6 decimals"
    assert_raise_message(AssertionError, msg,
                         assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
    """Validate _logistic_loss_and_grad gradients via finite differences."""
    X_ref, y = make_classification(n_samples=20)
    n_features = X_ref.shape[1]
    # Sparsified copy of the same data (shadows the module-level X_sp).
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)
        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)
        # Second check that our intercept implementation is good
        # (w gains a trailing intercept entry; with w == 0 the loss must
        # equal the no-intercept loss computed above).
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        assert_array_almost_equal(loss, loss_interp)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
    """Check _logistic_grad_hess against _logistic_loss_and_grad and a
    finite-difference estimate of one Hessian column."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    # Sparsified copy of the same data (shadows the module-level X_sp).
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = .1 * np.ones(n_features)
        # First check that _logistic_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)
        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)
        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])
        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
        # Second check that our intercept implementation is good
        # (loss/grad with an explicit intercept entry must agree between
        # the combined and the separate helper functions).
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
        loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
        grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(loss_interp, loss_interp_2)
        assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    """LogisticRegressionCV with a single C must match a plain fit."""
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    data = rng.randn(n_samples, n_features)
    y = np.sign(data.dot(5 * rng.randn(n_features)))
    data -= data.mean()
    data /= data.std()

    cv_model = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                    solver='liblinear')
    cv_model.fit(data, y)
    plain = LogisticRegression(C=1., fit_intercept=False)
    plain.fit(data, y)
    assert_array_almost_equal(plain.coef_, cv_model.coef_)

    assert_array_equal(cv_model.coef_.shape, (1, n_features))
    assert_array_equal(cv_model.classes_, [-1, 1])
    assert_equal(len(cv_model.classes_), 2)

    # Bookkeeping attributes: one class entry, 3 CV folds, one C value.
    paths = np.asarray(list(cv_model.coefs_paths_.values()))
    assert_array_equal(paths.shape, (1, 3, 1, n_features))
    assert_array_equal(cv_model.Cs_.shape, (1, ))
    fold_scores = np.asarray(list(cv_model.scores_.values()))
    assert_array_equal(fold_scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
    """Sparse and dense input must produce the same CV solution."""
    X, y = make_classification(n_samples=50, n_features=5,
                               random_state=0)
    X[X < 1.0] = 0.0

    dense_clf = LogisticRegressionCV(fit_intercept=True)
    dense_clf.fit(X, y)
    sparse_clf = LogisticRegressionCV(fit_intercept=True)
    sparse_clf.fit(sp.csr_matrix(X), y)

    assert_array_almost_equal(sparse_clf.coef_, dense_clf.coef_)
    assert_array_almost_equal(sparse_clf.intercept_, dense_clf.intercept_)
    assert_equal(sparse_clf.C_, dense_clf.C_)
def test_intercept_logistic_helper():
    """fit_intercept handling of the loss/grad/hess helpers.

    Fitting with an intercept must be equivalent to appending a constant
    ones column, except that the helpers do not penalize the intercept
    term while the explicit extra feature is L2-penalized.
    """
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)

    # Fit intercept case: the last entry of w is the intercept.
    alpha = 1.
    w = np.ones(n_features + 1)
    grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
    loss_interp = _logistic_loss(w, X, y, alpha)

    # Equivalent no-intercept formulation: append a column of ones.
    # (Previously hard-coded as np.ones(10); derive it from n_samples.)
    X_ = np.hstack((X, np.ones((n_samples, 1))))
    grad, hess = _logistic_grad_hess(w, X_, y, alpha)
    loss = _logistic_loss(w, X_, y, alpha)

    # The explicit ones column is penalized, the true intercept is not.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)

    # Gradients agree on the features; the intercept entry differs exactly
    # by the penalty term alpha * w[-1].
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])

    # The same relationship holds for Hessian-vector products.
    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
    # Test that OvR and multinomial are correct using the iris dataset.
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape
    # The cv indices from stratified kfold (where stratification is done based
    # on the fine-grained iris classes, i.e, before the classes 0 and 1 are
    # conflated) is used for both clf and clf1
    n_cv = 2
    cv = StratifiedKFold(n_cv)
    precomputed_folds = list(cv.split(train, target))
    # Train clf on the original dataset where classes 0 and 1 are separated
    clf = LogisticRegressionCV(cv=precomputed_folds)
    clf.fit(train, target)
    # Conflate classes 0 and 1 and train clf1 on this modifed dataset
    clf1 = LogisticRegressionCV(cv=precomputed_folds)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)
    # Ensure that what OvR learns for class2 is same regardless of whether
    # classes 0 and 1 are separated or not
    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    # coefs_paths_ has one extra column for the intercept.
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10, ))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, n_cv, 10))
    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg', 'sag']:
        # sag needs more iterations to converge on this problem.
        max_iter = 100 if solver == 'sag' else 15
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=max_iter,
            random_state=42, tol=1e-2, cv=2)
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)
        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10, ))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, n_cv, 10))
def test_logistic_regression_solvers():
    """All four solvers must agree on a binary problem (3 decimals)."""
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)

    models = [
        LogisticRegression(solver='newton-cg', fit_intercept=False),
        LogisticRegression(solver='lbfgs', fit_intercept=False),
        LogisticRegression(fit_intercept=False),  # liblinear default
        LogisticRegression(solver='sag', fit_intercept=False,
                           random_state=42),
    ]
    for model in models:
        model.fit(X, y)
    # Every pair of solvers must find (almost) the same coefficients.
    for i, first in enumerate(models):
        for second in models[i + 1:]:
            assert_array_almost_equal(first.coef_, second.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
    """All four solvers must agree on a 3-class problem (4 decimals)."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    tol = 1e-6
    models = [
        LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol),
        LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol),
        LogisticRegression(fit_intercept=False, tol=tol),  # liblinear
        LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
                           max_iter=1000, random_state=42),
    ]
    for model in models:
        model.fit(X, y)
    # Every pair of solvers must find (almost) the same coefficients.
    for i, first in enumerate(models):
        for second in models[i + 1:]:
            assert_array_almost_equal(first.coef_, second.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
    """class_weight handling in LogisticRegressionCV across solvers."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    # liblinear cannot combine a dict class_weight with a multiclass target.
    msg = ("In LogisticRegressionCV the liblinear solver cannot handle "
           "multiclass with class_weight of type dict. Use the lbfgs, "
           "newton-cg or sag solvers or set class_weight='balanced'")
    clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
                                   solver='liblinear')
    assert_raise_message(ValueError, msg, clf_lib.fit, X, y)
    # The same estimator works once the target is made binary.
    y_ = y.copy()
    y_[y == 2] = 1
    clf_lib.fit(X, y_)
    assert_array_equal(clf_lib.classes_, [0, 1])
    # Test for class_weight=balanced
    # All solvers must agree on a balanced binary problem.
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               random_state=0)
    clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
                                   class_weight='balanced')
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
                                   class_weight='balanced')
    clf_lib.fit(X, y)
    clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
                                   class_weight='balanced', max_iter=2000)
    clf_sag.fit(X, y)
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
    assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
    """sample_weight semantics: default, uniform, and class_weight parity."""
    X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
                               n_classes=2, random_state=0)
    # Weight 1 for class 0 samples, weight 2 for class 1 samples.
    sample_weight = y + 1
    for LR in [LogisticRegression, LogisticRegressionCV]:
        # Test that passing sample_weight as ones is the same as
        # not passing them at all (default None)
        for solver in ['lbfgs', 'liblinear']:
            clf_sw_none = LR(solver=solver, fit_intercept=False)
            clf_sw_none.fit(X, y)
            clf_sw_ones = LR(solver=solver, fit_intercept=False)
            clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
            assert_array_almost_equal(
                clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
        # Test that sample weights work the same with the lbfgs,
        # newton-cg, and 'sag' solvers
        clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
        clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
        clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
        clf_sw_n.fit(X, y, sample_weight=sample_weight)
        clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10)
        # ignore convergence warning due to small dataset
        with ignore_warnings():
            clf_sw_sag.fit(X, y, sample_weight=sample_weight)
        clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False)
        clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
        # Test that passing class_weight as [1,2] is the same as
        # passing class weight = [1,1] but adjusting sample weights
        # to be 2 for all instances of class 2
        for solver in ['lbfgs', 'liblinear']:
            clf_cw_12 = LR(solver=solver, fit_intercept=False,
                           class_weight={0: 1, 1: 2})
            clf_cw_12.fit(X, y)
            clf_sw_12 = LR(solver=solver, fit_intercept=False)
            clf_sw_12.fit(X, y, sample_weight=sample_weight)
            assert_array_almost_equal(
                clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
    # Test the above for l1 penalty and l2 penalty with dual=True.
    # since the patched liblinear code is different.
    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
        penalty="l1")
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l1")
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
        penalty="l2", dual=True)
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l2", dual=True)
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
    """Return the 'balanced' class weights of *y* as a {class: weight} dict."""
    labels = np.unique(y)
    weights = compute_class_weight("balanced", labels, y)
    return {label: weight for label, weight in zip(labels, weights)}
def test_logistic_regression_class_weights():
    """class_weight='balanced' must equal the explicit balanced dict."""
    def _check_balanced_equivalence(X, y, solvers, multi_class, decimal):
        # Compare 'balanced' against the equivalent explicit dictionary.
        weight_dict = _compute_class_weight_dictionary(y)
        for solver in solvers:
            auto_clf = LogisticRegression(solver=solver,
                                          multi_class=multi_class,
                                          class_weight="balanced")
            dict_clf = LogisticRegression(solver=solver,
                                          multi_class=multi_class,
                                          class_weight=weight_dict)
            auto_clf.fit(X, y)
            dict_clf.fit(X, y)
            assert_array_almost_equal(auto_clf.coef_, dict_clf.coef_,
                                      decimal=decimal)

    # Multinomial case: remove 90% of class 0.
    _check_balanced_equivalence(iris.data[45:, :], iris.target[45:],
                                ("lbfgs", "newton-cg"), "multinomial", 4)
    # Binary case: remove 90% of class 0 and 100% of class 2.
    _check_balanced_equivalence(iris.data[45:100, :], iris.target[45:100],
                                ("lbfgs", "newton-cg", "liblinear"),
                                "ovr", 6)
def test_multinomial_logistic_regression_with_classweight_auto():
    """class_weight='auto' must emit its deprecation warning on fit."""
    X, y = iris.data, iris.target
    clf = LogisticRegression(multi_class='multinomial',
                             class_weight='auto', solver='lbfgs')
    # 'auto' is deprecated (slated for removal in 0.19); fitting must warn.
    assert_warns_message(DeprecationWarning,
                         "class_weight='auto' heuristic is deprecated",
                         clf.fit, X, y)
def test_logistic_regression_convergence_warnings():
    """A too-small max_iter must trigger a ConvergenceWarning."""
    X, y = make_classification(n_samples=20, n_features=20)
    clf = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, clf.fit, X, y)
    # liblinear reports the iteration count it actually stopped at.
    assert_equal(clf.n_iter_, 2)
def test_logistic_regression_multinomial():
    # Tests for the multinomial option in logistic regression
    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)
    # 'lbfgs' is used as a referenced
    solver = 'lbfgs'
    # Reference fits: with and without intercept.
    ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
    ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
                               fit_intercept=False)
    ref_i.fit(X, y)
    ref_w.fit(X, y)
    assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
    assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))
    for solver in ['sag', 'newton-cg']:
        clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=1000, tol=1e-6)
        clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=1000, tol=1e-6,
                                   fit_intercept=False)
        clf_i.fit(X, y)
        clf_w.fit(X, y)
        assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
        assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))
        # Compare solutions between lbfgs and the other solvers
        assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
        assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
        assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)
    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg', 'sag']:
        clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)
def test_multinomial_grad_hess():
    """Check one column of the multinomial Hessian-vector product against a
    finite-difference least-squares estimate."""
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    # One-hot targets derived from the (noise-free) linear scores.
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
                                         sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)
    # Estimate hessian using least squares as done in
    # test_logistic_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
                               sample_weight=sample_weights)[0]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    """Zero decision scores must map to the negative class.

    liblinear itself predicts the positive class when the decision value is
    exactly zero; scikit-learn deliberately does not.  See issue #3600
    (https://github.com/scikit-learn/scikit-learn/issues/3600) and PR #3623
    (https://github.com/scikit-learn/scikit-learn/pull/3623).
    """
    X, y = make_classification(n_samples=5, n_features=5)
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, y)
    # All-zero samples make the (intercept-free) decision function zero.
    zeros = np.zeros((5, 5))
    assert_array_equal(clf.predict(zeros), np.zeros(5))
def test_liblinear_logregcv_sparse():
    """LogisticRegressionCV(solver='liblinear') must accept sparse input."""
    X, y = make_classification(n_samples=10, n_features=5)
    LogisticRegressionCV(solver='liblinear').fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
    """Non-positive intercept_scaling must raise with a clear message."""
    for scaling in [-1, 0]:
        clf = LogisticRegression(intercept_scaling=scaling)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    """With fit_intercept=False the intercept is pinned to zero."""
    clf = LogisticRegression(fit_intercept=False).fit(X, Y1)
    assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
    """The CV refit must honor the requested l1 penalty."""
    X, y = make_classification(n_samples=50, n_features=20, random_state=0)
    cv_model = LogisticRegressionCV(penalty="l1", Cs=[1.0],
                                    solver='liblinear').fit(X, y)
    plain = LogisticRegression(penalty="l1", C=1.0,
                               solver='liblinear').fit(X, y)
    # l1 sparsity pattern must match between the CV refit and a direct fit.
    assert_equal(np.count_nonzero(cv_model.coef_),
                 np.count_nonzero(plain.coef_))
def test_logreg_predict_proba_multinomial():
    """Multinomial probabilities must beat OvR and the sigmoid heuristic.

    The softmax probabilities of a multinomial fit should achieve a lower
    log-loss than (a) an OvR fit and (b) the same multinomial model scored
    through the per-class sigmoid (``_predict_proba_lr``).
    """
    X, y = make_classification(n_samples=10, n_features=20, random_state=0,
                               n_classes=3, n_informative=10)

    clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
    clf_multi.fit(X, y)
    clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))

    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
    clf_ovr.fit(X, y)
    clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
    assert_greater(clf_ovr_loss, clf_multi_loss)

    # Softmax vs. per-class sigmoid on the same fitted model.
    # (clf_multi_loss was previously recomputed with an identical call
    # here; the redundant computation has been removed.)
    clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
    assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    # Test that the maximum number of iteration is reached
    """n_iter_ must equal max_iter when the tolerance is unreachable."""
    X, y_bin = iris.data, iris.target.copy()
    y_bin[y_bin == 2] = 0
    solvers = ['newton-cg', 'liblinear', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')
    for max_iter in range(1, 5):
        for solver in solvers:
            for multi_class in ['ovr', 'multinomial']:
                # liblinear has no multinomial backend.
                if solver == 'liblinear' and multi_class == 'multinomial':
                    continue
                # tol=1e-15 is unattainable, so the solver must stop at
                # exactly max_iter iterations.
                lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
                                        multi_class=multi_class,
                                        random_state=0, solver=solver)
                lr.fit(X, y_bin)
                assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
    """n_iter_ must have the documented shape for every solver/estimator.

    For OvR LogisticRegression, n_iter_ has one entry per class (liblinear
    reports a single entry); LogisticRegressionCV adds CV-fold and Cs
    dimensions.  Multinomial variants report a single entry because one
    joint optimization is run.
    """
    X, y = iris.data, iris.target
    y_bin = y.copy()
    y_bin[y_bin == 2] = 0

    n_Cs = 4
    n_cv_fold = 2

    for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
        # OvR case.
        n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
        clf = LogisticRegression(tol=1e-2, multi_class='ovr',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        n_classes = np.unique(y).shape[0]
        clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))

        # Multinomial case: a single joint optimization.
        n_classes = 1
        if solver in ('liblinear', 'sag'):
            # These solvers are not exercised for multinomial here.
            # BUG FIX: this used to ``break``, which aborted the whole
            # solver loop at 'liblinear' and silently skipped the 'sag'
            # and 'lbfgs' solvers entirely (including their OvR checks).
            continue
        clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))

        clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
    # A 1-iteration second fit on same data should give almost same result
    # with warm starting, and quite different result without warm starting.
    # Warm starting does not work with liblinear solver.
    X, y = iris.data, iris.target
    solvers = ['newton-cg', 'sag']
    # old scipy doesn't have maxiter
    if sp_version >= (0, 12):
        solvers.append('lbfgs')
    for warm_start in [True, False]:
        for fit_intercept in [True, False]:
            for solver in solvers:
                for multi_class in ['ovr', 'multinomial']:
                    clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
                                             warm_start=warm_start,
                                             solver=solver,
                                             random_state=42, max_iter=100,
                                             fit_intercept=fit_intercept)
                    clf.fit(X, y)
                    coef_1 = clf.coef_
                    # Refit for a single iteration: warm-started models
                    # barely move, cold-started ones move a lot.
                    clf.max_iter = 1
                    with ignore_warnings():
                        clf.fit(X, y)
                    cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
                    msg = ("Warm starting issue with %s solver in %s mode "
                           "with fit_intercept=%s and warm_start=%s"
                           % (solver, multi_class, str(fit_intercept),
                              str(warm_start)))
                    if warm_start:
                        assert_greater(2.0, cum_diff, msg)
                    else:
                        assert_greater(cum_diff, 2.0, msg)
|
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
from torch.optim import lr_scheduler
import torch.nn.parallel
import numpy as np
###############################################################################
# Functions
###############################################################################
def weights_init_normal(m):
    """Gaussian init: conv/linear weights ~ N(0, 0.02); batchnorm weights ~ N(1, 0.02), bias 0.

    Bug fix: the original called ``init.uniform`` despite the function name,
    and for BatchNorm passed ``uniform(w, 1.0, 0.02)`` — an inverted
    (low > high) range. Use ``normal_`` as the sibling ``weights_init`` does.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
    """Xavier init for conv/linear weights; batchnorm weights ~ N(1, 0.02), bias 0.

    Bug fix: the BatchNorm branch called ``init.uniform(w, 1.0, 0.02)`` with
    an inverted (low > high) range; it now draws from N(1, 0.02).
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
    """Kaiming (He) init for conv/linear weights; batchnorm weights ~ N(1, 0.02), bias 0.

    Bug fix: the BatchNorm branch called ``init.uniform(w, 1.0, 0.02)`` with
    an inverted (low > high) range; it now draws from N(1, 0.02).
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
    """Orthogonal init for conv/linear weights; batchnorm weights ~ N(1, 0.02), bias 0.

    Bug fixes: the BatchNorm branch called ``init.uniform(w, 1.0, 0.02)``
    with an inverted (low > high) range, and a stray debug ``print`` ran for
    every module (the sibling initializers keep it commented out).
    """
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
    """Apply the chosen weight-initialization scheme to every module of ``net``."""
    print('initialization method [%s]' % init_type)
    initializers = {
        'normal': weights_init_normal,
        'xavier': weights_init_xavier,
        'kaiming': weights_init_kaiming,
        'orthogonal': weights_init_orthogonal,
    }
    if init_type not in initializers:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
    net.apply(initializers[init_type])
def weights_init(m):
    """DCGAN-style initializer: N(0, 0.02) for (transposed) conv weights,
    N(1, 0.02) plus a zeroed bias for BatchNorm2d layers."""
    if isinstance(m, nn.BatchNorm2d):
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
        return
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        m.weight.data.normal_(0.0, 0.02)
def get_norm_layer(norm_type='instance'):
    """Map a normalization name to a layer constructor.

    Args:
        norm_type: 'batch' (affine BatchNorm2d), 'instance' (non-affine
            InstanceNorm2d) or 'none' (returns None).

    Raises:
        NotImplementedError: for any other name.

    Bug fix: the 'none' branch tested the undefined name ``layer_type``,
    which raised NameError instead of returning None.
    """
    if norm_type == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
    elif norm_type == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
    elif norm_type == 'none':
        norm_layer = None
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer
def get_scheduler(optimizer, opt):
    """Build a learning-rate scheduler for ``optimizer`` from ``opt.lr_policy``.

    Supported policies: 'lambda' (linear decay after opt.niter epochs),
    'step' (decay by 0.1 every opt.lr_decay_iters), 'plateau'.

    Raises:
        NotImplementedError: for an unknown policy. Bug fix: the original
        *returned* the exception object instead of raising it, so callers
        silently received an exception instance as the "scheduler".
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            # linear ramp from 1 to 0 over opt.niter_decay epochs once the
            # first opt.niter epochs have passed
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[]):
    """Construct a generator network, move it to the first GPU if requested,
    and initialize its weights with ``init_type``."""
    norm_layer = get_norm_layer(norm_type=norm)
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
    if which_model_netG == 'resnet_9blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer,
                               use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
    elif which_model_netG == 'resnet_6blocks':
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer,
                               use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_128':
        netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer,
                             use_dropout=use_dropout, gpu_ids=gpu_ids)
    elif which_model_netG == 'unet_256':
        netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer,
                             use_dropout=use_dropout, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
    if len(gpu_ids) > 0:
        netG.cuda(device_id=gpu_ids[0])
    init_weights(netG, init_type=init_type)
    return netG
def define_D(input_nc, ndf, which_model_netD,
             n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):
    """Construct a discriminator, move it to the first GPU if requested, and
    apply the DCGAN-style ``weights_init``."""
    norm_layer = get_norm_layer(norm_type=norm)
    use_gpu = len(gpu_ids) > 0
    if use_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD == 'basic':
        # NOTE: a hard-coded WGAN critic is used for 'basic' instead of the
        # PatchGAN discriminator; input_nc/ndf are ignored on this branch.
        netD = DCGAN_D_wgan(256, 256, 6, 64, 1, 1)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' %
                                  which_model_netD)
    if use_gpu:
        netD.cuda(device_id=gpu_ids[0])
    netD.apply(weights_init)
    return netD
def print_network(net):
    """Print the network structure followed by its total parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
    """GAN objective that hides target-label tensor creation.

    With ``use_lsgan=True`` this is an MSE loss (LSGAN); otherwise BCE.
    Target tensors are cached and rebuilt only when the prediction's
    element count changes.
    """

    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        """Return a cached constant tensor with the right label and size."""
        if target_is_real:
            if (self.real_label_var is None or
                    self.real_label_var.numel() != input.numel()):
                filled = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(filled, requires_grad=False)
            return self.real_label_var
        if (self.fake_label_var is None or
                self.fake_label_var.numel() != input.numel()):
            filled = self.Tensor(input.size()).fill_(self.fake_label)
            self.fake_label_var = Variable(filled, requires_grad=False)
        return self.fake_label_var

    def __call__(self, input, target_is_real):
        return self.loss(input, self.get_target_tensor(input, target_is_real))
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
    """Johnson-style generator: two downsampling convs, ``n_blocks`` residual
    blocks, then mirrored transposed-conv upsampling.

    Code and idea originally from Justin Johnson's fast-neural-style
    architecture: https://github.com/jcjohnson/fast-neural-style/
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect'):
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # InstanceNorm carries no affine parameters here, so the convs
        # keep their own bias in that case.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        layers = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
                            bias=use_bias),
                  norm_layer(ngf),
                  nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):
            # stride-2 conv halves the spatial size and doubles channels
            mult = 2 ** i
            layers += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                 stride=2, padding=1, bias=use_bias),
                       norm_layer(ngf * mult * 2),
                       nn.ReLU(True)]

        mult = 2 ** n_downsampling
        layers += [ResnetBlock(ngf * mult, padding_type=padding_type,
                               norm_layer=norm_layer, use_dropout=use_dropout,
                               use_bias=use_bias)
                   for _ in range(n_blocks)]

        for i in range(n_downsampling):
            # mirror the downsampling with transposed convs
            mult = 2 ** (n_downsampling - i)
            layers += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                          kernel_size=3, stride=2,
                                          padding=1, output_padding=1,
                                          bias=use_bias),
                       norm_layer(int(ngf * mult / 2)),
                       nn.ReLU(True)]

        layers += [nn.ReflectionPad2d(3),
                   nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
                   nn.Tanh()]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
    """Residual block: ``x + conv-norm-relu-(dropout)-conv-norm(x)``."""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Assemble the two-conv residual branch as an nn.Sequential."""
        pad_layers = {'reflect': nn.ReflectionPad2d,
                      'replicate': nn.ReplicationPad2d}

        def padded_conv():
            # explicit padding module for reflect/replicate; implicit
            # zero-padding inside the conv for 'zero'
            if padding_type in pad_layers:
                return [pad_layers[padding_type](1),
                        nn.Conv2d(dim, dim, kernel_size=3, padding=0, bias=use_bias)]
            if padding_type == 'zero':
                return [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=use_bias)]
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        branch = padded_conv() + [norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            branch += [nn.Dropout(0.5)]
        branch += padded_conv() + [norm_layer(dim)]
        return nn.Sequential(*branch)

    def forward(self, x):
        return x + self.conv_block(x)
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
    """U-Net generator built recursively from UnetSkipConnectionBlock.

    ``num_downs`` is the number of downsamplings; e.g. num_downs == 7 maps a
    128x128 image down to 1x1 at the bottleneck.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        # innermost level first, then (num_downs - 5) dropout-capable ngf*8
        # levels, then widen back out towards the image resolution
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block, norm_layer=norm_layer, use_dropout=use_dropout)
        for outer in (ngf * 4, ngf * 2, ngf):
            block = UnetSkipConnectionBlock(outer, outer * 2, input_nc=None, submodule=block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level with a skip connection.

        X -------------------identity---------------------- X
          |-- downsampling -- |submodule| -- upsampling --|

    The outermost level returns the plain output; every other level
    concatenates its input onto the output along the channel axis.
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no affine parameters, so convs keep their bias then
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc

        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        uprelu = nn.ReLU(True)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            model = [downconv, submodule, uprelu, upconv, nn.Tanh()]
        elif innermost:
            # no submodule below this level
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            model = [downrelu, downconv, uprelu, upconv, norm_layer(outer_nc)]
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            model = [downrelu, downconv, norm_layer(inner_nc),
                     submodule, uprelu, upconv, norm_layer(outer_nc)]
            if use_dropout:
                model += [nn.Dropout(0.5)]
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        return torch.cat([x, self.model(x)], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    """PatchGAN-style discriminator.

    Note: in this variant the norm layers and the optional sigmoid are
    intentionally disabled (WGAN-style raw output); ``norm_layer`` and
    ``use_sigmoid`` are accepted but have no effect.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        kw, padw = 4, 1
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                  nn.LeakyReLU(0.2, True)]
        nf_mult = 1
        for n in range(1, n_layers):
            # channel count doubles each level, capped at ndf * 8
            nf_mult_prev, nf_mult = nf_mult, min(2 ** n, 8)
            layers += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                                 kernel_size=kw, stride=2, padding=padw),
                       nn.LeakyReLU(0.2, True)]
        nf_mult_prev, nf_mult = nf_mult, min(2 ** n_layers, 8)
        layers += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                             kernel_size=kw, stride=1, padding=padw),
                   nn.LeakyReLU(0.2, True)]
        # final 1-channel prediction map
        layers += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        return self.model(input)
class DCGAN_D_fastai(nn.Module):
def conv_block(self, main, name, inf, of, a, b, c, bn=True):
main.add_module(f'{name}-{inf}.{of}.conv', nn.Conv2d(inf, of, a, b, c, bias=False))
# main.add_module(f'{name}-{of}.batchnorm', nn.BatchNorm2d(of))
main.add_module(f'{name}-{of}.relu', nn.LeakyReLU(0.2, inplace=True))
def __init__(self, isize, nc, ndf, ngpu, n_extra_layers=0):
super(DCGAN_D_fastai, self).__init__()
self.ngpu = ngpu
assert isize % 16 == 0, "isize has to be a multiple of 16"
main = nn.Sequential()
# input is nc x isize x isize
self.conv_block(main, 'initial', nc, ndf, 4, 2, 1, False)
csize, cndf = isize / 2, ndf
for t in range(n_extra_layers):
self.conv_block(main, f'extra-{t}', cndf, cndf, 3, 1, 1)
while csize > 4:
self.conv_block(main, 'pyramid', cndf, cndf*2, 4, 2, 1)
cndf *= 2; csize /= 2
# state size. K x 4 x 4
main.add_module(f'final.{cndf}-1.conv', nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
self.main = main
def forward(self, input):
# gpu_ids = None
# if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
# gpu_ids = range(self.ngpu)
# output = nn.parallel.data_parallel(self.main, input, gpu_ids)
# output = output.mean(0)
# return output.view(1)
output = self.main(input)
output = output.mean(0)
return output.view(1)
class DCGAN_D_wgan(nn.Module):
def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
super(DCGAN_D_wgan, self).__init__()
self.ngpu = ngpu
assert isize % 16 == 0, "isize has to be a multiple of 16"
main = nn.Sequential()
# input is nc x isize x isize
main.add_module('initial.conv.{0}-{1}'.format(nc, ndf),
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
main.add_module('initial.relu.{0}'.format(ndf),
nn.LeakyReLU(0.2, inplace=True))
csize, cndf = isize / 2, ndf
# Extra layers
for t in range(n_extra_layers):
main.add_module('extra-layers-{0}.{1}.conv'.format(t, cndf),
nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
main.add_module('extra-layers-{0}.{1}.batchnorm'.format(t, cndf),
nn.BatchNorm2d(cndf))
main.add_module('extra-layers-{0}.{1}.relu'.format(t, cndf),
nn.LeakyReLU(0.2, inplace=True))
while csize > 4:
in_feat = cndf
out_feat = cndf * 2
main.add_module('pyramid.{0}-{1}.conv'.format(in_feat, out_feat),
nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
main.add_module('pyramid.{0}.batchnorm'.format(out_feat),
nn.BatchNorm2d(out_feat))
main.add_module('pyramid.{0}.relu'.format(out_feat),
nn.LeakyReLU(0.2, inplace=True))
cndf = cndf * 2
csize = csize / 2
# state size. K x 4 x 4
main.add_module('final.{0}-{1}.conv'.format(cndf, 1),
nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
self.main = main
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
output = output.mean(0)
return output.view(1)
class DCGAN_G(nn.Module):
def deconv_block(self, main, name, inf, of, a, b, c, bn=True):
main.add_module(f'{name}-{inf}.{of}.convt', nn.ConvTranspose2d(inf, of, a, b, c, bias=False))
main.add_module(f'{name}-{of}.batchnorm', nn.BatchNorm2d(of))
main.add_module(f'{name}-{of}.relu', nn.ReLU(inplace=True))
def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
super(DCGAN_G, self).__init__()
self.ngpu = ngpu
assert isize % 16 == 0, "isize has to be a multiple of 16"
cngf, tisize = ngf // 2, 4
while tisize != isize:
cngf *= 2
tisize *= 2
main = nn.Sequential()
self.deconv_block(main, 'initial', nz, cngf, 4, 1, 0)
csize, cndf = 4, cngf
while csize < isize // 2:
self.deconv_block(main, 'pyramid', cngf, cngf // 2, 4, 2, 1)
cngf //= 2
csize *= 2
for t in range(n_extra_layers):
self.deconv_block(main, f'extra-{t}', cngf, cngf, 3, 1, 1)
main.add_module(f'final.{cngf}-{nc}.convt', nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
main.add_module(f'final.{nc}.tanh', nn.Tanh())
self.main = main
def forward(self, input):
return self.main(input)
|
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
import requests.packages.urllib3
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'

# Note that the error fields do not match the 2.0 JSONRPC spec


def get_config_file():
    """Path of the deployment config file, or None when not configured."""
    return environ.get(DEPLOY, None)


def get_service_name():
    """Service name from the environment, or None when not configured."""
    return environ.get(SERVICE, None)


def get_config():
    """Parse this service's section of the deployment config into a dict."""
    cfg_file = get_config_file()
    if not cfg_file:
        return None
    parser = ConfigParser()
    parser.read(cfg_file)
    section = get_service_name() or 'gaprice_SPAdes'
    return dict(parser.items(section))
# Load the service configuration once at import time.
config = get_config()

from gaprice_SPAdes.gaprice_SPAdesImpl import gaprice_SPAdes
# Single implementation instance shared by every request this server handles.
impl_gaprice_SPAdes = gaprice_SPAdes(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that also handles sets, frozensets, and any object
    exposing a ``toJSONable()`` method."""

    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        return json.JSONEncoder.default(self, obj)
# Method registries consulted by the WSGI dispatcher below:
#   sync_methods        - method names allowed to run synchronously
#   async_run_methods   - '<method>_async' -> [module, method] job submission
#   async_check_methods - '<method>_check' -> [module, method] job polling
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
async_run_methods['gaprice_SPAdes.run_SPAdes_async'] = ['gaprice_SPAdes', 'run_SPAdes']
async_check_methods['gaprice_SPAdes.run_SPAdes_check'] = ['gaprice_SPAdes', 'run_SPAdes']
sync_methods['gaprice_SPAdes.run_SPAdes'] = True
class AsyncJobServiceClient(object):
    """Thin HTTP client for the KBase job service used to run/poll async jobs.

    The service URL comes from the KB_JOB_SERVICE_URL environment variable,
    falling back to the 'job-service-url' key of the module config. A token
    is mandatory because all async calls require authentication.
    """
    def __init__(self, timeout=30 * 60, token=None,
                 ignore_authrc=True, trust_all_ssl_certificates=False):
        url = environ.get('KB_JOB_SERVICE_URL', None)
        if url is None and config is not None:
            url = config.get('job-service-url')
        if url is None:
            raise ValueError('Neither \'job-service-url\' parameter is defined in '+
                             'configuration nor \'KB_JOB_SERVICE_URL\' variable is defined in system')
        scheme, _, _, _, _, _ = _urlparse.urlparse(url)
        if scheme not in ['http', 'https']:
            raise ValueError(url + " isn't a valid http url")
        self.url = url
        self.timeout = int(timeout)
        self._headers = dict()
        self.trust_all_ssl_certificates = trust_all_ssl_certificates
        if token is None:
            raise ValueError('Authentication is required for async methods')
        self._headers['AUTHORIZATION'] = token
        if self.timeout < 1:
            raise ValueError('Timeout value must be at least 1 second')

    def _call(self, method, params, json_rpc_call_context = None):
        # JSON-RPC 1.1 call; a random id ties the response to this request.
        arg_hash = {'method': method,
                    'params': params,
                    'version': '1.1',
                    'id': str(_random.random())[2:]
                    }
        if json_rpc_call_context:
            arg_hash['context'] = json_rpc_call_context
        body = json.dumps(arg_hash, cls=JSONObjectEncoder)
        ret = _requests.post(self.url, data=body, headers=self._headers,
                             timeout=self.timeout,
                             verify=not self.trust_all_ssl_certificates)
        if ret.status_code == _requests.codes.server_error:
            # A 500 response may carry a structured JSON-RPC error object;
            # fall back to a generic ServerError with the raw body otherwise.
            if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
                err = json.loads(ret.text)
                if 'error' in err:
                    raise ServerError(**err['error'])
                else:
                    raise ServerError('Unknown', 0, ret.text)
            else:
                raise ServerError('Unknown', 0, ret.text)
        if ret.status_code != _requests.codes.OK:
            ret.raise_for_status()
        resp = json.loads(ret.text)
        if 'result' not in resp:
            raise ServerError('Unknown', 0, 'An unknown server error occurred')
        return resp['result']

    def run_job(self, run_job_params, json_rpc_call_context = None):
        # Submit a job; returns the job id.
        return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]

    def check_job(self, job_id, json_rpc_call_context = None):
        # Poll a job; returns its state dict.
        return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
class JSONRPCServiceCustom(JSONRPCService):
    """JSON-RPC service that passes a per-call context object (``ctx``) as
    the first argument of every registered method."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # Exception was raised inside the method: wrap it as a server
            # error carrying the traceback so the client can see it.
            newerr = ServerError()
            newerr.trace = traceback.format_exc()
            newerr.data = e.__str__()
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # the json string was already deserialized earlier in the server
        # code, so it is not parsed again here
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch: fill every request first, then handle them all.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # bug fix: dict.has_key() is deprecated in Python 2 and removed in
        # Python 3; use the `in` operator instead.
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context dict (client, user, auth, provenance) with helpers
    that forward log messages to the service logger."""

    def __init__(self, logger):
        # every context field starts unset; the WSGI handler fills them in
        for key in ('client_ip', 'user_id', 'authenticated', 'token',
                    'module', 'method', 'call_id', 'rpc_context',
                    'provenance'):
            self[key] = None
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        # symbolic/raw debug levels pass through; numeric 1-3 map to 7-9
        if level not in self._debug_levels:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])
def getIPAddress(environ):
    """Best-guess client IP: X-Forwarded-For, then X-Real-IP, then REMOTE_ADDR.

    The X-* headers are ignored when 'dont_trust_x_ip_headers' is set to
    'true' in the service config.
    """
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        forwarded = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            # first entry is the originating client
            return forwarded.split(',')[0].strip()
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over
    def logcallback(self):
        # Keep the server log pointed at the (possibly rotated) user log file.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        # Write one message to the server log, tagged with the call context.
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        """Set up logging, the JSON-RPC dispatcher, and the auth client."""
        submod = get_service_name() or 'gaprice_SPAdes'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        self.method_authentication = dict()
        # Register the service methods and their auth requirements.
        self.rpc_service.add(impl_gaprice_SPAdes.run_SPAdes,
                             name='gaprice_SPAdes.run_SPAdes',
                             types=[dict])
        self.method_authentication['gaprice_SPAdes.run_SPAdes'] = 'required'
        self.auth_client = biokbase.nexus.Client(
            config={'server': 'nexus.api.globusonline.org',
                    'verify_ssl': True,
                    'client': None,
                    'client_secret': None})

    def __call__(self, environ, start_response):
        """WSGI entry point: parse the JSON-RPC request, authenticate,
        dispatch (sync or via the async job service), and serialize the
        response. Any failure is converted to a JSON-RPC error body."""
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                # Fill the call context and provenance from the request.
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
                prov_action = {'service': ctx['module'], 'method': ctx['method'],
                               'method_params': req['params']}
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    if method_name in async_run_methods:
                        method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
                    if method_name in async_check_methods:
                        method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
                    auth_req = self.method_authentication.get(method_name,
                                                              "none")
                    if auth_req != "none":
                        if token is None and auth_req == 'required':
                            err = ServerError()
                            err.data = "Authentication required for " + \
                                "gaprice_SPAdes but no authentication header was passed"
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user, _, _ = \
                                    self.auth_client.validate_token(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception, e:
                                # only fatal when auth is actually required
                                if auth_req == 'required':
                                    err = ServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    method_name = req['method']
                    if method_name in async_run_methods or method_name in async_check_methods:
                        # Async path: submit to or poll the job service.
                        if method_name in async_run_methods:
                            orig_method_pair = async_run_methods[method_name]
                        else:
                            orig_method_pair = async_check_methods[method_name]
                        orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
                        if 'required' != self.method_authentication.get(orig_method_name, 'none'):
                            err = ServerError()
                            err.data = 'Async method ' + orig_method_name + ' should require ' + \
                                'authentication, but it has authentication level: ' + \
                                self.method_authentication.get(orig_method_name, 'none')
                            raise err
                        job_service_client = AsyncJobServiceClient(token = ctx['token'])
                        if method_name in async_run_methods:
                            run_job_params = {
                                'method': orig_method_name,
                                'params': req['params']}
                            if 'rpc_context' in ctx:
                                run_job_params['rpc_context'] = ctx['rpc_context']
                            job_id = job_service_client.run_job(run_job_params)
                            respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
                            rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
                            status = '200 OK'
                        else:
                            job_id = req['params'][0]
                            job_state = job_service_client.check_job(job_id)
                            finished = job_state['finished']
                            if finished != 0 and 'error' in job_state and job_state['error'] is not None:
                                err = {'error': job_state['error']}
                                rpc_result = self.process_error(err, ctx, req, None)
                            else:
                                respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
                                rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
                                status = '200 OK'
                    elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
                        # Sync path: dispatch through the JSON-RPC service.
                        self.log(log.INFO, ctx, 'start method')
                        rpc_result = self.rpc_service.call(ctx, req)
                        self.log(log.INFO, ctx, 'end method')
                        status = '200 OK'
                    else:
                        err = ServerError()
                        err.data = 'Method ' + method_name + ' cannot be run synchronously'
                        raise err
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception, e:
                    # Any other failure becomes a generic server error.
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                     'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print 'The request method was %s\n' % environ['REQUEST_METHOD']
        # print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
        # print 'The request body was: %s' % request_body
        # print 'The result from the method call is:\n%s\n' % \
        #     pprint.pformat(rpc_result)
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body]

    def process_error(self, error, context, request, trace=None):
        """Log the trace (if any) and shape ``error`` into a JSON-RPC error
        body matching the version fields of the incoming request."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            if 'error' not in error['error'] or error['error']['error'] is None:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        """ISO-8601 local timestamp with an explicit UTC offset suffix."""
        # Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Module-level singleton serving all requests (both uwsgi and BaseHTTP modes).
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print "Monkeypatching std libraries for async"
        from gevent import monkey
        monkey.patch_all()
    # Mount the WSGI application at the URL root.
    uwsgi.applications = {
        '': application
    }
except ImportError:
    # Not available outside of uwsgi, ignore
    pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Excecution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
    """Stop a server previously started with start_server(newprocess=True).

    Raises:
        RuntimeError: if no server process is currently running.
    """
    global _proc
    if _proc is None:
        # Mirror the guard in start_server: fail with a clear message instead
        # of an AttributeError on None.
        raise RuntimeError('server is not running')
    _proc.terminate()
    # Reap the child so it does not linger as a zombie process.
    _proc.join()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC request from the command line.

    Reads a JSON-RPC request from input_file_path, executes it against the
    module-level Application instance, and writes the JSON response (result
    or error envelope) to output_file_path.

    Returns 0 on success, 500 if the response contains an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in defaults for optional JSON-RPC envelope fields.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        # Random digits (leading "0." stripped) serve as a request id.
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        # NOTE(review): assumes validate_token returns (user, ...); the
        # remaining tuple items are intentionally discarded here.
        user, _, _ = application.auth_client.validate_token(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    # Method names are of the form "Module.method".
    ctx['module'], ctx['method'] = req['method'].split('.')
    # Record this call as the first provenance action for the request.
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception, e:
        # Unexpected failure: report a generic error carrying the traceback.
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Suppress noisy urllib3 SSL warnings emitted via the requests package.
    requests.packages.urllib3.disable_warnings()
    # CLI mode: <input-json-file> <output-file> [token-or-token-file]
    if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
        token = None
        if len(sys.argv) == 4:
            if os.path.isfile(sys.argv[3]):
                # Third argument is a file containing the auth token.
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                # Otherwise treat the argument itself as the token.
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Server mode: optional --port and --host flags.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print str(err) # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print "Host set to %s" % host
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print "Listening on port %s" % port
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
|
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# Serve the notebook under the /jupyter/ URL prefix (e.g. behind a reverse proxy).
c.NotebookApp.base_url = '/jupyter/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values
# can be used to enable and disable the loading of the extensions. The
# extensions will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# NOTE(review): a hard-coded SHA-1 password hash is checked into this config —
# anyone with this file can brute-force the password offline. Consider
# regenerating with notebook.auth.passwd() and keeping the hash out of
# version control.
c.NotebookApp.password = 'sha1:6c1a5cca33dc:30f31ede1973570aa5e471d9d5537852a5f9386b'
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
# Listen on port 8000 (overrides the Jupyter default of 8888).
c.NotebookApp.port = 8000
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether or not to trust the X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-
# Forwarded-For headers sent by the upstream reverse proxy. Necessary if the
# proxy handles SSL.
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
## https://github.com/jbwhit/til/blob/master/jupyter/autosave_html_py.md
import os
from subprocess import check_call
from queue import Queue
from threading import Thread
import nbformat
from tempfile import TemporaryFile
class PostSave:
    """Jupyter post-save hook that converts saved notebooks to HTML.

    Conversions run on a background worker thread fed by a queue so the
    save request returns immediately instead of waiting for nbconvert.
    """

    # Single work queue shared by all hook instances (class attribute).
    __queue = Queue()

    def __init__(self):
        # Daemon thread so a pending conversion cannot block interpreter
        # exit (the original non-daemon thread kept the process alive).
        t = Thread(target=self.__worker)
        t.daemon = True
        t.start()

    def __worker(self):
        """Drain the queue forever, converting one saved file at a time."""
        while True:
            args, kwargs = self.__queue.get()
            try:
                self.__convert(*args, **kwargs)
            except Exception:
                # Best-effort hook: a failed nbconvert run must not kill
                # the worker thread (which would silently stop all future
                # conversions).
                pass
            finally:
                self.__queue.task_done()

    @staticmethod
    def __convert(model, os_path, contents_manager):
        """Run nbconvert on the saved file if it is a notebook."""
        d, fname = os.path.split(os_path)
        if model['type'] == 'notebook':
            check_call(['jupyter', 'nbconvert', '--to', 'html', fname], cwd=d)

    def __call__(self, *args, **kwargs):
        # Called by Jupyter with (model=..., os_path=..., contents_manager=...);
        # just enqueue the work for the background thread.
        self.__queue.put((args, kwargs))
# Convert .ipynb files into .html after each save (runs asynchronously on the
# PostSave worker thread so the save request is not blocked by nbconvert).
c.FileContentsManager.post_save_hook = PostSave()
|
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import serial
import time
import re
def parse_visa_resource_string(resource_string):
    """Parse a VISA serial (ASRL) resource string.

    Valid resource strings:
      ASRL1::INSTR
      ASRL::COM1::INSTR
      ASRL::COM1,9600::INSTR
      ASRL::COM1,9600,8n1::INSTR
      ASRL::/dev/ttyUSB0::INSTR
      ASRL::/dev/ttyUSB0,9600::INSTR
      ASRL::/dev/ttyUSB0,9600,8n1::INSTR

    Returns a dict with keys 'type', 'prefix', 'arg1' (may be None) and
    'suffix', or None if the string does not match.
    """
    # Raw string literal: '\d' and '\s' in a plain string are invalid escape
    # sequences (DeprecationWarning, and an error in future Python versions).
    m = re.match(r'^(?P<prefix>(?P<type>ASRL)\d*)(::(?P<arg1>[^\s:]+))?(::(?P<suffix>INSTR))$',
                 resource_string, re.I)

    if m is None:
        # Explicit None return instead of falling off the end of the function.
        return None

    return dict(
        type=m.group('type').upper(),
        prefix=m.group('prefix'),
        arg1=m.group('arg1'),
        suffix=m.group('suffix'),
    )
class SerialInstrument:
"Serial instrument interface client"
def __init__(self, port = None, baudrate=9600, bytesize=8, paritymode=0, stopbits=1, timeout=None,
xonxoff=False, rtscts=False, dsrdtr=False):
if port.upper().startswith("ASRL") and '::' in port:
res = parse_visa_resource_string(port)
if res is None:
raise IOError("Invalid resource string")
index = res['prefix'][4:]
if len(index) > 0:
port = int(index)
else:
# port[,baud[,nps]]
# n = data bits (5,6,7,8)
# p = parity (n,o,e,m,s)
# s = stop bits (1,1.5,2)
t = res['arg1'].split(',')
port = t[0]
if len(t) > 1:
baudrate = int(t[1])
self.serial = serial.Serial(port)
self.term_char = '\n'
self.port = port
self.baudrate = baudrate
self.bytesize = bytesize
self.paritymode = paritymode
self.stopbits = stopbits
self.timeout = timeout
self.xonxoff = xonxoff
self.rtscts = rtscts
self.dsrdtr = dsrdtr
self.wait_dsr = False
self.message_delay = 0
self.update_settings()
def update_settings(self):
self.serial.baudrate = self.baudrate
if self.bytesize == 5:
self.serial.bytesize = serial.FIVEBITS
elif self.bytesize == 6:
self.serial.bytesize = serial.SIXBITS
elif self.bytesize == 7:
self.serial.bytesize = serial.SEVENBITS
else:
self.serial.bytesize = serial.EIGHTBITS
if self.paritymode == 1:
self.serial.paritymode = serial.PARITY_ODD
elif self.paritymode == 2:
self.serial.paritymode = serial.PARITY_EVEN
elif self.paritymode == 3:
self.serial.paritymode = serial.PARITY_MARK
elif self.paritymode == 4:
self.serial.paritymode = serial.PARITY_SPACE
else:
self.serial.paritymode = serial.PARITY_NONE
if self.stopbits == 1.5:
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
elif self.stopbits == 2:
self.serial.stopbits = serial.STOPBITS_TWO
else:
self.serial.stopbits = serial.STOPBITS_ONE
self.serial.timeout = self.timeout
self.serial.xonxoff = self.xonxoff
self.serial.rtscts = self.rtscts
self.serial.dsrdtr = self.dsrdtr
if self.dsrdtr:
self.wait_dsr = True
self.message_delay = 0.1
def write_raw(self, data):
"Write binary data to instrument"
if self.term_char is not None:
data += str(self.term_char).encode('utf-8')[0:1]
self.serial.write(data)
if self.message_delay > 0:
time.sleep(self.message_delay)
if self.wait_dsr:
while not self.serial.getDSR():
time.sleep(0.01)
def read_raw(self, num=-1):
    """Read bytes from the instrument until the termination character is
    seen or *num* bytes have been read (num < 0 means unlimited)."""
    term = str(self.term_char).encode('utf-8')[0:1]
    chunks = []
    remaining = num
    while True:
        byte = self.serial.read(1)
        chunks.append(byte)
        remaining -= 1
        if byte == term or remaining == 0:
            break
    return b''.join(chunks)
def ask_raw(self, data, num=-1):
    """Write binary data, then return the instrument's binary reply
    (at most *num* bytes; num < 0 means read until termination)."""
    self.write_raw(data)
    return self.read_raw(num)
def write(self, message, encoding='utf-8'):
    """Write a string — or each string in a tuple/list — to the instrument."""
    if type(message) in (tuple, list):
        # Send each command in sequence.
        for item in message:
            self.write(item, encoding)
    else:
        self.write_raw(str(message).encode(encoding))
def read(self, num=-1, encoding='utf-8'):
    """Read a string from the instrument, stripping trailing CR/LF."""
    raw = self.read_raw(num)
    return raw.decode(encoding).rstrip('\r\n')
def ask(self, message, num=-1, encoding='utf-8'):
    """Write a string (or list of strings) and return the reply (or replies)."""
    if type(message) in (tuple, list):
        # One query per command, replies collected in order.
        return [self.ask(item, num, encoding) for item in message]
    self.write(message, encoding)
    return self.read(num, encoding)
def read_stb(self):
    """Read the instrument status byte — not supported on a raw serial link."""
    raise NotImplementedError()
def trigger(self):
    """Assert a device trigger via the IEEE 488.2 *TRG common command."""
    self.write("*TRG")
def clear(self):
    """Clear instrument status via the IEEE 488.2 *CLS common command.

    NOTE(review): this sends *CLS (clear status); a bus-level device clear
    is not possible over a raw serial link.
    """
    self.write("*CLS")
def remote(self):
    """Switch the instrument to remote control — not supported here."""
    raise NotImplementedError()
def local(self):
    """Return the instrument to local control — not supported here."""
    raise NotImplementedError()
def lock(self):
    """Lock out the instrument front panel — not supported here."""
    raise NotImplementedError()
def unlock(self):
    """Release the instrument front-panel lockout — not supported here."""
    raise NotImplementedError()
|
|
import boto3
import re
from flask import render_template
from flask_mail import Message
from boto3.dynamodb.conditions import Key, Attr
from utils.ResponseCreation import ControllerResponse
from utils import db_utils as dbUtils
import utils.MentiiLogging as MentiiLogging
import uuid
import hashlib
import class_ctrl as class_ctrl
from flask import g
def sendForgotPasswordEmail(httpOrigin, jsonData, mailer, dbInstance):
    """Email a one-time reset-password link to the given user.

    Silently does nothing when the email is not attached to a known user.
    """
    email = jsonData.get('email', None)
    resetPasswordId = str(uuid.uuid4())
    if addResetPasswordIdToUser(email, resetPasswordId, dbInstance):
        host = getProperEnvironment(httpOrigin)
        url = '{0}/reset-password/{1}'.format(host, resetPasswordId)
        body = render_template('forgotPasswordEmail.html', url=url)
        msg = Message('Mentii: Reset Password',
                      recipients=[email],
                      extra_headers={'Content-Transfer-Encoding': 'quoted-printable'},
                      html=body)
        mailer.send(msg)
def addResetPasswordIdToUser(email, resetPasswordId, dbInstance):
    """Store resetPasswordId on the user's record; return True on success."""
    table = dbUtils.getTable('users', dbInstance)
    if table is None:
        return False
    if getUserByEmail(email, dbInstance) is None:
        return False
    update = {
        'Key': {'email': email},
        'UpdateExpression': 'SET resetPasswordId = :a',
        'ExpressionAttributeValues': {':a': resetPasswordId},
        'ReturnValues': 'UPDATED_NEW'
    }
    dbUtils.updateItem(update, table)
    return True
def resetUserPassword(jsonData, dbInstance):
    """Set a new password for the user identified by email + reset id."""
    response = ControllerResponse()
    email = jsonData.get('email', None)
    password = jsonData.get('password', None)
    resetPasswordId = jsonData.get('id', None)
    if email is None or password is None or resetPasswordId is None:
        response.addError('Failed to Reset Password',
                          'We were unable to update the password for this account.')
        return response
    res = updatePasswordForEmailAndResetId(email, password, resetPasswordId, dbInstance)
    if res is None:
        response.addError('Failed to Reset Password',
                          'We were unable to update the password for this account.')
    else:
        response.addToPayload('status', 'Success')
    return response
def updatePasswordForEmailAndResetId(email, password, resetPasswordId, dbInstance):
    """Replace the stored password hash when the reset id matches.

    Returns the DB update response on success, otherwise None.
    """
    user = getUserByEmail(email, dbInstance)
    if user is None or user.get('resetPasswordId', None) != resetPasswordId:
        return None
    table = dbUtils.getTable('users', dbInstance)
    if table is None:
        return None
    update = {
        'Key': {'email': email},
        'UpdateExpression': 'SET password = :a REMOVE resetPasswordId',
        'ExpressionAttributeValues': {':a': hashPassword(password)},
        'ReturnValues': 'UPDATED_NEW'
    }
    return dbUtils.updateItem(update, table)
def getProperEnvironment(httpOrigin):
    """Pick the application host that matches the request's origin.

    'stapp' must be tested before 'app' because every staging origin also
    contains the substring 'app'.
    """
    if 'stapp' in httpOrigin:
        return 'http://stapp.mentii.me'
    if 'app' in httpOrigin:
        return 'http://app.mentii.me'
    return 'http://localhost:3000'
def register(httpOrigin, jsonData, mailer, dbInstance):
    """Validate registration data and create an inactive user account."""
    response = ControllerResponse()
    if not validateRegistrationJSON(jsonData):
        response.addError('Register Validation Error',
                          'The json data did not have an email or did not have a password')
        return response
    email = parseEmail(jsonData)
    password = parsePassword(jsonData)
    if not isEmailValid(email):
        response.addError('Email invalid', 'The email is invalid')
    if not isPasswordValid(password):
        response.addError('Password Invalid', 'The password is invalid')
    # Refuse to overwrite an already-activated account.
    if isEmailInSystem(email, dbInstance) and isUserActive(getUserByEmail(email, dbInstance)):
        response.addError('Registration Failed', 'We were unable to register this user')
    if not response.hasErrors():
        hashedPassword = hashPassword(parsePassword(jsonData))
        activationId = addUserAndSendEmail(httpOrigin, email, hashedPassword, mailer, dbInstance)
        if activationId is None:
            response.addError('Activation Id is None', 'Could not create an activation Id')
    return response
def hashPassword(password):
    """Return the hex MD5 digest of *password*.

    BUG FIX: hashlib requires bytes — hashing a (unicode) str raised a
    TypeError on Python 3; the password is now UTF-8 encoded first.

    NOTE(security): MD5 is not a safe password hash; it is kept only for
    compatibility with hashes already stored in the users table. Consider
    migrating to bcrypt/scrypt/PBKDF2.
    """
    if not isinstance(password, bytes):
        password = password.encode('utf-8')
    return hashlib.md5(password).hexdigest()
def validateRegistrationJSON(jsonData):
    """Return True iff *jsonData* is a mapping containing both an 'email'
    and a 'password' key (idiomatic membership test instead of .keys())."""
    if jsonData is None:
        return False
    return 'password' in jsonData and 'email' in jsonData
def parseEmail(jsonData):
    """Return jsonData['email'], or None (logging the error) on failure."""
    try:
        return jsonData['email']
    except Exception as e:
        MentiiLogging.getLogger().exception(e)
        return None
def parsePassword(jsonData):
    """Return jsonData['password'], or None (logging the error) on failure."""
    try:
        return jsonData['password']
    except Exception as e:
        MentiiLogging.getLogger().exception(e)
        return None
def isEmailValid(email):
    """Loose sanity check: one '@' separating non-empty parts, and a dot
    somewhere in the domain part."""
    return re.match(r"[^@]+@[^@]+\.[^@]+", email) is not None
def isPasswordValid(password):
    """Passwords must contain at least 8 characters."""
    minimum_length = 8
    return len(password) >= minimum_length
def addUserAndSendEmail(httpOrigin, email, password, mailer, dbInstance):
    """Create an inactive user record and email its activation link.

    Returns the activation id, or None when the user could not be stored.

    BUG FIX: the original still called dbUtils.putItem with table=None
    after failing to get the table; we now return early instead.
    """
    activationId = str(uuid.uuid4())
    table = dbUtils.getTable('users', dbInstance)
    if table is None:
        MentiiLogging.getLogger().error('Unable to get table users in addUserAndSendEmail')
        return None
    jsonData = {
        'email': email,
        'password': password,
        'activationId': activationId,
        'active': 'F',
        'userRole': "student"
    }
    #This will change an existing user with the same email.
    response = dbUtils.putItem(jsonData, table)
    if response is None:
        MentiiLogging.getLogger().error('Unable to add user to table users in addUserAndSendEmail')
        return None
    try:
        sendEmail(httpOrigin, email, activationId, mailer)
    except Exception as e:
        MentiiLogging.getLogger().exception(e)
    return activationId
def deleteUser(email, dbInstance):
    """Remove the user record with this email; returns the DB response."""
    table = dbUtils.getTable('users', dbInstance)
    return dbUtils.deleteItem({'email': email}, table)
def sendEmail(httpOrigin, email, activationId, mailer):
    """Send the account-activation email containing the activation link.

    No-op when activationId is None (user creation failed upstream).
    """
    if activationId is None:
        return
    #Change the URL to the appropriate environment
    host = getProperEnvironment(httpOrigin)
    url = '{0}/activation/{1}'.format(host, activationId)
    body = render_template('registrationEmail.html', url=url)
    msg = Message('Mentii: Thank You for Creating an Account!',
                  recipients=[email],
                  extra_headers={'Content-Transfer-Encoding': 'quoted-printable'},
                  html=body)
    mailer.send(msg)
def isEmailInSystem(email, dbInstance):
    """True iff a user record with this email exists in the users table.

    Idiom fixes: `is not None` instead of `!= None`, direct dict
    membership instead of `.keys()`.
    """
    user = getUserByEmail(email, dbInstance)
    return user is not None and 'email' in user
def activate(activationId, dbInstance):
    """Flip the account matching *activationId* to active ('T').

    Idiom fixes: removed the unused `res` local and the `.keys()`
    membership test.
    """
    response = ControllerResponse()
    table = dbUtils.getTable('users', dbInstance)
    if table is None:
        MentiiLogging.getLogger().error('Unable to get table users in activate')
        response.addError('Could not access table. Error', 'The DB did not give us the table')
        return response
    #Scan for the email associated with this activationId
    items = []
    scanResponse = dbUtils.scanFilter('activationId', activationId, table)
    if scanResponse is not None:
        #scanResponse is a dictionary that has a list of 'Items'
        items = scanResponse['Items']
    if not items or 'email' not in items[0]:
        response.addError('No user with activationid', 'The DB did not return a user with the passed in activationId')
    else:
        email = items[0]['email']
        jsonData = {
            'Key': {'email': email},
            'UpdateExpression': 'SET active = :a',
            'ExpressionAttributeValues': {':a': 'T'},
            'ReturnValues': 'UPDATED_NEW'
        }
        #Update using the email we have
        dbUtils.updateItem(jsonData, table)
        response.addToPayload('status', 'Success')
    return response
def isUserActive(user):
    """True iff the user record exists and is flagged active ('T').

    Idiom fixes: `is not None` instead of `!= None`, `dict.get` instead of
    a `.keys()` membership test plus an index.
    """
    return user is not None and user.get('active') == 'T'
def getUserByEmail(email, dbInstance):
    """Fetch the full user record for *email*, or None when unavailable."""
    table = dbUtils.getTable('users', dbInstance)
    if table is None:
        MentiiLogging.getLogger().error('Unable to get table users in getUserByEmail')
        return None
    result = dbUtils.getItem({'Key': {'email': email}}, table)
    if result is None:
        MentiiLogging.getLogger().error('Unable to get the user with email: ' + email + ' in getUserByEmail ')
        return None
    return result.get('Item')
def changeUserRole(jsonData, dbInstance, adminRole=None):
    """Change another user's role; only admins may do this.

    Fixes: user-facing typo ('Invaid role specified' -> 'Invalid role
    specified'), `.keys()` idiom, and guard-clause flattening of the
    nested if/else pyramid.
    """
    response = ControllerResponse()
    #g will be not be available during testing
    #and adminRole will need to be passed to the function
    if g: # pragma: no cover
        adminRole = g.authenticatedUser['userRole']
    #adminRole is confirmed here incase changeUserRole is called from somewhere
    #other than app.py changeUserRole()
    if adminRole != 'admin':
        response.addError('Role Error', 'Only admins can change user roles')
        return response
    if 'email' not in jsonData or 'userRole' not in jsonData:
        response.addError('Key Missing Error', 'Email or role missing from json data')
        return response
    email = jsonData['email']
    userRole = jsonData['userRole']
    userTable = dbUtils.getTable('users', dbInstance)
    if userTable is None:
        MentiiLogging.getLogger().error('Unable to get table "users" in changeUserRole')
        response.addError('No Access to Data', 'Unable to get data from database')
        return response
    if userRole not in ('student', 'teacher', 'admin'):
        MentiiLogging.getLogger().error('Invalid role: ' + userRole + ' specified. Unable to change user role')
        response.addError('Invalid Role Type', 'Invalid role specified')
        return response
    data = {
        'Key': {'email': email},
        'UpdateExpression': 'SET userRole = :ur',
        'ExpressionAttributeValues': {':ur': userRole},
        'ReturnValues': 'UPDATED_NEW'
    }
    result = dbUtils.updateItem(data, userTable)
    if result is None:
        MentiiLogging.getLogger().error('Unable to update the user with email: ' + email + ' in changeUserRole')
        response.addError('Result Update Error', 'Could not update the user role in database')
    else:
        response.addToPayload('Result:', result)
        response.addToPayload('success', 'true')
    return response
def getRole(userEmail, dynamoDBInstance):
    """Return the stored role for *userEmail*, or None when it cannot be
    read. Callers must treat None as lowest (student) privilege."""
    table = dbUtils.getTable('users', dynamoDBInstance)
    if table is None:
        MentiiLogging.getLogger().error('Could not get user table in getUserRole')
        return None
    request = {"Key": {"email": userEmail}, "ProjectionExpression": "userRole"}
    res = dbUtils.getItem(request, table)
    if res is None or 'Item' not in res:
        MentiiLogging.getLogger().error('Could not get role for user ' + userEmail)
        return None
    return res['Item']['userRole']
def joinClass(jsonData, dynamoDBInstance, email=None, userRole=None):
    """Enroll the (authenticated) user as a student in a class."""
    response = ControllerResponse()
    #g will be not be available during testing
    #and email will need to be passed to the function
    if g: # pragma: no cover
        email = g.authenticatedUser['email']
        userRole = g.authenticatedUser['userRole']
    if 'code' not in jsonData.keys() or not jsonData['code']:
        response.addError('Key Missing Error', 'class code missing from data')
        return response
    isPrivileged = userRole in ('teacher', 'admin')
    # The taught-list check only runs for teachers/admins (short-circuit).
    if isPrivileged and class_ctrl.isCodeInTaughtList(jsonData, dynamoDBInstance, email):
        response.addError('Role Error', 'Teachers cannot join their taught class as a student')
    else:
        addDataToClassAndUser(jsonData['code'], email, response, dynamoDBInstance)
    return response
def addDataToClassAndUser(classCode, email, response, dynamoDBInstance):
    """Cross-link a class and a student, reporting results on *response*."""
    updatedClassCodes = addClassCodeToStudent(email, classCode, dynamoDBInstance)
    if not updatedClassCodes:
        response.addError('joinClass call Failed', 'Unable to update user data')
        return
    updatedClass = addStudentToClass(classCode, email, dynamoDBInstance)
    if not updatedClass:
        response.addError('joinClass call Failed', 'Unable to update class data')
        return
    response.addToPayload('title', updatedClass['title'])
    response.addToPayload('code', updatedClass['code'])
def leaveClass(jsonData, dynamoDBInstance, email=None):
    """Remove the (authenticated) user from a class roster."""
    response = ControllerResponse()
    data = None
    if g: # pragma: no cover
        email = g.authenticatedUser['email']
    if 'code' not in jsonData.keys() or not jsonData['code']:
        response.addError('Key Missing Error', 'class code missing from data')
    else:
        data = {
            'email': email,
            'classCode': jsonData['code']
        }
    # NOTE(review): on the error path this still delegates with data=None;
    # removeStudent presumably tolerates that — confirm against class_ctrl.
    return class_ctrl.removeStudent(dynamoDBInstance, data, response=response, userRole=None)
def addClassCodeToStudent(email, classCode, dynamoDBInstance):
    """ADD *classCode* to the user's classCodes set.

    Returns the updated set on success, otherwise None.
    """
    userTable = dbUtils.getTable('users', dynamoDBInstance)
    if not userTable:
        return None
    update = {
        'Key': {'email': email},
        'UpdateExpression': 'ADD classCodes :i',
        'ExpressionAttributeValues': {':i': set([classCode])},
        'ReturnValues': 'UPDATED_NEW'
    }
    res = dbUtils.updateItem(update, userTable)
    if not res:
        return None
    attrs = res.get('Attributes', {})
    if classCode in attrs.get('classCodes', ()):
        return attrs['classCodes']
    return None
def addStudentToClass(classCode, email, dynamoDBInstance):
    """ADD *email* to the class roster.

    Returns the full updated class record (must include 'title') on
    success, otherwise None.
    """
    classTable = dbUtils.getTable('classes', dynamoDBInstance)
    if not classTable:
        return None
    update = {
        'Key': {'code': classCode},
        'UpdateExpression': 'ADD students :i',
        'ExpressionAttributeValues': {':i': set([email])},
        'ReturnValues': 'ALL_NEW'
    }
    res = dbUtils.updateItem(update, classTable)
    if not res:
        return None
    attrs = res.get('Attributes', {})
    if email in attrs.get('students', ()) and 'title' in attrs:
        return attrs
    return None
|
|
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from swaggyjenkins.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from swaggyjenkins.exceptions import ApiAttributeError
class GithubRepositorypermissions(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
        allowed_values (dict): The key is the tuple path to the attribute
            and the for var_name this is (var_name,). The value is a dict
            with a capitalized key describing the allowed value and an allowed
            value. These dicts store the allowed enum values.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
        discriminator_value_class_map (dict): A dict to go from the discriminator
            variable value to the discriminator class name.
        validations (dict): The key is the tuple path to the attribute
            and the for var_name this is (var_name,). The value is a dict
            that stores validations for max_length, min_length, max_items,
            min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
            inclusive_minimum, and regex.
        additional_properties_type (tuple): A tuple of classes accepted
            as additional properties values.
    """

    # This model has no enum-constrained attributes ...
    allowed_values = {
    }

    # ... and no length/range/regex validations.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'admin': (bool,),  # noqa: E501
            'push': (bool,),  # noqa: E501
            'pull': (bool,),  # noqa: E501
            '_class': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This schema has no discriminator property.
        return None

    # Python attribute name -> JSON key (identical here).
    attribute_map = {
        'admin': 'admin',  # noqa: E501
        'push': 'push',  # noqa: E501
        'pull': 'pull',  # noqa: E501
        '_class': '_class',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """GithubRepositorypermissions - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again (prevents infinite recursion when
                composed schemas reference each other).
            admin (bool): [optional]  # noqa: E501
            push (bool): [optional]  # noqa: E501
            pull (bool): [optional]  # noqa: E501
            _class (str): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attributes set directly on the instance (everything else
    # goes through the _data_store via setattr).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """GithubRepositorypermissions - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again (prevents infinite recursion when
                composed schemas reference each other).
            admin (bool): [optional]  # noqa: E501
            push (bool): [optional]  # noqa: E501
            pull (bool): [optional]  # noqa: E501
            _class (str): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # NOTE(review): this read-only check runs *after* setattr, so the
            # attribute has already been assigned when the error is raised —
            # standard OpenAPI-generator behavior, left untouched.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
|
|
# Copyright (c) 2014 Hoang Do, Phuc Vo, P. Michiardi, D. Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo.config import cfg
from sahara import conductor
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.openstack.common import log as logging
from sahara.plugins import exceptions as ex
from sahara.plugins import provisioning as p
from sahara.plugins.spark import config_helper as c_helper
from sahara.plugins.spark import edp_engine
from sahara.plugins.spark import run_scripts as run
from sahara.plugins.spark import scaling as sc
from sahara.plugins import utils
from sahara.topology import topology_helper as th
from sahara.utils import files as f
from sahara.utils import general as ug
from sahara.utils import remote
conductor = conductor.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class SparkProvider(p.ProvisioningPluginBase):
def __init__(self):
    # Node processes offered by this plugin, keyed by service name.
    self.processes = {
        "HDFS": ["namenode", "datanode"],
        "Spark": ["master", "slave"],
    }
def get_title(self):
    """Human-readable plugin name."""
    return "Apache Spark"
def get_description(self):
    """Short description shown in the plugin catalogue."""
    description = _("This plugin provides an ability to launch Spark on Hadoop "
                    "CDH cluster without any management consoles.")
    return description
def get_versions(self):
    """Image/stack versions this plugin can deploy."""
    return ['1.0.0', '0.9.1']
def get_configs(self, hadoop_version):
    """Expose the plugin's configurable options (version-independent)."""
    return c_helper.get_plugin_configs()
def get_node_processes(self, hadoop_version):
    """Map of service name to its node processes (version-independent)."""
    return self.processes
def validate(self, cluster):
    """Check the cluster topology before provisioning.

    Requires exactly one namenode, at least one datanode, exactly one
    Spark master and at least one Spark slave; raises a plugin exception
    otherwise.
    """
    nn_count = sum([ng.count for ng
                    in utils.get_node_groups(cluster, "namenode")])
    if nn_count != 1:
        raise ex.InvalidComponentCountException("namenode", 1, nn_count)

    dn_count = sum([ng.count for ng
                    in utils.get_node_groups(cluster, "datanode")])
    if dn_count < 1:
        # BUG FIX: the exception previously reported nn_count as the
        # observed datanode count.
        raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                dn_count)

    # validate Spark Master Node and Spark Slaves
    sm_count = sum([ng.count for ng
                    in utils.get_node_groups(cluster, "master")])
    if sm_count != 1:
        raise ex.RequiredServiceMissingException("Spark master")

    sl_count = sum([ng.count for ng
                    in utils.get_node_groups(cluster, "slave")])
    if sl_count < 1:
        raise ex.InvalidComponentCountException("Spark slave",
                                                _("1 or more"),
                                                sl_count)
def update_infra(self, cluster):
    """No infrastructure updates are needed for this plugin."""
    pass
def configure_cluster(self, cluster):
    """Generate and push configuration files to every instance."""
    self._setup_instances(cluster)
def start_cluster(self, cluster):
    """Start HDFS (namenode then datanodes) and afterwards the Spark master.

    Order matters: the namenode is formatted and started first so the
    datanodes can register with it, and the HDFS user home directory is
    created before Spark services come up.
    """
    nn_instance = utils.get_instance(cluster, "namenode")
    sm_instance = utils.get_instance(cluster, "master")
    dn_instances = utils.get_instances(cluster, "datanode")

    # Start the name node
    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_processes(r, "namenode")

    # start the data nodes
    self._start_slave_datanode_processes(dn_instances)

    LOG.info(_LI("Hadoop services in cluster %s have been started"),
             cluster.name)

    # Create the current user's HDFS home directory and hand it over.
    with remote.get_remote(nn_instance) as r:
        r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
        r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                          "/user/$USER/")

    # start spark nodes
    if sm_instance:
        with remote.get_remote(sm_instance) as r:
            run.start_spark_master(r, self._spark_home(cluster))
            LOG.info(_LI("Spark service at '%s' has been started"),
                     sm_instance.hostname())

    LOG.info(_LI('Cluster %s has been started successfully'),
             cluster.name)
    self._set_cluster_info(cluster)
def _spark_home(self, cluster):
    """Configured Spark installation directory on the cluster nodes."""
    return c_helper.get_config_value("Spark", "Spark home", cluster)
def _extract_configs_to_extra(self, cluster):
    """Build the per-node-group configuration payloads.

    Returns a dict keyed by node-group id (plus, when data locality is
    enabled, a 'topology_data' entry) that the push-config steps consume.
    """
    nn = utils.get_instance(cluster, "namenode")
    sp_master = utils.get_instance(cluster, "master")
    sp_slaves = utils.get_instances(cluster, "slave")

    extra = dict()

    config_master = config_slaves = ''
    if sp_master is not None:
        config_master = c_helper.generate_spark_env_configs(cluster)

    # NOTE(review): get_instances presumably returns a list, so the
    # `is not None` branch always runs and the else arm looks dead —
    # confirm against sahara.plugins.utils.
    if sp_slaves is not None:
        slavenames = []
        for slave in sp_slaves:
            slavenames.append(slave.hostname())
        config_slaves = c_helper.generate_spark_slaves_configs(slavenames)
    else:
        config_slaves = "\n"

    for ng in cluster.node_groups:
        extra[ng.id] = {
            'xml': c_helper.generate_xml_configs(
                ng.configuration(),
                ng.storage_paths(),
                nn.hostname(), None,
            ),
            'setup_script': c_helper.generate_hadoop_setup_script(
                ng.storage_paths(),
                c_helper.extract_hadoop_environment_confs(
                    ng.configuration())
            ),
            'sp_master': config_master,
            'sp_slaves': config_slaves
        }

    if c_helper.is_data_locality_enabled(cluster):
        # One "node rack" mapping per line for Hadoop's topology script.
        topology_data = th.generate_topology_map(
            cluster, CONF.enable_hypervisor_awareness)
        extra['topology_data'] = "\n".join(
            [k + " " + v for k, v in topology_data.items()]) + "\n"

    return extra
def _start_slave_datanode_processes(self, dn_instances):
    """Start every datanode concurrently in one thread group."""
    with context.ThreadGroup() as tg:
        for inst in dn_instances:
            tg.spawn('spark-start-dn-%s' % inst.instance_name,
                     self._start_datanode, inst)
def _start_datanode(self, instance):
    """Start the Hadoop datanode process on a single instance."""
    with instance.remote() as r:
        run.start_processes(r, "datanode")
def _setup_instances(self, cluster, instances=None):
    """Generate configs and push them; defaults to every cluster instance."""
    extra = self._extract_configs_to_extra(cluster)
    if instances is None:
        instances = utils.get_instances(cluster)
    self._push_configs_to_nodes(cluster, extra, instances)
def _push_configs_to_nodes(self, cluster, extra, new_instances):
    """Full config push for new instances, partial refresh for the rest."""
    all_instances = utils.get_instances(cluster)
    with context.ThreadGroup() as tg:
        for instance in all_instances:
            is_new = instance in new_instances
            name_fmt = ('spark-configure-%s' if is_new
                        else 'spark-reconfigure-%s')
            worker = (self._push_configs_to_new_node if is_new
                      else self._push_configs_to_existing_node)
            tg.spawn(name_fmt % instance.instance_name,
                     worker, cluster, extra, instance)
def _push_configs_to_new_node(self, cluster, extra, instance):
    """Write every config file a freshly-provisioned node needs, run the
    one-time setup script and prepare the HDFS directories and SSH key."""
    ng_extra = extra[instance.node_group.id]

    files_hadoop = {
        '/etc/hadoop/conf/core-site.xml': ng_extra['xml']['core-site'],
        '/etc/hadoop/conf/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
    }

    sp_home = self._spark_home(cluster)
    files_spark = {
        os.path.join(sp_home, 'conf/spark-env.sh'): ng_extra['sp_master'],
        os.path.join(sp_home, 'conf/slaves'): ng_extra['sp_slaves']
    }

    files_init = {
        '/tmp/sahara-hadoop-init.sh': ng_extra['setup_script'],
        'id_rsa': cluster.management_private_key,
        'authorized_keys': cluster.management_public_key
    }

    # pietro: This is required because the (secret) key is not stored in
    # .ssh which hinders password-less ssh required by spark scripts
    key_cmd = ('sudo cp $HOME/id_rsa $HOME/.ssh/; '
               'sudo chown $USER $HOME/.ssh/id_rsa; '
               'sudo chmod 600 $HOME/.ssh/id_rsa')

    storage_paths = instance.node_group.storage_paths()
    dn_path = ' '.join(c_helper.make_hadoop_path(storage_paths,
                                                 '/dfs/dn'))
    nn_path = ' '.join(c_helper.make_hadoop_path(storage_paths,
                                                 '/dfs/nn'))

    # Create the HDFS name/data dirs and hand them to the hdfs user.
    hdfs_dir_cmd = ('sudo mkdir -p %(nn_path)s %(dn_path)s &&'
                    'sudo chown -R hdfs:hadoop %(nn_path)s %(dn_path)s &&'
                    'sudo chmod 755 %(nn_path)s %(dn_path)s' %
                    {"nn_path": nn_path, "dn_path": dn_path})

    with remote.get_remote(instance) as r:
        r.execute_command(
            'sudo chown -R $USER:$USER /etc/hadoop'
        )
        r.execute_command(
            'sudo chown -R $USER:$USER %s' % sp_home
        )
        r.write_files_to(files_hadoop)
        r.write_files_to(files_spark)
        r.write_files_to(files_init)
        r.execute_command(
            'sudo chmod 0500 /tmp/sahara-hadoop-init.sh'
        )
        r.execute_command(
            'sudo /tmp/sahara-hadoop-init.sh '
            '>> /tmp/sahara-hadoop-init.log 2>&1')
        r.execute_command(hdfs_dir_cmd)
        r.execute_command(key_cmd)

        if c_helper.is_data_locality_enabled(cluster):
            r.write_file_to(
                '/etc/hadoop/topology.sh',
                f.get_file_text(
                    'plugins/spark/resources/topology.sh'))
            r.execute_command(
                'sudo chmod +x /etc/hadoop/topology.sh'
            )

        self._write_topology_data(r, cluster, extra)
        self._push_master_configs(r, cluster, extra, instance)
def _push_configs_to_existing_node(self, cluster, extra, instance):
    """Refresh only the config files relevant to this node's processes."""
    node_processes = instance.node_group.node_processes
    need_update_hadoop = (c_helper.is_data_locality_enabled(cluster) or
                          'namenode' in node_processes)
    need_update_spark = ('master' in node_processes or
                         'slave' in node_processes)

    if need_update_spark:
        ng_extra = extra[instance.node_group.id]
        sp_home = self._spark_home(cluster)
        files = {
            os.path.join(sp_home,
                         'conf/spark-env.sh'): ng_extra['sp_master'],
            os.path.join(sp_home, 'conf/slaves'): ng_extra['sp_slaves'],
        }
        # FIX: use the remote as a context manager so the connection is
        # released; the old code obtained it without closing.
        with remote.get_remote(instance) as r:
            r.write_files_to(files)
    if need_update_hadoop:
        with remote.get_remote(instance) as r:
            self._write_topology_data(r, cluster, extra)
            self._push_master_configs(r, cluster, extra, instance)
def _write_topology_data(self, r, cluster, extra):
    """Write the topology map file when data locality is enabled."""
    if not c_helper.is_data_locality_enabled(cluster):
        return
    r.write_file_to('/etc/hadoop/topology.data', extra['topology_data'])
def _push_master_configs(self, r, cluster, extra, instance):
node_processes = instance.node_group.node_processes
if 'namenode' in node_processes:
self._push_namenode_configs(cluster, r)
def _push_namenode_configs(self, cluster, r):
r.write_file_to('/etc/hadoop/dn.incl',
utils.generate_fqdn_host_names(
utils.get_instances(cluster, "datanode")))
def _set_cluster_info(self, cluster):
nn = utils.get_instance(cluster, "namenode")
sp_master = utils.get_instance(cluster, "master")
info = {}
if nn:
address = c_helper.get_config_value(
'HDFS', 'dfs.http.address', cluster)
port = address[address.rfind(':') + 1:]
info['HDFS'] = {
'Web UI': 'http://%s:%s' % (nn.management_ip, port)
}
info['HDFS']['NameNode'] = 'hdfs://%s:8020' % nn.hostname()
if sp_master:
port = c_helper.get_config_value(
'Spark', 'Master webui port', cluster)
if port is not None:
info['Spark'] = {
'Web UI': 'http://%s:%s' % (sp_master.management_ip, port)
}
ctx = context.ctx()
conductor.cluster_update(ctx, cluster, {'info': info})
# Scaling
def validate_scaling(self, cluster, existing, additional):
self._validate_existing_ng_scaling(cluster, existing)
self._validate_additional_ng_scaling(cluster, additional)
def decommission_nodes(self, cluster, instances):
sls = utils.get_instances(cluster, "slave")
dns = utils.get_instances(cluster, "datanode")
decommission_dns = False
decommission_sls = False
for i in instances:
if 'datanode' in i.node_group.node_processes:
dns.remove(i)
decommission_dns = True
if 'slave' in i.node_group.node_processes:
sls.remove(i)
decommission_sls = True
nn = utils.get_instance(cluster, "namenode")
spark_master = utils.get_instance(cluster, "master")
if decommission_sls:
sc.decommission_sl(spark_master, instances, sls)
if decommission_dns:
sc.decommission_dn(nn, instances, dns)
    def scale_cluster(self, cluster, instances):
        """Bring newly-provisioned *instances* into a running cluster.

        The sequence matters: the Spark master is stopped before the
        topology changes, HDFS is told to re-read its node lists, the new
        slave/datanode daemons are started, and only then is the master
        brought back so it picks up the new slaves.
        """
        master = utils.get_instance(cluster, "master")
        r_master = remote.get_remote(master)
        # Stop Spark while the set of slaves changes.
        run.stop_spark(r_master, self._spark_home(cluster))
        # Configure and prepare the new instances.
        self._setup_instances(cluster, instances)
        nn = utils.get_instance(cluster, "namenode")
        # Have the namenode re-read its include/exclude host files.
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        self._start_slave_datanode_processes(instances)
        run.start_spark_master(r_master, self._spark_home(cluster))
        LOG.info(_LI("Spark master service at '%s' has been restarted"),
                 master.hostname())
def _get_scalable_processes(self):
return ["datanode", "slave"]
def _validate_additional_ng_scaling(self, cluster, additional):
scalable_processes = self._get_scalable_processes()
for ng_id in additional:
ng = ug.get_by_id(cluster.node_groups, ng_id)
if not set(ng.node_processes).issubset(scalable_processes):
raise ex.NodeGroupCannotBeScaled(
ng.name, _("Spark plugin cannot scale nodegroup"
" with processes: %s") %
' '.join(ng.node_processes))
def _validate_existing_ng_scaling(self, cluster, existing):
scalable_processes = self._get_scalable_processes()
dn_to_delete = 0
for ng in cluster.node_groups:
if ng.id in existing:
if ng.count > existing[ng.id] and ("datanode" in
ng.node_processes):
dn_to_delete += ng.count - existing[ng.id]
if not set(ng.node_processes).issubset(scalable_processes):
raise ex.NodeGroupCannotBeScaled(
ng.name, _("Spark plugin cannot scale nodegroup"
" with processes: %s") %
' '.join(ng.node_processes))
dn_amount = len(utils.get_instances(cluster, "datanode"))
rep_factor = c_helper.get_config_value('HDFS', "dfs.replication",
cluster)
if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
raise ex.ClusterCannotBeScaled(
cluster.name, _("Spark plugin cannot shrink cluster because "
"there would be not enough nodes for HDFS "
"replicas (replication factor is %s)") %
rep_factor)
def get_edp_engine(self, cluster, job_type):
if job_type in edp_engine.EdpEngine.get_supported_job_types():
return edp_engine.EdpEngine(cluster)
return None
|
|
#!/usr/bin/env python
# Landsat Util
# License: CC0 1.0 Universal
import argparse
import textwrap
import json
from dateutil.parser import parse
import pycurl
from downloader import Downloader, IncorrectSceneId
from search import Search
from utils import reformat_date, convert_to_integer_list, timer, exit
from mixins import VerbosityMixin
from image import Process, FileDoesNotExist
from __init__ import __version__
# CLI help epilogue rendered by argparse (RawDescriptionHelpFormatter);
# keep the defaults quoted here in sync with args_options() below.
DESCRIPTION = """Landsat-util is a command line utility that makes it easy to
search, download, and process Landsat imagery.
Commands:
Search:
landsat.py search [-p --pathrow] [--lat] [--lon] [-l LIMIT] [-s START] [-e END] [-c CLOUD] [-h]
optional arguments:
-p, --pathrow Paths and Rows in order separated by comma. Use quotes "001,003".
Example: path,row,path,row 001,001,190,204
--lat Latitude
--lon Longitude
-l LIMIT, --limit LIMIT
Search return results limit default is 10
-s START, --start START
Start Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-e END, --end END End Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-c CLOUD, --cloud CLOUD
Maximum cloud percentage. Default: 20 perct
-h, --help Show this help message and exit
Download:
landsat download sceneID [sceneID ...] [-h] [-b --bands]
positional arguments:
sceneID Provide Full sceneIDs. You can add as many sceneIDs as you wish
Example: landast download LC81660392014196LGN00
optional arguments:
-b --bands If you specify bands, landsat-util will try to download the band from S3.
If the band does not exist, an error is returned
-h, --help Show this help message and exit
-d, --dest Destination path
Process:
landsat.py process path [-h] [-b --bands] [-p --pansharpen]
positional arguments:
path Path to the landsat image folder or zip file
optional arguments:
-b --bands Specify bands. The bands should be written in sequence with no spaces
Default: Natural colors (432)
Example --bands 432
-p --pansharpen Whether to also pansharpen the process image.
Pansharpening takes a long time
-v, --verbose Show verbose output
-h, --help Show this help message and exit
"""
def args_options():
    """Build and return the landsat command-line parser.

    The parser exposes three subcommands -- search, download, process --
    and records the chosen one on the parsed namespace as ``args.subs``.

    :returns: configured ``argparse.ArgumentParser``
    """
    parser = argparse.ArgumentParser(prog='landsat',
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=textwrap.dedent(DESCRIPTION))
    subparsers = parser.add_subparsers(help='Landsat Utility',
                                       dest='subs')
    parser.add_argument('--version', action='version', version='%(prog)s version ' + __version__)
    # Search Logic
    parser_search = subparsers.add_parser('search',
                                          # FIX: was "metdata"
                                          help='Search Landsat metadata')
    # Global search options
    parser_search.add_argument('-l', '--limit', default=10, type=int,
                               help='Search return results limit\n'
                               # FIX: help claimed "default is 100" while the
                               # actual default (and DESCRIPTION) is 10.
                               'default is 10')
    parser_search.add_argument('-s', '--start',
                               help='Start Date - Most formats are accepted '
                               'e.g. Jun 12 2014 OR 06/12/2014')
    parser_search.add_argument('-e', '--end',
                               help='End Date - Most formats are accepted '
                               'e.g. Jun 12 2014 OR 06/12/2014')
    parser_search.add_argument('-c', '--cloud', type=float, default=20.0,
                               help='Maximum cloud percentage '
                               'default is 20 perct')
    parser_search.add_argument('-p', '--pathrow',
                               help='Paths and Rows in order separated by comma. Use quotes ("001").'
                               'Example: path,row,path,row 001,001,190,204')
    parser_search.add_argument('--lat', type=float, help='The latitude')
    parser_search.add_argument('--lon', type=float, help='The longitude')
    parser_download = subparsers.add_parser('download',
                                            help='Download images from Google Storage')
    parser_download.add_argument('scenes',
                                 metavar='sceneID',
                                 nargs="+",
                                 help="Provide Full sceneID, e.g. LC81660392014196LGN00")
    parser_download.add_argument('-b', '--bands', help='If you specify bands, landsat-util will try to download '
                                 'the band from S3. If the band does not exist, an error is returned')
    parser_download.add_argument('-d', '--dest', help='Destination path')
    parser_process = subparsers.add_parser('process',
                                           help='Process Landsat imagery')
    parser_process.add_argument('path',
                                help='Path to the compressed image file')
    parser_process.add_argument('--pansharpen', action='store_true',
                                help='Whether to also pansharpen the process '
                                'image. Pan sharpening takes a long time')
    parser_process.add_argument('-b', '--bands', help='specify band combinations. Default is 432 '
                                # FIX: the two fragments previously joined as
                                # "432Example" (missing separator).
                                'Example: --bands 321')
    parser_process.add_argument('-v', '--verbose', action='store_true',
                                help='Turn on verbosity')
    return parser
def main(args):
    """Dispatch parsed command-line *args* to the matching subcommand.

    :param args: ``argparse.Namespace`` produced by ``args_options()``;
                 its ``subs`` attribute names the subcommand to run.

    Every successful or failed path terminates via ``utils.exit()``.
    """
    v = VerbosityMixin()
    if args:
        if args.subs == 'process':
            verbose = True if args.verbose else False
            try:
                bands = convert_to_integer_list(args.bands)
                p = Process(args.path, bands=bands, verbose=verbose)
            except IOError:
                exit("Zip file corrupted", 1)
            except FileDoesNotExist as e:
                exit(e.message, 1)
            stored = p.run(args.pansharpen)
            exit("The output is stored at %s" % stored)
        elif args.subs == 'search':
            try:
                if args.start:
                    args.start = reformat_date(parse(args.start))
                if args.end:
                    args.end = reformat_date(parse(args.end))
            except (TypeError, ValueError):
                # FIX: message previously read "You date format ..."
                exit("Your date format is incorrect. Please try again!", 1)
            s = Search()
            try:
                lat = float(args.lat) if args.lat else None
                lon = float(args.lon) if args.lon else None
            except ValueError:
                exit("The latitude and longitude values must be valid numbers", 1)
            result = s.search(paths_rows=args.pathrow,
                              lat=lat,
                              lon=lon,
                              limit=args.limit,
                              start_date=args.start,
                              end_date=args.end,
                              cloud_max=args.cloud)
            if result['status'] == 'SUCCESS':
                v.output('%s items were found' % result['total'], normal=True, arrow=True)
                # The search API caps usable result sets at 100 entries.
                if result['total'] > 100:
                    exit('Over 100 results. Please narrow your search', 1)
                else:
                    v.output(json.dumps(result, sort_keys=True, indent=4), normal=True, color='green')
                    exit('Search completed!')
            elif result['status'] == 'error':
                exit(result['message'], 1)
        elif args.subs == 'download':
            d = Downloader(download_dir=args.dest)
            try:
                if d.download(args.scenes, convert_to_integer_list(args.bands)):
                    exit('Download Completed', 0)
            except IncorrectSceneId:
                exit('The SceneID provided was incorrect', 1)
def __main__():
    """CLI entry point: build the parser, parse argv, time the run."""
    global parser
    parser = args_options()
    cli_args = parser.parse_args()
    with timer():
        main(cli_args)
# Run the CLI; exit politely on Ctrl+C or a pycurl-level transfer abort.
if __name__ == "__main__":
    try:
        __main__()
    except (KeyboardInterrupt, pycurl.error):
        exit('Received Ctrl + C... Exiting! Bye.', 1)
|
|
# -*- coding: utf-8 -*-
import os
import json
import uuid
import copy
import logging
import mimetypes
import warnings
from . import utils
from . import feedreader
from . import exceptions as exp
from .resource import Resource
# Module-level logger for the couchdb client.
logger = logging.getLogger(__name__)
# Server URL fallback; the COUCHDB_URL env var overrides the localhost default.
DEFAULT_BASE_URL = os.environ.get('COUCHDB_URL', 'http://localhost:5984/')
def _id_to_path(_id):
if _id[:1] == "_":
return _id.split("/", 1)
return [_id]
def _listen_feed(object, node, feed_reader, **kwargs):
    """Stream the changes feed at *node*, dispatching lines to *feed_reader*.

    :param object: Server or Database instance exposing ``.resource``
    :param node: feed endpoint name (e.g. "_changes", "_db_updates")
    :param feed_reader: BaseFeedReader instance or a plain callable
    :raises: UnexpectedError if *feed_reader* is not callable

    Blocks until the reader raises FeedReaderExited or the stream ends.
    """
    if not callable(feed_reader):
        raise exp.UnexpectedError("feed_reader must be callable or class")
    if isinstance(feed_reader, feedreader.BaseFeedReader):
        # Already a reader object: bind it to this server/database.
        reader = feed_reader(object)
    else:
        # Wrap a plain callable in the simple reader adapter.
        reader = feedreader.SimpleFeedReader()(object, feed_reader)
    # Possible options: "continuous", "longpoll"
    kwargs.setdefault("feed", "continuous")
    # heartbeat is in milliseconds; allow a tolerance window past it
    # before the HTTP read times out.
    heartbeat = int(kwargs.get('heartbeat') or 0)
    timeout = (heartbeat / 1000.0) + reader.HEARTBEAT_TOLERANCE
    logger.info('Listening to _changes, timeout set to %f', timeout)
    (resp, result) = object.resource(node).get(
        params=kwargs, stream=True, timeout=timeout)
    try:
        for line in resp.iter_lines():
            # ignore heartbeats
            if not line:
                reader.on_heartbeat()
            else:
                reader.on_message(json.loads(utils.force_text(line)))
    except exp.FeedReaderExited:
        reader.on_close()
class _StreamResponse(object):
"""
Proxy object for python-requests stream response.
See more on:
http://docs.python-requests.org/en/latest/user/advanced/#streaming-requests
"""
def __init__(self, response):
self._response = response
def iter_content(self, chunk_size=1, decode_unicode=False):
return self._response.iter_content(chunk_size=chunk_size,
decode_unicode=decode_unicode)
def iter_lines(self, chunk_size=512, decode_unicode=None):
return self._response.iter_lines(chunk_size=chunk_size,
decode_unicode=decode_unicode)
@property
def raw(self):
return self._response.raw
@property
def url(self):
return self._response.url
class Server(object):
    """
    Class that represents a couchdb connection.
    :param verify: setup ssl verification.
    :param base_url: a full url to couchdb (can contain auth data).
    :param full_commit: If ``False``, couchdb not commits all data on a
                        request is finished.
    :param authmethod: specify a authentication method. By default "basic"
                       method is used but also exists "session" (that requires
                       some server configuration changes).
    .. versionchanged: 1.4
        Set basic auth method as default instead of session method.
    .. versionchanged: 1.5
        Add verify parameter for setup ssl verificaton
    """
    def __init__(self, base_url=DEFAULT_BASE_URL, full_commit=True,
                 authmethod="basic", verify=False):
        # Split any user:pass@ credentials out of the URL before storing it.
        self.base_url, credentials = utils.extract_credentials(base_url)
        self.resource = Resource(self.base_url, full_commit,
                                 credentials=credentials,
                                 authmethod=authmethod,
                                 verify=verify)
    def __repr__(self):
        return '<CouchDB Server "{}">'.format(self.base_url)
    def __contains__(self, name):
        # HEAD is the cheapest existence probe; a missing database surfaces
        # as exp.NotFound raised by the resource layer.
        try:
            self.resource.head(name)
        except exp.NotFound:
            return False
        else:
            return True
    def __iter__(self):
        # Iterates over the names of all databases on the server.
        (r, result) = self.resource.get('_all_dbs')
        return iter(result)
    def __len__(self):
        # Number of databases on the server.
        (r, result) = self.resource.get('_all_dbs')
        return len(result)
    def info(self):
        """
        Get server info.
        :returns: dict with all data that couchdb returns.
        :rtype: dict
        """
        (r, result) = self.resource.get()
        return result
    def delete(self, name):
        """
        Delete some database.
        :param name: database name
        :raises: :py:exc:`~pycouchdb.exceptions.NotFound`
            if a database does not exists
        """
        self.resource.delete(name)
    def database(self, name):
        """
        Get a database instance.
        :param name: database name
        :raises: :py:exc:`~pycouchdb.exceptions.NotFound`
            if a database does not exists
        :returns: a :py:class:`~pycouchdb.client.Database` instance
        """
        (r, result) = self.resource.head(name)
        if r.status_code == 404:
            raise exp.NotFound("Database '{0}' does not exists".format(name))
        # Scope a sub-resource to the database path for all further calls.
        db = Database(self.resource(name), name)
        return db
    def config(self):
        """
        Get a current config data.
        """
        (resp, result) = self.resource.get("_config")
        return result
    def version(self):
        """
        Get the current version of a couchdb server.
        """
        (resp, result) = self.resource.get()
        return result["version"]
    def stats(self, name=None):
        """
        Get runtime stats.
        :param name: if is not None, get stats identified by a name.
        :returns: dict
        """
        if not name:
            (resp, result) = self.resource.get("_stats")
            return result
        # Named lookup is scoped to the "couchdb" stats section.
        resource = self.resource("_stats", "couchdb")
        (r, result) = resource.get(name)
        return result['couchdb'][name]
    def create(self, name):
        """
        Create a database.
        :param name: database name
        :raises: :py:exc:`~pycouchdb.exceptions.Conflict`
            if a database already exists
        :returns: a :py:class:`~pycouchdb.client.Database` instance
        """
        (resp, result) = self.resource.put(name)
        # NOTE(review): on any status other than 200/201 this returns None;
        # presumably the resource layer raises for error statuses — confirm.
        if resp.status_code in (200, 201):
            return self.database(name)
    def replicate(self, source, target, **kwargs):
        """
        Replicate the source database to the target one.
        .. versionadded:: 1.3
        :param source: URL to the source database
        :param target: URL to the target database
        """
        data = {'source': source, 'target': target}
        data.update(kwargs)
        data = utils.force_bytes(utils.to_json(data))
        (resp, result) = self.resource.post('_replicate', data=data)
        return result
    def changes_feed(self, feed_reader, **kwargs):
        """
        Subscribe to changes feed of the whole CouchDB server.
        Note: this method is blocking.
        :param feed_reader: callable or :py:class:`~BaseFeedReader`
            instance
        .. [Ref] http://docs.couchdb.org/en/1.6.1/api/server/common.html#db-updates
        .. versionadded: 1.10
        """
        object = self
        _listen_feed(object, "_db_updates", feed_reader, **kwargs)
class Database(object):
    """
    Class that represents a couchdb database.
    """
    def __init__(self, resource, name):
        # *resource* is already scoped to this database's URL path.
        self.resource = resource
        self.name = name
    def __repr__(self):
        return '<CouchDB Database "{}">'.format(self.name)
    def __contains__(self, doc_id):
        try:
            (resp, result) = self.resource.head(_id_to_path(doc_id))
            # Any 2xx status below 206 means the document exists.
            return resp.status_code < 206
        except exp.NotFound:
            return False
    def config(self):
        """
        Get database status data such as document count, update sequence etc.
        :return: dict
        """
        (resp, result) = self.resource.get()
        return result
    def __len__(self):
        return self.config()['doc_count']
    def delete(self, doc_or_id):
        """
        Delete document by id.
        .. versionchanged:: 1.2
            Accept document or id.
        :param doc_or_id: document or id
        :raises: :py:exc:`~pycouchdb.exceptions.NotFound` if a document
            not exists
        :raises: :py:exc:`~pycouchdb.exceptions.Conflict` if delete with
            wrong revision.
        """
        _id = None
        if isinstance(doc_or_id, dict):
            if "_id" not in doc_or_id:
                raise ValueError("Invalid document, missing _id attr")
            _id = doc_or_id['_id']
        else:
            _id = doc_or_id
        resource = self.resource(*_id_to_path(_id))
        # HEAD first to obtain the current revision from the ETag header,
        # then delete that exact revision.
        (r, result) = resource.head()
        (r, result) = resource.delete(
            params={"rev": r.headers["etag"].strip('"')})
    def delete_bulk(self, docs, transaction=True):
        """
        Delete a bulk of documents.
        .. versionadded:: 1.2
        :param docs: list of docs
        :raises: :py:exc:`~pycouchdb.exceptions.Conflict` if a delete
            is not success
        :returns: raw results from server
        """
        _docs = copy.copy(docs)
        # Mark every document as deleted for the _bulk_docs endpoint.
        for doc in _docs:
            if "_deleted" not in doc:
                doc["_deleted"] = True
        data = utils.force_bytes(utils.to_json({"docs": _docs}))
        params = {"all_or_nothing": "true" if transaction else "false"}
        (resp, results) = self.resource.post("_bulk_docs",
                                             data=data, params=params)
        for result, doc in zip(results, _docs):
            if "error" in result:
                raise exp.Conflict("one or more docs are not saved")
        return results
    def get(self, doc_id, params=None, **kwargs):
        """
        Get a document by id.
        .. versionadded: 1.5
            Now the prefered method to pass params is via **kwargs
            instead of params argument. **params** argument is now
            deprecated and will be deleted in future versions.
        :param doc_id: document id
        :raises: :py:exc:`~pycouchdb.exceptions.NotFound` if a document
            not exists
        :returns: document (dict)
        """
        if params:
            # NOTE(review): these two fragments concatenate without a space
            # ("...favor to**kwargs usage.").
            warnings.warn("params parameter is now deprecated in favor to"
                          "**kwargs usage.", DeprecationWarning)
        if params is None:
            params = {}
        params.update(kwargs)
        (resp, result) = self.resource(*_id_to_path(doc_id)).get(params=params)
        return result
    def save(self, doc, batch=False):
        """
        Save or update a document.
        .. versionchanged:: 1.2
            Now returns a new document instead of modify the original.
        :param doc: document
        :param batch: allow batch=ok inserts (default False)
        :raises: :py:exc:`~pycouchdb.exceptions.Conflict` if save with wrong
            revision.
        :returns: doc
        """
        _doc = copy.copy(doc)
        # Generate a client-side id if the document has none.
        if "_id" not in _doc:
            _doc['_id'] = uuid.uuid4().hex
        if batch:
            params = {'batch': 'ok'}
        else:
            params = {}
        data = utils.force_bytes(utils.to_json(_doc))
        (resp, result) = self.resource(_doc['_id']).put(data=data,
                                                        params=params)
        if resp.status_code == 409:
            raise exp.Conflict(result['reason'])
        # batch=ok responses may omit the new revision.
        if "rev" in result and result["rev"] is not None:
            _doc["_rev"] = result["rev"]
        return _doc
    def save_bulk(self, docs, transaction=True):
        """
        Save a bulk of documents.
        .. versionchanged:: 1.2
            Now returns a new document list instead of modify the original.
        :param docs: list of docs
        :param transaction: if ``True``, couchdb do a insert in transaction
            model.
        :returns: docs
        """
        _docs = copy.deepcopy(docs)
        # Insert _id field if it not exists
        for doc in _docs:
            if "_id" not in doc:
                doc["_id"] = uuid.uuid4().hex
        data = utils.force_bytes(utils.to_json({"docs": _docs}))
        params = {"all_or_nothing": "true" if transaction else "false"}
        (resp, results) = self.resource.post("_bulk_docs", data=data,
                                             params=params)
        # Copy the server-assigned revisions back onto the documents.
        for result, doc in zip(results, _docs):
            if "rev" in result:
                doc['_rev'] = result['rev']
        return _docs
    def all(self, wrapper=None, flat=None, as_list=False, **kwargs):
        """
        Execute a builtin view for get all documents.
        :param wrapper: wrap result into a specific class.
        :param as_list: return a list of results instead of a
            default lazy generator.
        :param flat: get a specific field from a object instead
            of a complete object.
        .. versionadded: 1.4
            Add as_list parameter.
            Add flat parameter.
        :returns: generator object
        """
        params = {"include_docs": "true"}
        params.update(kwargs)
        data = None
        # A "keys" filter must be sent in a POST body, not as a query param.
        if "keys" in params:
            data = {"keys": params.pop("keys")}
            data = utils.force_bytes(utils.to_json(data))
        params = utils.encode_view_options(params)
        if data:
            (resp, result) = self.resource.post("_all_docs",
                                                params=params, data=data)
        else:
            (resp, result) = self.resource.get("_all_docs", params=params)
        # "flat" takes precedence over a caller-supplied wrapper.
        if wrapper is None:
            wrapper = lambda doc: doc
        if flat is not None:
            wrapper = lambda doc: doc[flat]
        def _iterate():
            for row in result["rows"]:
                yield wrapper(row)
        if as_list:
            return list(_iterate())
        return _iterate()
    def cleanup(self):
        """
        Execute a cleanup operation.
        """
        (r, result) = self.resource('_view_cleanup').post()
        return result
    def commit(self):
        """
        Send commit message to server.
        """
        (resp, result) = self.resource.post('_ensure_full_commit')
        return result
    def compact(self):
        """
        Send compact message to server. Compacting write-heavy databases
        should be avoided, otherwise the process may not catch up with
        the writes. Read load has no effect.
        """
        (r, result) = self.resource("_compact").post()
        return result
    def compact_view(self, ddoc):
        """
        Execute compact over design view.
        :raises: :py:exc:`~pycouchdb.exceptions.NotFound`
            if a view does not exists.
        """
        (r, result) = self.resource("_compact", ddoc).post()
        return result
    def revisions(self, doc_id, status='available', params=None, **kwargs):
        """
        Get all revisions of one document.
        :param doc_id: document id
        :param status: filter of reverion status, set empty to list all
        :raises: :py:exc:`~pycouchdb.exceptions.NotFound`
            if a view does not exists.
        :returns: generator object

        NOTE(review): this is a generator function, so NotFound is raised
        lazily, on first iteration rather than at call time.
        """
        if params:
            warnings.warn("params parameter is now deprecated in favor to"
                          "**kwargs usage.", DeprecationWarning)
        if params is None:
            params = {}
        params.update(kwargs)
        if not params.get('revs_info'):
            params['revs_info'] = 'true'
        resource = self.resource(doc_id)
        (resp, result) = resource.get(params=params)
        if resp.status_code == 404:
            raise exp.NotFound("Document id `{0}` not found".format(doc_id))
        for rev in result['_revs_info']:
            if status and rev['status'] == status:
                yield self.get(doc_id, rev=rev['rev'])
            elif not status:
                yield self.get(doc_id, rev=rev['rev'])
    def delete_attachment(self, doc, filename):
        """
        Delete attachment by filename from document.
        .. versionchanged:: 1.2
            Now returns a new document instead of modify the original.
        :param doc: document dict
        :param filename: name of attachment.
        :raises: :py:exc:`~pycouchdb.exceptions.Conflict`
            if save with wrong revision.
        :returns: doc
        """
        _doc = copy.deepcopy(doc)
        resource = self.resource(_doc['_id'])
        (resp, result) = resource.delete(filename, params={'rev': _doc['_rev']})
        if resp.status_code == 404:
            raise exp.NotFound("filename {0} not found".format(filename))
        if resp.status_code > 205:
            raise exp.Conflict(result['reason'])
        _doc['_rev'] = result['rev']
        # Mirror the deletion in the local copy; drop the _attachments
        # stub entirely once it becomes empty.
        try:
            del _doc['_attachments'][filename]
            if not _doc['_attachments']:
                del _doc['_attachments']
        except KeyError:
            pass
        return _doc
    def get_attachment(self, doc, filename, stream=False, **kwargs):
        """
        Get attachment by filename from document.
        :param doc: document dict
        :param filename: attachment file name.
        :param stream: setup streaming output (default: False)
        .. versionchanged: 1.5
            Add stream parameter for obtain very large attachments
            without load all file to the memory.
        :returns: binary data or
        """
        params = {"rev": doc["_rev"]}
        params.update(kwargs)
        r, result = self.resource(doc['_id']).get(filename, stream=stream,
                                                  params=params)
        if stream:
            return _StreamResponse(r)
        return r.content
    def put_attachment(self, doc, content, filename=None, content_type=None):
        """
        Put a attachment to a document.
        .. versionchanged:: 1.2
            Now returns a new document instead of modify the original.
        :param doc: document dict.
        :param content: the content to upload, either a file-like object or
            bytes
        :param filename: the name of the attachment file; if omitted, this
            function tries to get the filename from the file-like
            object passed as the `content` argument value
        :raises: :py:exc:`~pycouchdb.exceptions.Conflict`
            if save with wrong revision.
        :raises: ValueError
        :returns: doc
        """
        if filename is None:
            if hasattr(content, 'name'):
                filename = os.path.basename(content.name)
            else:
                raise ValueError('no filename specified for attachment')
        if content_type is None:
            # Join type and encoding guesses, skipping missing parts.
            content_type = ';'.join(
                filter(None, mimetypes.guess_type(filename)))
        headers = {"Content-Type": content_type}
        resource = self.resource(doc['_id'])
        (resp, result) = resource.put(filename, data=content,
                                      params={'rev': doc['_rev']}, headers=headers)
        # Success (2xx below 206): re-fetch so the caller gets the new rev.
        if resp.status_code < 206:
            return self.get(doc["_id"])
        raise exp.Conflict(result['reason'])
    def one(self, name, flat=None, wrapper=None, **kwargs):
        """
        Execute a design document view query and returns a firts
        result.
        :param name: name of the view (eg: docidname/viewname).
        :param wrapper: wrap result into a specific class.
        :param flat: get a specific field from a object instead
            of a complete object.
        .. versionadded: 1.4
        :returns: object or None
        """
        params = {"limit": 1}
        params.update(kwargs)
        path = utils._path_from_name(name, '_view')
        data = None
        if "keys" in params:
            data = {"keys": params.pop('keys')}
        if data:
            data = utils.force_bytes(utils.to_json(data))
        params = utils.encode_view_options(params)
        result = list(self._query(self.resource(*path), wrapper=wrapper,
                                  flat=flat, params=params, data=data))
        return result[0] if len(result) > 0 else None
    def _query(self, resource, data=None, params=None, headers=None,
               flat=None, wrapper=None):
        # Shared GET/POST query runner for the view-based methods below.
        if data is None:
            (resp, result) = resource.get(params=params, headers=headers)
        else:
            (resp, result) = resource.post(data=data, params=params,
                                           headers=headers)
        if wrapper is None:
            wrapper = lambda row: row
        if flat is not None:
            wrapper = lambda row: row[flat]
        for row in result["rows"]:
            yield wrapper(row)
    def temporary_query(self, map_func, reduce_func=None, language='javascript',
                        wrapper=None, as_list=False, **kwargs):
        """
        Execute a temporary view.
        :param map_func: unicode string with a map function definition.
        :param reduce_func: unicode string with a reduce function definition.
        :param language: language used for define above functions.
        :param wrapper: wrap result into a specific class.
        :param as_list: return a list of results instead of a default
            lazy generator.
        :param flat: get a specific field from a object instead of a
            complete object.
        .. versionchanged: 1.4
            Add as_list parameter.
            Add flat parameter.
        :returns: generator object
        """
        params = copy.copy(kwargs)
        data = {'map': map_func, 'language': language}
        if "keys" in params:
            data["keys"] = params.pop("keys")
        if reduce_func:
            data["reduce"] = reduce_func
        params = utils.encode_view_options(params)
        data = utils.force_bytes(utils.to_json(data))
        result = self._query(self.resource("_temp_view"), params=params,
                             data=data, wrapper=wrapper)
        if as_list:
            return list(result)
        return result
    def query(self, name, wrapper=None, flat=None, as_list=False, **kwargs):
        """
        Execute a design document view query.
        :param name: name of the view (eg: docidname/viewname).
        :param wrapper: wrap result into a specific class.
        :param as_list: return a list of results instead of a
            default lazy generator.
        :param flat: get a specific field from a object instead
            of a complete object.
        .. versionadded: 1.4
            Add as_list parameter.
            Add flat parameter.
        :returns: generator object
        """
        params = copy.copy(kwargs)
        path = utils._path_from_name(name, '_view')
        data = None
        if "keys" in params:
            data = {"keys": params.pop('keys')}
        if data:
            data = utils.force_bytes(utils.to_json(data))
        params = utils.encode_view_options(params)
        result = self._query(self.resource(*path), wrapper=wrapper,
                             flat=flat, params=params, data=data)
        if as_list:
            return list(result)
        return result
    def changes_feed(self, feed_reader, **kwargs):
        """
        Subscribe to changes feed of couchdb database.
        Note: this method is blocking.
        :param feed_reader: callable or :py:class:`~BaseFeedReader`
            instance
        .. versionadded: 1.5
        """
        object = self
        _listen_feed(object, "_changes", feed_reader, **kwargs)
    def changes_list(self, **kwargs):
        """
        Obtain a list of changes from couchdb.
        .. versionadded: 1.5
        """
        (resp, result) = self.resource("_changes").get(params=kwargs)
        return result['last_seq'], result['results']
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes for checking data restrictions and limits
"""
from functools import partial
import re
import six
from nailgun.errors import errors
from nailgun.expression import Expression
from nailgun.utils import camel_to_snake_case
from nailgun.utils import compact
from nailgun.utils import flatten
class LimitsMixin(object):
    """Mixin which extends nailgun objects with limits
    processing functionality
    """
    # NOTE: the mutable default for limit_types is safe only because the
    # argument is treated as read-only everywhere in this class.
    def check_node_limits(self, models, nodes, role,
                          limits, limit_reached=True,
                          limit_types=['min', 'max', 'recommended']):
        """Check nodes limits for current role
        :param models: objects which represent models in restrictions
        :type models: dict
        :param nodes: list of nodes to check limits count for role
        :type nodes: list
        :param role: node role name
        :type role: string
        :param limits: object with min|max|recommended values and overrides
        :type limits: dict
        :param limit_reached: flag to check possibility adding/removing nodes
        :type limit_reached: bool
        :param limit_types: List of possible limit types (min|max|recommended)
        :type limit_types: list
        :returns: dict -- object with bool 'valid' flag and related information
        """
        self.checked_limit_types = {}
        self.models = models
        self.overrides = limits.get('overrides', [])
        self.limit_reached = limit_reached
        self.limit_types = limit_types
        self.limit_values = {
            'max': self._evaluate_expression(
                limits.get('max'), self.models),
            'min': self._evaluate_expression(
                limits.get('min'), self.models),
            'recommended': self._evaluate_expression(
                limits.get('recommended'), self.models)
        }
        # FIX: len(filter(...)) raises TypeError on Python 3 where filter
        # returns a lazy iterator; a generator-based count works on both.
        self.count = sum(
            1 for node in nodes
            if not node.pending_deletion and role in node.roles)
        # FIX: materialize map() results as lists so the helpers below get
        # real sequences on Python 3 as well as Python 2.
        self.messages = compact(flatten(
            [self._check_override(override) for override in self.overrides]))
        self.messages += compact(flatten(
            [self._check_limit_type(lt) for lt in self.limit_types]))
        # Collapse to at most one message per limit type, then join.
        self.messages = compact(flatten(
            [self._get_message(lt) for lt in limit_types]))
        self.messages = '. '.join(self.messages)
        return {
            'count': self.count,
            'limits': self.limit_values,
            'messages': self.messages,
            'valid': not self.messages
        }
    def _check_limit(self, obj, limit_type):
        """Check limit value with nodes count
        :param obj: limits or overrides item data
        :type obj: dict
        :param limit_type: one of (min|max|recommended) values
        :type limit_type: string
        :returns: dict -- message data in format:
            {
                'type': 'min|max|recommended'
                'value': '1',
                'message': 'Message for limit'
            }
        """
        if not obj.get(limit_type):
            return
        # When the limit is already reached, use strict comparison so
        # staying exactly at the limit is still valid.
        if limit_type == 'min':
            compare = lambda a, b: a < b if self.limit_reached else a <= b
        elif limit_type == 'max':
            compare = lambda a, b: a > b if self.limit_reached else a >= b
        else:
            compare = lambda a, b: a < b
        limit_value = int(
            self._evaluate_expression(obj.get(limit_type), self.models))
        self.limit_values[limit_type] = limit_value
        self.checked_limit_types[limit_type] = True
        # TODO(apopovych): write proper default message
        if compare(self.count, limit_value):
            return {
                'type': limit_type,
                'value': limit_value,
                'message': obj.get('message', 'Default message')
            }
    def _check_override(self, override):
        """Check overridden restriction for limit

        :returns: list of message dicts (or None when the override's
            condition does not apply)
        """
        expression = override.get('condition')
        result = self._evaluate_expression(expression, self.models)
        if result:
            # FIX: return a real list (map() is lazy on Python 3).
            return [self._check_limit(override, limit_type)
                    for limit_type in self.limit_types]
    def _check_limit_type(self, limit_type):
        """Check limit types for role
        :param limit_type: one of (min|max|recommended) values
        :type limit_type: string
        """
        # Skip types already validated by an override.
        if self.checked_limit_types.get(limit_type):
            return
        return self._check_limit(self.limit_values, limit_type)
    def _get_message(self, limit_type):
        """Get proper message if we have more than one
        :param limit_type: one of (min|max|recommended) values
        :type limit_type: string
        :returns: string -- first relevant message
        """
        # sorted() accepts any iterable, so this is Python 2/3 safe.
        messages = sorted(
            (m for m in self.messages if m.get('type') == limit_type),
            key=lambda m: m.get('value'))
        if limit_type != 'max':
            # For min/recommended the strictest (largest) limit wins.
            messages = messages[::-1]
        if messages:
            return messages[0].get('message')
    def _evaluate_expression(self, expression, models):
        """Evaluate expression if it exists
        """
        if expression:
            return Expression(str(expression), models).evaluate()
class RestrictionMixin(object):
    """Mixin which extends nailgun objects with restriction
    processing functionality
    """
    @classmethod
    def check_restrictions(cls, models, restrictions, action=None):
        """Check if attribute satisfied restrictions

        :param models: objects which represent models in restrictions
        :type models: dict
        :param restrictions: list of restrictions to check
        :type restrictions: list
        :param action: filtering restrictions by action key
        :type action: string
        :returns: dict -- object with 'result' as bool and 'message' as
                  the satisfied restrictions' messages joined by '. '
        """
        satisfied = []
        if restrictions:
            # List comprehensions instead of map()/filter(): the previous
            # code sliced `expened_restrictions[:]`, which raises TypeError
            # on Python 3 where map() returns a non-sliceable iterator.
            expanded_restrictions = [
                cls._expand_restriction(restriction)
                for restriction in restrictions]
            # Filter by action
            if action:
                filtered_restrictions = [
                    item for item in expanded_restrictions
                    if item.get('action') == action]
            else:
                filtered_restrictions = expanded_restrictions[:]
            # Keep only restrictions whose condition evaluates to True
            satisfied = [
                item for item in filtered_restrictions
                if Expression(item.get('condition'), models).evaluate()]
        return {
            'result': bool(satisfied),
            'message': '. '.join(item.get('message') for item in
                                 satisfied if item.get('message'))
        }
    @staticmethod
    def _expand_restriction(restriction):
        """Get restriction in different formats like string, short
        or long dict formats and return in one canonical format

        :param restriction: restriction object
        :type restriction: string|dict
        :raises errors.InvalidData: when restriction is neither a string
                                    nor a dict
        :returns: dict -- restriction object in canonical format:
                  {
                      'action': 'enable|disable|hide|none'
                      'condition': 'value1 == value2',
                      'message': 'value1 shouldn't equal value2'
                  }
        """
        result = {
            'action': 'disable'
        }
        if isinstance(restriction, six.string_types):
            # Bare string is treated as a condition with default action
            result['condition'] = restriction
        elif isinstance(restriction, dict):
            if 'condition' in restriction:
                # Long format: carries its own keys, possibly incl. action
                result.update(restriction)
            else:
                # Short format: single {condition: message} pair
                condition, message = next(iter(restriction.items()))
                result['condition'] = condition
                result['message'] = message
        else:
            raise errors.InvalidData('Invalid restriction format')
        return result
class AttributesRestriction(RestrictionMixin):
    """Restriction and regex validation for cluster attributes."""
    @classmethod
    def check_data(cls, models, data):
        """Check cluster attributes data

        :param models: objects which represent models in restrictions
        :type models: dict
        :param data: cluster attributes object
        :type data: list|dict
        :returns: list -- validation error messages (empty when valid)
        """
        def find_errors(data=data):
            """Generator which traverses through cluster attributes tree
            checks restrictions for attributes and values for correctness
            with regex
            """
            if isinstance(data, dict):
                restr = cls.check_restrictions(
                    models, data.get('restrictions', []))
                if restr.get('result'):
                    # TODO(apopovych): handle restriction message
                    return
                else:
                    regex_error = cls.validate_regex(data)
                    if regex_error is not None:
                        yield regex_error
                    # Recurse into children, skipping metadata keys
                    for key, value in six.iteritems(data):
                        if key not in ['restrictions', 'regex']:
                            for err in find_errors(value):
                                yield err
            elif isinstance(data, list):
                for item in data:
                    for err in find_errors(item):
                        yield err
        return list(find_errors())
    @staticmethod
    def validate_regex(data):
        """Validate attribute 'value' against its 'regex' metadata.

        :returns: error message string, or None when the value matches
                  (or when no regex is configured)
        """
        attr_regex = data.get('regex', {})
        if attr_regex:
            value = data.get('value')
            # six.string_types instead of the Python 2-only `basestring`
            # builtin (NameError under Python 3); consistent with the six
            # usage elsewhere in this module.
            if not isinstance(value, six.string_types):
                return ('Value {0} is of invalid type, cannot check '
                        'regexp'.format(value))
            pattern = re.compile(attr_regex.get('source'))
            if not pattern.search(value):
                return attr_regex.get('error')
class VmwareAttributesRestriction(RestrictionMixin):
    """Restriction and regex validation for cluster vmware attributes."""
    @classmethod
    def check_data(cls, models, metadata, data):
        """Check cluster vmware attributes data

        :param models: objects which represent models in restrictions
        :type models: dict
        :param metadata: vmware attributes metadata object
        :type metadata: list|dict
        :param data: vmware attributes data(value) object
        :type data: list|dict
        :returns: list -- validation error messages (empty when valid)
        """
        # Root path segment derived from the class name; it is stripped
        # from metadata paths when looking values up in `data` below.
        root_key = camel_to_snake_case(cls.__name__)
        def find_errors(metadata=metadata, path_key=root_key):
            """Generator for vmware attributes errors which for each
            attribute in 'metadata' gets relevant values from vmware
            'value' and checks them with restrictions and regexs
            """
            if isinstance(metadata, dict):
                restr = cls.check_restrictions(
                    models, metadata.get('restrictions', []))
                if restr.get('result'):
                    # TODO(apopovych): handle restriction message?
                    return
                else:
                    for mkey, mvalue in six.iteritems(metadata):
                        if mkey == 'name':
                            # Convert the metadata path into a value path
                            # by dropping the root key and the '.fields'
                            # segments.
                            value_path = path_key.replace(
                                root_key, '').replace('.fields', '')
                            values = cls._get_values(value_path, data)
                            attr_regex = metadata.get('regex', {})
                            if attr_regex:
                                pattern = re.compile(attr_regex.get('source'))
                                # `values` is a generator function returned
                                # by _get_values(); call it to iterate.
                                for value in values():
                                    if not pattern.match(value):
                                        yield attr_regex.get('error')
                        for err in find_errors(
                                mvalue, '.'.join([path_key, mkey])):
                            yield err
            elif isinstance(metadata, list):
                for i, item in enumerate(metadata):
                    # Prefer the item's 'name' as the path segment; fall
                    # back to the list index.
                    current_key = item.get('name') or str(i)
                    for err in find_errors(
                            item, '.'.join([path_key, current_key])):
                        yield err
        return list(find_errors())
    @classmethod
    def _get_values(cls, path, data):
        """Generator for all values from data selected by given path

        :param path: path to all releted values
        :type path: string
        :param data: vmware attributes value
        :type data: list|dict
        :returns: function -- generator function yielding matching values
        """
        keys = path.split('.')
        key = keys[-1]
        def find(data=data):
            # Walk nested dicts/lists: yield values stored under the final
            # path segment; descend only through keys present in `path`.
            if isinstance(data, dict):
                for k, v in six.iteritems(data):
                    if k == key:
                        yield v
                    elif k in keys:
                        for result in find(v):
                            yield result
            elif isinstance(data, list):
                for d in data:
                    for result in find(d):
                        yield result
        return find
|
|
# -*- coding: utf-8 -*-
# test cases for HOT Export Tasks
import cPickle
import logging
import os
import sys
import uuid
from mock import Mock, PropertyMock, patch
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.test import TestCase
from celery.datastructures import ExceptionInfo
from jobs import presets
from jobs.models import Job, Tag
from tasks.export_tasks import (
ExportTaskErrorHandler, FinalizeRunTask, GarminExportTask,
GeneratePresetTask, KmlExportTask, ObfExportTask, OSMConfTask,
OSMPrepSchemaTask, OSMToPBFConvertTask, OverpassQueryTask, ShpExportTask
)
from tasks.models import ExportRun, ExportTask, ExportTaskResult
logger = logging.getLogger(__name__)
class TestExportTasks(TestCase):
    """Unit tests for the HOT export Celery tasks.

    Each test patches the underlying utility class and the Celery task
    request, runs the task synchronously, and verifies the returned result
    path plus the ExportTask state transition recorded in the database.
    """
    def setUp(self,):
        # Build a job over a small bbox, parse the HDM preset file and
        # attach all of its tags to the job.
        self.path = os.path.dirname(os.path.realpath(__file__))
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')
        # bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
        bbox = Polygon.from_bbox((-10.85, 6.25, -10.62, 6.40))
        the_geom = GEOSGeometry(bbox, srid=4326)
        self.job = Job.objects.create(name='TestJob',
                                      description='Test description', user=self.user,
                                      the_geom=the_geom)
        self.job.feature_save = True
        self.job.feature_pub = True
        self.job.save()
        self.run = ExportRun.objects.create(job=self.job, user=self.user)
        parser = presets.PresetParser(self.path + '/files/hdm_presets.xml')
        tags = parser.parse()
        self.assertIsNotNone(tags)
        self.assertEquals(238, len(tags))
        # save all the tags from the preset
        for tag_dict in tags:
            tag = Tag.objects.create(
                key=tag_dict['key'],
                value=tag_dict['value'],
                job=self.job,
                data_model='osm',
                geom_types=tag_dict['geom_types']
            )
        self.assertEquals(238, self.job.tags.all().count())
    @patch('celery.app.task.Task.request')
    @patch('utils.osmconf.OSMConfig')
    def test_run_osmconf_task(self, mock_config, mock_request):
        """OSMConfTask should create the .ini file and mark itself RUNNING."""
        task = OSMConfTask()
        celery_uid = str(uuid.uuid4())
        type(mock_request).id = PropertyMock(return_value=celery_uid)
        osm_conf = mock_config.return_value
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid)
        job_name = self.job.name.lower()
        expected_output_path = stage_dir + '/' + job_name + '.ini'
        osm_conf.create_osm_conf.return_value = expected_output_path
        saved_export_task = ExportTask.objects.create(run=self.run, status='PENDING', name=task.name)
        result = task.run(run_uid=str(self.run.uid), stage_dir=stage_dir, job_name=job_name)
        osm_conf.create_osm_conf.assert_called_with(stage_dir=stage_dir)
        self.assertEquals(expected_output_path, result['result'])
        # test tasks update_task_state method
        run_task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(run_task)
        self.assertEquals('RUNNING', run_task.status)
    @patch('celery.app.task.Task.request')
    @patch('utils.overpass.Overpass')
    def test_run_overpass_task(self, overpass, mock_request):
        """OverpassQueryTask should run the query, filter it and go RUNNING."""
        task = OverpassQueryTask()
        celery_uid = str(uuid.uuid4())
        type(mock_request).id = PropertyMock(return_value=celery_uid)
        # NOTE(review): rebinding the `overpass` parameter to its
        # return_value shadows the patched class mock.
        overpass = overpass.return_value
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid)
        job_name = self.job.name.lower()
        raw_osm_path = stage_dir + '/' + 'query.osm'
        expected_output_path = stage_dir + '/' + job_name + '.osm'
        overpass.run_query.return_value = raw_osm_path
        overpass.filter.return_value = expected_output_path
        saved_export_task = ExportTask.objects.create(run=self.run, status='PENDING', name=task.name)
        result = task.run(run_uid=str(self.run.uid), stage_dir=stage_dir, job_name=job_name)
        # NOTE(review): on mock<2.0, assert_called_once() is not a real
        # assertion (it is an auto-created mock attribute and silently
        # passes) — consider assert_called_once_with(...). TODO confirm
        # the mock version in use; same applies below.
        overpass.run_query.assert_called_once()
        overpass.filter.assert_called_once()
        self.assertEquals(expected_output_path, result['result'])
        # test tasks update_task_state method
        run_task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(run_task)
        self.assertEquals('RUNNING', run_task.status)
    @patch('celery.app.task.Task.request')
    @patch('utils.pbf.OSMToPBF')
    def test_run_osmtopbf_task(self, mock_overpass, mock_request):
        """OSMToPBFConvertTask should convert the .osm file to .pbf."""
        task = OSMToPBFConvertTask()
        celery_uid = str(uuid.uuid4())
        type(mock_request).id = PropertyMock(return_value=celery_uid)
        osmtopbf = mock_overpass.return_value
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid)
        job_name = self.job.name.lower()
        expected_output_path = stage_dir + '/' + job_name + '.pbf'
        osmtopbf.convert.return_value = expected_output_path
        saved_export_task = ExportTask.objects.create(run=self.run, status='PENDING', name=task.name)
        result = task.run(run_uid=str(self.run.uid), stage_dir=stage_dir, job_name=job_name)
        osmtopbf.convert.assert_called_once()
        self.assertEquals(expected_output_path, result['result'])
        # test tasks update_task_state method
        run_task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(run_task)
        self.assertEquals('RUNNING', run_task.status)
    @patch('celery.app.task.Task.request')
    @patch('utils.osmparse.OSMParser')
    def test_run_osmprepschema_task(self, mock_parser, mock_request):
        """OSMPrepSchemaTask should build the sqlite schema from the .osm."""
        task = OSMPrepSchemaTask()
        celery_uid = str(uuid.uuid4())
        type(mock_request).id = PropertyMock(return_value=celery_uid)
        prep_schema = mock_parser.return_value
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid) + '/'
        job_name = self.job.name.lower()
        expected_output_path = stage_dir + job_name + '.sqlite'
        prep_schema.instancemethod.return_value = expected_output_path
        saved_export_task = ExportTask.objects.create(run=self.run, status='PENDING', name=task.name)
        result = task.run(run_uid=str(self.run.uid), stage_dir=stage_dir, job_name=job_name)
        prep_schema.instancemethod.assert_called_once()
        prep_schema.create_spatialite.assert_called_once()
        prep_schema.create_default_schema.assert_called_once()
        # NOTE(review): 'upate_zindexes' is an auto-created mock attribute;
        # verify the spelling matches the real OSMParser API.
        prep_schema.upate_zindexes.assert_called_once()
        self.assertEquals(expected_output_path, result['result'])
        # test tasks update_task_state method
        run_task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(run_task)
        self.assertEquals('RUNNING', run_task.status)
    @patch('celery.app.task.Task.request')
    @patch('utils.shp.SQliteToShp')
    def test_run_shp_export_task(self, mock, mock_request):
        """ShpExportTask should convert the sqlite file to a shapefile."""
        task = ShpExportTask()
        celery_uid = str(uuid.uuid4())
        type(mock_request).id = PropertyMock(return_value=celery_uid)
        sqlite_to_shp = mock.return_value
        job_name = self.job.name.lower()
        sqlite_to_shp.convert.return_value = '/path/to/' + job_name + '.shp'
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid)
        saved_export_task = ExportTask.objects.create(run=self.run, status='PENDING', name=task.name)
        result = task.run(run_uid=str(self.run.uid), stage_dir=stage_dir, job_name=job_name)
        sqlite_to_shp.convert.assert_called_once()
        self.assertEquals('/path/to/' + job_name + '.shp', result['result'])
        # test tasks update_task_state method
        run_task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(run_task)
        self.assertEquals('RUNNING', run_task.status)
    @patch('shutil.rmtree')
    @patch('shutil.move')
    @patch('celery.app.task.Task.request')
    @patch('utils.osmand.OSMToOBF')
    def test_run_obf_export_task(self, mock_obf, mock_request,
                                 mock_move, mock_rmtree):
        """ObfExportTask should convert to .obf, then move and clean up."""
        task = ObfExportTask()
        celery_uid = str(uuid.uuid4())
        type(mock_request).id = PropertyMock(return_value=celery_uid)
        osm_to_obf = mock_obf.return_value
        shutil_move = mock_move.return_value
        shutil_rmtree = mock_rmtree.return_value
        job_name = self.job.name.lower()
        # NOTE(review): hard-coded staging root — presumably should be
        # built from settings.EXPORT_STAGING_ROOT like the other tests;
        # confirm.
        expected_output_path = '/home/ubuntu/export_staging/' + str(self.run.uid) + '/' + job_name + '.obf'
        osm_to_obf.convert.return_value = expected_output_path
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid) + '/'
        saved_export_task = ExportTask.objects.create(run=self.run, status='PENDING', name=task.name)
        result = task.run(run_uid=str(self.run.uid), stage_dir=stage_dir, job_name=job_name)
        osm_to_obf.convert.assert_called_once()
        shutil_move.assert_called_once()
        shutil_rmtree.assert_called_once()
        self.assertEquals(expected_output_path, result['result'])
        # test tasks update_task_state method
        run_task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(run_task)
        self.assertEquals('RUNNING', run_task.status)
    @patch('shutil.rmtree')
    @patch('shutil.move')
    @patch('celery.app.task.Task.request')
    @patch('utils.garmin.OSMToIMG')
    def test_run_garmin_export_task(self, mock_obf, mock_request,
                                    mock_move, mock_rmtree):
        """GarminExportTask should run mkgmap, then move and clean up."""
        task = GarminExportTask()
        celery_uid = str(uuid.uuid4())
        type(mock_request).id = PropertyMock(return_value=celery_uid)
        osm_to_img = mock_obf.return_value
        shutil_move = mock_move.return_value
        shutil_rmtree = mock_rmtree.return_value
        job_name = self.job.name.lower()
        # NOTE(review): hard-coded staging root here too — confirm.
        expected_output_path = '/home/ubuntu/export_staging/' + str(self.run.uid) + '/' + job_name + '_garmin.zip'
        osm_to_img.run_mkgmap.return_value = expected_output_path
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid) + '/'
        saved_export_task = ExportTask.objects.create(run=self.run, status='PENDING', name=task.name)
        result = task.run(run_uid=str(self.run.uid), stage_dir=stage_dir, job_name=job_name)
        osm_to_img.run_mkgmap.assert_called_once()
        shutil_move.assert_called_once()
        shutil_rmtree.assert_called_once()
        self.assertEquals(expected_output_path, result['result'])
        # test tasks update_task_state method
        run_task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(run_task)
        self.assertEquals('RUNNING', run_task.status)
    @patch('celery.app.task.Task.request')
    @patch('utils.kml.SQliteToKml')
    def test_run_kml_export_task(self, mock_kml, mock_request):
        """KmlExportTask should convert the sqlite file to a .kmz."""
        task = KmlExportTask()
        celery_uid = str(uuid.uuid4())
        type(mock_request).id = PropertyMock(return_value=celery_uid)
        sqlite_to_kml = mock_kml.return_value
        job_name = self.job.name.lower()
        # NOTE(review): hard-coded staging root here too — confirm.
        expected_output_path = '/home/ubuntu/export_staging/' + str(self.run.uid) + '/' + job_name + '.kmz'
        sqlite_to_kml.convert.return_value = expected_output_path
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid) + '/'
        saved_export_task = ExportTask.objects.create(run=self.run, status='PENDING', name=task.name)
        result = task.run(run_uid=str(self.run.uid), stage_dir=stage_dir, job_name=job_name)
        sqlite_to_kml.convert.assert_called_once()
        self.assertEquals(expected_output_path, result['result'])
        # test the tasks update_task_state method
        run_task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(run_task)
        self.assertEquals('RUNNING', run_task.status)
    @patch('os.makedirs')
    @patch('os.path.exists')
    @patch('shutil.copy')
    @patch('os.stat')
    def test_task_on_success(self, os_stat, shutil_copy, exists, mkdirs):
        """on_success should record SUCCESS and create an ExportTaskResult."""
        # patch decorators are applied bottom-up: os_stat -> os.stat,
        # shutil_copy -> shutil.copy, exists -> os.path.exists,
        # mkdirs -> os.makedirs
        exists.return_value = False  # download dir doesn't exist
        osstat = os_stat.return_value
        type(osstat).st_size = PropertyMock(return_value=1234567890)
        shp_export_task = ShpExportTask()
        celery_uid = str(uuid.uuid4())
        # assume task is running
        running_task = ExportTask.objects.create(
            run=self.run,
            celery_uid=celery_uid,
            status='RUNNING',
            name=shp_export_task.name
        )
        shp_export_task = ShpExportTask()
        download_url = '/downloads/' + str(self.run.uid) + '/file.shp'
        download_root = settings.EXPORT_DOWNLOAD_ROOT
        run_dir = '{0}{1}'.format(download_root, str(self.run.uid))
        shp_export_task.on_success(retval={'result': download_url}, task_id=celery_uid,
                                   args={}, kwargs={'run_uid': str(self.run.uid)})
        os_stat.assert_called_once_with(download_url)
        exists.assert_called_once_with(run_dir)
        mkdirs.assert_called_once_with(run_dir)
        task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(task)
        result = task.result
        self.assertIsNotNone(result)
        self.assertEqual(task, result.task)
        self.assertEquals('SUCCESS', task.status)
        self.assertEquals('Default Shapefile Export', task.name)
        # pull out the result and test
        result = ExportTaskResult.objects.get(task__celery_uid=celery_uid)
        self.assertIsNotNone(result)
        self.assertEquals(download_url, result.download_url)
    def test_task_on_failure(self,):
        """on_failure should pickle the exception onto the ExportTask."""
        shp_export_task = ShpExportTask()
        celery_uid = str(uuid.uuid4())
        # assume task is running
        running_task = ExportTask.objects.create(
            run=self.run,
            celery_uid=celery_uid,
            status='RUNNING',
            name=shp_export_task.name
        )
        exc = None
        exc_info = None
        try:
            raise ValueError('some unexpected error')
        except ValueError as e:
            exc = e
            exc_info = sys.exc_info()
        einfo = ExceptionInfo(exc_info=exc_info)
        shp_export_task.on_failure(exc, task_id=celery_uid, einfo=einfo,
                                   args={}, kwargs={'run_uid': str(self.run.uid)})
        task = ExportTask.objects.get(celery_uid=celery_uid)
        self.assertIsNotNone(task)
        exception = task.exceptions.all()[0]
        # the stored exception is a pickled ExceptionInfo
        exc_info = cPickle.loads(str(exception.exception)).exc_info
        error_type, msg, tb = exc_info[0], exc_info[1], exc_info[2]
        self.assertEquals(error_type, ValueError)
        self.assertEquals('some unexpected error', str(msg))
        # traceback.print_exception(error_type, msg, tb)
    @patch('celery.app.task.Task.request')
    def test_generate_preset_task(self, mock_request):
        """GeneratePresetTask should save a preset config onto the job."""
        task = GeneratePresetTask()
        run_uid = self.run.uid
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid)
        celery_uid = str(uuid.uuid4())
        running_task = ExportTask.objects.create(
            run=self.run,
            celery_uid=celery_uid,
            status='RUNNING',
            name=task.name
        )
        type(mock_request).id = PropertyMock(return_value=celery_uid)
        result = task.run(run_uid=run_uid, job_name='testjob')
        # NOTE(review): expected_result is unused — presumably meant for an
        # assertion that was never written; confirm intent.
        expected_result = stage_dir + 'testjob_preset.xml'
        config = self.job.configs.all()[0]
        expected_path = config.upload.path
        self.assertEquals(result['result'], expected_path)
        os.remove(expected_path)
    @patch('django.core.mail.EmailMessage')
    @patch('shutil.rmtree')
    def test_finalize_run_task(self, rmtree, email):
        """FinalizeRunTask should remove the staging dir and email the user."""
        celery_uid = str(uuid.uuid4())
        run_uid = self.run.uid
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid)
        succeeded_task = ExportTask.objects.create(
            run=self.run,
            celery_uid=celery_uid,
            status='SUCCESS',
            name='Default Shapefile Export'
        )
        task = FinalizeRunTask()
        self.assertEquals('Finalize Export Run', task.name)
        task.run(run_uid=run_uid, stage_dir=stage_dir)
        rmtree.assert_called_once_with(stage_dir)
        # NOTE(review): return_value is configured *after* task.run(), so
        # msg.send.assert_called_once() cannot observe a send that already
        # happened (and is a no-op on mock<2.0 anyway) — confirm and fix.
        msg = Mock()
        email.return_value = msg
        msg.send.assert_called_once()
        # self.assertEquals('SUCCESS', self.run.status)
    @patch('django.core.mail.EmailMessage')
    @patch('shutil.rmtree')
    @patch('os.path.isdir')
    def test_export_task_error_handler(self, isdir, rmtree, email):
        """ExportTaskErrorHandler should check the staging dir and notify."""
        celery_uid = str(uuid.uuid4())
        run_uid = self.run.uid
        stage_dir = settings.EXPORT_STAGING_ROOT + str(self.run.uid)
        succeeded_task = ExportTask.objects.create(
            run=self.run,
            celery_uid=celery_uid,
            status='SUCCESS',
            name='Default Shapefile Export'
        )
        task = ExportTaskErrorHandler()
        self.assertEquals('Export Task Error Handler', task.name)
        task.run(run_uid=run_uid, stage_dir=stage_dir)
        isdir.assert_called_once_with(stage_dir)
        # rmtree.assert_called_once_with(stage_dir)
        # NOTE(review): same post-run mock configuration issue as in
        # test_finalize_run_task above — confirm and fix.
        msg = Mock()
        email.return_value = msg
        msg.send.assert_called_once()
        # self.assertEquals('FAILED', self.run.status)
|
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2013 Bartosz Zaczynski
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utilities for executing microanalyst scripts in a native terminal
such as cmd.exe or xterm according to platform-specific syntax
and shell commands.
"""
import os
import platform
from microanalyst.commons.osutils import TempFile
class ScriptWrapper(object):
    """Transforms metadata into shell command and executes it."""
    def __init__(self, iterations, filename, keep_json, genes):
        """
        iterations: [{'files':[''],'control':tk.StringVar()}]
        filename: str, output Excel(TM)
        keep_json: bool
        genes: gene definitions passed to genes.py, or None to skip
        """
        self.iterations = iterations
        self.filename = filename
        self.keep_json = keep_json
        self.genes = genes
        # Platform-specific command templates (cmd.exe vs. xterm).
        self.shell = _get_shell()
    def xlsh(self):
        """Call xlsh.py for microplates in separate worksheets."""
        cmd_builder = self._get_builder()
        cmd_builder.xlsh()
        self._execute(cmd_builder)
    def xlsv(self):
        """Call xlsv.py for microplates in a single worksheet."""
        cmd_builder = self._get_builder()
        cmd_builder.xlsv()
        self._execute(cmd_builder)
    def _execute(self, cmd_builder):
        # Runs the pipeline in a new terminal window via os.system(); the
        # command string is built from user-chosen filenames, quoting is
        # delegated to the shell object.
        message = 'echo Please wait while processing... && '
        command = self.shell.template % (message + str(cmd_builder))
        os.system(command)
    def _get_builder(self):
        """Assemble the pipeline: group | control | assemble [| genes]."""
        cmd_builder = CommandBuilder(self.shell,
                                     self.iterations,
                                     self.filename,
                                     self.genes)
        cmd_builder.group().control().assemble()
        if self.genes is not None:
            cmd_builder.genes()
        if self.keep_json:
            cmd_builder.redirect()
        return cmd_builder
class CommandBuilder(object):
    """Builder of a shell command according to platform-specific syntax.

    Tokens are appended by the fluent group()/control()/assemble()/...
    methods and joined with single spaces by __str__().
    """
    def __init__(self, shell, iterations, filename, genes):
        """
        shell: platform templates object (quote(), template, cat)
        iterations: [{'files': [str], 'control': tk.StringVar()}]
        filename: str, output Excel(TM) file
        genes: gene definitions for genes.py, or None
        """
        self.shell = shell
        self.iterations = iterations
        self.filename = filename
        self.genes_def = genes
        # Accumulated command tokens, joined by __str__().
        self.command = []
    def __str__(self):
        return ' '.join(self.command)
    def group(self):
        """Generate group.py script invocation, e.g.

        $ group.py "file1" "file2" | group.py "file3" | ...
        """
        for iteration in self.iterations:
            self.command.append('group.py')
            for filename in iteration['files']:
                self.command.append(self.shell.quote(filename))
            self.command.append('|')
        return self
    def control(self):
        """Generate control.py script invocation, e.g.

        $ ... | control.py "file1" - "file3" | ...
        """
        control = ['control.py']
        for iteration in self.iterations:
            filename = iteration['control'].get()
            if filename:
                control.append(self.shell.quote(filename))
            else:
                # '-' marks an iteration without a control file
                control.append('-')
        self.command.extend(control)
        self.command.append('|')
        return self
    def assemble(self):
        """Generate assemble.py script invocation."""
        self.command.append('assemble.py')
        return self
    def genes(self):
        """Generate genes.py script invocation, e.g.

        $ ... | genes.py "file" | ...
        """
        # Gene definitions are handed over through a temporary file.
        tmp = TempFile()
        tmp.write(self.genes_def)
        self.command.append('|')
        self.command.append('genes.py')
        self.command.append(self.shell.quote(tmp.name()))
        return self
    def redirect(self):
        """Keep intermediate JSON by redirecting assemble.py to a file."""
        json_filename = self.shell.quote(self.filename[:-4] + '.json')
        self.command.append(r'> %s && %s %s' % (json_filename,
                                                self.shell.cat,
                                                json_filename))
        return self
    def xlsh(self):
        """Generate xlsh.py script invocation (horizontal layout)."""
        return self._xls_script('xlsh.py')
    def xlsv(self):
        """Generate xlsv.py script invocation (vertical layout)."""
        return self._xls_script('xlsv.py')
    def _xls_script(self, name):
        # Append '| xls?.py' plus the quoted output file and the -f flag.
        self.command.append('| ' + name)
        self.command.append(r'%s -f' % self.shell.quote(self.filename))
        return self
class WindowsShell(object):
    """Command templates for a Windows shell (cmd.exe)."""
    def __init__(self):
        # Open a new console window and run the command inside it.
        self.template = r'start cmd /c "%s"'
        self.cat = 'type'
    def quote(self, filename):
        """Escape filename with Windows-style quotation marks."""
        return '"{0}"'.format(filename)
class UnixShell(object):
    """Command templates for a Mac/Linux/Unix shell."""
    def __init__(self):
        # Run inside xterm, keep the window open briefly, detach with '&'.
        self.template = r'xterm -e "%s && sleep 5s" &'
        self.cat = 'cat'
    def quote(self, filename):
        """Escape filename with backslash-protected double quotes."""
        return r'\"{0}\"'.format(filename)
def _get_shell():
    """Return shell templates for the current OS."""
    if _is_windows():
        return WindowsShell()
    return UnixShell()
def _is_windows():
"""Return true if the current OS is Windows, false otherwise."""
return platform.system() == 'Windows'
|
|
import collections
import inspect
import re
from functools import wraps
import sys
from contextlib import contextmanager
import itertools
from voluptuous import error as er
if sys.version_info >= (3,):
    # Python 3: recreate the Python 2 builtin names the rest of the
    # module relies on.
    long = int
    unicode = str
    basestring = str
    ifilter = filter
    def iteritems(d):
        # dict.items() is already a lazy view on Python 3
        return d.items()
else:
    # Python 2: lazy filter and dict iteration come from itertools/dict.
    from itertools import ifilter
    def iteritems(d):
        return d.iteritems()
if sys.version_info >= (3, 3):
    # collections.Mapping moved to collections.abc in 3.3 (and the old
    # alias was removed entirely in 3.10).
    _Mapping = collections.abc.Mapping
else:
    _Mapping = collections.Mapping
"""Schema validation for Python data structures.
Given eg. a nested data structure like this:
{
'exclude': ['Users', 'Uptime'],
'include': [],
'set': {
'snmp_community': 'public',
'snmp_timeout': 15,
'snmp_version': '2c',
},
'targets': {
'localhost': {
'exclude': ['Uptime'],
'features': {
'Uptime': {
'retries': 3,
},
'Users': {
'snmp_community': 'monkey',
'snmp_port': 15,
},
},
'include': ['Users'],
'set': {
'snmp_community': 'monkeys',
},
},
},
}
A schema like this:
>>> settings = {
... 'snmp_community': str,
... 'retries': int,
... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
... }
>>> features = ['Ping', 'Uptime', 'Http']
>>> schema = Schema({
... 'exclude': features,
... 'include': features,
... 'set': settings,
... 'targets': {
... 'exclude': features,
... 'include': features,
... 'features': {
... str: settings,
... },
... },
... })
Validate like so:
>>> schema({
... 'set': {
... 'snmp_community': 'public',
... 'snmp_version': '2c',
... },
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {
... 'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'},
... },
... },
... }) == {
... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
... 'targets': {
... 'exclude': ['Ping'],
... 'features': {'Uptime': {'retries': 3},
... 'Users': {'snmp_community': 'monkey'}}}}
True
"""
# options for extra keys (passed as Schema(..., extra=...))
PREVENT_EXTRA = 0  # any extra key not in schema will raise an error
ALLOW_EXTRA = 1  # extra keys not in schema will be included in output
REMOVE_EXTRA = 2  # extra keys not in schema will be excluded from output
def _isnamedtuple(obj):
return isinstance(obj, tuple) and hasattr(obj, '_fields')
# Scalar schema-key types that can be matched by direct dict lookup
# (see the mapping-compilation optimization further below).
primitive_types = (str, unicode, bool, int, float)
class Undefined(object):
    """Falsy sentinel for "no value supplied" (distinct from None)."""
    def __nonzero__(self):
        # Python 2 truthiness hook: the sentinel is always falsy.
        return False
    # Python 3 uses __bool__ instead of __nonzero__; without this alias
    # the sentinel would be truthy on Python 3, although this module
    # explicitly supports both major versions.
    __bool__ = __nonzero__
    def __repr__(self):
        return '...'
UNDEFINED = Undefined()
def Self():
    """Marker for self-referential schemas; resolved during schema
    compilation and never actually invoked."""
    raise er.SchemaError('"Self" should never be called')
def default_factory(value):
    """Return a zero-argument callable producing ``value``.

    Callables and the UNDEFINED sentinel are returned unchanged.
    """
    passthrough = value is UNDEFINED or callable(value)
    return value if passthrough else (lambda: value)
@contextmanager
def raises(exc, msg=None, regex=None):
    """Context manager asserting that its body raises ``exc``.

    Optionally verifies the exception text: exact equality via ``msg``
    or a regular-expression search via ``regex``.
    """
    try:
        yield
    except exc as e:
        text = str(e)
        if msg is not None:
            assert text == msg, '%r != %r' % (text, msg)
        if regex is not None:
            assert re.search(regex, text), (
                '%r does not match %r' % (text, regex))
def Extra(_):
    """Allow keys in the data that are not present in the schema.

    Used only as a marker key inside mapping schemas; never invoked.
    """
    raise er.SchemaError('"Extra" should never be called')
# As extra() is never called there's no way to catch references to the
# deprecated object, so we just leave an alias here instead.
extra = Extra
class Schema(object):
"""A validation schema.
The schema is a Python tree-like structure where nodes are pattern
matched against corresponding trees of values.
Nodes can be values, in which case a direct comparison is used, types,
in which case an isinstance() check is performed, or callables, which will
validate and optionally convert the value.
We can equate schemas also.
For Example:
>>> v = Schema({Required('a'): unicode})
>>> v1 = Schema({Required('a'): unicode})
>>> v2 = Schema({Required('b'): unicode})
>>> assert v == v1
>>> assert v != v2
"""
_extra_to_name = {
REMOVE_EXTRA: 'REMOVE_EXTRA',
ALLOW_EXTRA: 'ALLOW_EXTRA',
PREVENT_EXTRA: 'PREVENT_EXTRA',
}
def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
"""Create a new Schema.
:param schema: Validation schema. See :module:`voluptuous` for details.
:param required: Keys defined in the schema must be in the data.
:param extra: Specify how extra keys in the data are treated:
- :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
extra keys (raise ``Invalid``).
- :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
keys in the output.
- :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
from the output.
- Any value other than the above defaults to
:const:`~voluptuous.PREVENT_EXTRA`
"""
self.schema = schema
self.required = required
self.extra = int(extra) # ensure the value is an integer
self._compiled = self._compile(schema)
@classmethod
def infer(cls, data, **kwargs):
"""Create a Schema from concrete data (e.g. an API response).
For example, this will take a dict like:
{
'foo': 1,
'bar': {
'a': True,
'b': False
},
'baz': ['purple', 'monkey', 'dishwasher']
}
And return a Schema:
{
'foo': int,
'bar': {
'a': bool,
'b': bool
},
'baz': [str]
}
Note: only very basic inference is supported.
"""
def value_to_schema_type(value):
if isinstance(value, dict):
if len(value) == 0:
return dict
return {k: value_to_schema_type(v)
for k, v in iteritems(value)}
if isinstance(value, list):
if len(value) == 0:
return list
else:
return [value_to_schema_type(v)
for v in value]
return type(value)
return cls(value_to_schema_type(data), **kwargs)
def __eq__(self, other):
if not isinstance(other, Schema):
return False
return other.schema == self.schema
def __ne__(self, other):
return not (self == other)
def __str__(self):
return str(self.schema)
def __repr__(self):
return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
self.schema, self._extra_to_name.get(self.extra, '??'),
self.required, id(self))
def __call__(self, data):
"""Validate data against this schema."""
try:
return self._compiled([], data)
except er.MultipleInvalid:
raise
except er.Invalid as e:
raise er.MultipleInvalid([e])
# return self.validate([], self.schema, data)
    def _compile(self, schema):
        """Dispatch ``schema`` to the appropriate compiler.

        Returns a validator callable ``(path, data) -> data`` that raises
        ``er.Invalid`` (or a subclass) on failure.
        """
        # Extra is a catch-all marker: accept any value unchanged.
        if schema is Extra:
            return lambda _, v: v
        # Self refers back to the top-level schema; defer the lookup so it
        # works even while self._compiled is still being built.
        if schema is Self:
            return lambda p, v: self._compiled(p, v)
        elif hasattr(schema, "__voluptuous_compile__"):
            # The schema object supplies its own compilation hook.
            return schema.__voluptuous_compile__(self)
        if isinstance(schema, Object):
            return self._compile_object(schema)
        if isinstance(schema, _Mapping):
            return self._compile_dict(schema)
        elif isinstance(schema, list):
            return self._compile_list(schema)
        elif isinstance(schema, tuple):
            return self._compile_tuple(schema)
        elif isinstance(schema, (frozenset, set)):
            return self._compile_set(schema)
        type_ = type(schema)
        if inspect.isclass(schema):
            type_ = schema
        # Plain types, literal values and callables compile as scalars.
        # NOTE(review): long/unicode here are presumably Python 2 compat
        # aliases defined earlier in this module.
        if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
                     list, dict, type(None)) or callable(schema):
            return _compile_scalar(schema)
        raise er.SchemaError('unsupported schema data type %r' %
                             type(schema).__name__)
    def _compile_mapping(self, schema, invalid_msg=None):
        """Create validator for given mapping.

        Returns ``validate_mapping(path, iterable, out)`` where ``iterable``
        yields (key, value) pairs and validated pairs are written into
        ``out`` (which is also returned). Shared by the dict and object
        compilers; ``invalid_msg`` labels value errors (e.g. 'dictionary
        value' vs 'object value').
        """
        invalid_msg = invalid_msg or 'mapping value'
        # Keys that may be required
        all_required_keys = set(key for key in schema
                                if key is not Extra and
                                ((self.required and not isinstance(key, (Optional, Remove))) or
                                 isinstance(key, Required)))
        # Keys that may have defaults
        all_default_keys = set(key for key in schema
                               if isinstance(key, Required) or
                               isinstance(key, Optional))
        # Compile every schema key and value once, up front.
        _compiled_schema = {}
        for skey, svalue in iteritems(schema):
            new_key = self._compile(skey)
            new_value = self._compile(svalue)
            _compiled_schema[skey] = (new_key, new_value)
        candidates = list(_iterate_mapping_candidates(_compiled_schema))
        # After we have the list of candidates in the correct order, we want to apply some optimization so that each
        # key in the data being validated will be matched against the relevant schema keys only.
        # No point in matching against different keys
        additional_candidates = []
        candidates_by_key = {}
        for skey, (ckey, cvalue) in candidates:
            if type(skey) in primitive_types:
                candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
            elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
                candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
            else:
                # These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
                additional_candidates.append((skey, (ckey, cvalue)))
        def validate_mapping(path, iterable, out):
            required_keys = all_required_keys.copy()
            # Build a map of all provided key-value pairs.
            # The type(out) is used to retain ordering in case a ordered
            # map type is provided as input.
            key_value_map = type(out)()
            for key, value in iterable:
                key_value_map[key] = value
            # Insert default values for non-existing keys.
            for key in all_default_keys:
                if not isinstance(key.default, Undefined) and \
                        key.schema not in key_value_map:
                    # A default value has been specified for this missing
                    # key, insert it.
                    key_value_map[key.schema] = key.default()
            error = None
            errors = []
            for key, value in key_value_map.items():
                key_path = path + [key]
                remove_key = False
                # Optimization. Validate against the matching key first, then fallback to the rest
                relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
                # compare each given key/value against all compiled key/values
                # schema key, (compiled key, compiled value)
                for skey, (ckey, cvalue) in relevant_candidates:
                    try:
                        new_key = ckey(key_path, key)
                    except er.Invalid as e:
                        # An error deeper than this key belongs to a nested
                        # schema; propagate it unchanged. Otherwise remember
                        # the deepest key error seen so far and try the next
                        # candidate.
                        if len(e.path) > len(key_path):
                            raise
                        if not error or len(e.path) > len(error.path):
                            error = e
                        continue
                    # Backtracking is not performed once a key is selected, so if
                    # the value is invalid we immediately throw an exception.
                    exception_errors = []
                    # check if the key is marked for removal
                    is_remove = new_key is Remove
                    try:
                        cval = cvalue(key_path, value)
                        # include if it's not marked for removal
                        if not is_remove:
                            out[new_key] = cval
                        else:
                            remove_key = True
                            continue
                    except er.MultipleInvalid as e:
                        exception_errors.extend(e.errors)
                    except er.Invalid as e:
                        exception_errors.append(e)
                    if exception_errors:
                        if is_remove or remove_key:
                            continue
                        for err in exception_errors:
                            if len(err.path) <= len(key_path):
                                err.error_type = invalid_msg
                            errors.append(err)
                        # If there is a validation error for a required
                        # key, this means that the key was provided.
                        # Discard the required key so it does not
                        # create an additional, noisy exception.
                        required_keys.discard(skey)
                        break
                    # Key and value okay, mark as found in case it was
                    # a Required() field.
                    required_keys.discard(skey)
                    break
                # No candidate key matched: this data key is "extra".
                else:
                    if remove_key:
                        # remove key
                        continue
                    elif self.extra == ALLOW_EXTRA:
                        out[key] = value
                    elif self.extra != REMOVE_EXTRA:
                        errors.append(er.Invalid('extra keys not allowed', key_path))
                    # else REMOVE_EXTRA: ignore the key so it's removed from output
            # for any required keys left that weren't found and don't have defaults:
            for key in required_keys:
                msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
                errors.append(er.RequiredFieldInvalid(msg, path + [key]))
            if errors:
                raise er.MultipleInvalid(errors)
            return out
        return validate_mapping
    def _compile_object(self, schema):
        """Validate an object.

        Has the same behavior as the dictionary validator but works with
        object attributes.

        For example:

            >>> class Structure(object):
            ...     def __init__(self, one=None, three=None):
            ...         self.one = one
            ...         self.three = three
            ...
            >>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
            >>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
            ...     validate(Structure(one='three'))
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='object value')
        def validate_object(path, data):
            # Enforce the expected class when the Object schema pinned one.
            if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
                raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
            # Treat attributes as mapping items; attributes whose value is
            # None are skipped entirely.
            iterable = _iterate_object(data)
            iterable = ifilter(lambda item: item[1] is not None, iterable)
            out = base_validate(path, iterable, {})
            # Rebuild a fresh instance from the validated attributes.
            return type(data)(**out)
        return validate_object
    def _compile_dict(self, schema):
        """Validate a dictionary.

        A dictionary schema can contain a set of values, or at most one
        validator function/type.

        A dictionary schema will only validate a dictionary:

            >>> validate = Schema({})
            >>> with raises(er.MultipleInvalid, 'expected a dictionary'):
            ...   validate([])

        An invalid dictionary value:

            >>> validate = Schema({'one': 'two', 'three': 'four'})
            >>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
            ...   validate({'one': 'three'})

        An invalid key:

            >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
            ...   validate({'two': 'three'})

        Validation function, in this case the "int" type:

            >>> validate = Schema({'one': 'two', 'three': 'four', int: str})

        Valid integer input:

            >>> validate({10: 'twenty'})
            {10: 'twenty'}

        By default, a "type" in the schema (in this case "int") will be used
        purely to validate that the corresponding value is of that type. It
        will not Coerce the value:

            >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
            ...   validate({'10': 'twenty'})

        Wrap them in the Coerce() function to achieve this:

            >>> from voluptuous import Coerce
            >>> validate = Schema({'one': 'two', 'three': 'four',
            ...                    Coerce(int): str})
            >>> validate({'10': 'twenty'})
            {10: 'twenty'}

        Custom message for required key:

            >>> validate = Schema({Required('one', 'required'): 'two'})
            >>> with raises(er.MultipleInvalid, "required @ data['one']"):
            ...   validate({})

        (This is to avoid unexpected surprises.)

        Multiple errors for nested field in a dict:

            >>> validate = Schema({
            ...     'adict': {
            ...         'strfield': str,
            ...         'intfield': int
            ...     }
            ... })
            >>> try:
            ...     validate({
            ...         'adict': {
            ...             'strfield': 123,
            ...             'intfield': 'one'
            ...         }
            ...     })
            ... except er.MultipleInvalid as e:
            ...     print(sorted(str(i) for i in e.errors))  # doctest: +NORMALIZE_WHITESPACE
            ["expected int for dictionary value @ data['adict']['intfield']",
             "expected str for dictionary value @ data['adict']['strfield']"]
        """
        base_validate = self._compile_mapping(
            schema, invalid_msg='dictionary value')
        # Collect Exclusive/Inclusive markers into their named groups so
        # cross-key constraints can be checked before per-key validation.
        groups_of_exclusion = {}
        groups_of_inclusion = {}
        for node in schema:
            if isinstance(node, Exclusive):
                g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
                g.append(node)
            elif isinstance(node, Inclusive):
                g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
                g.append(node)
        def validate_dict(path, data):
            if not isinstance(data, dict):
                raise er.DictInvalid('expected a dictionary', path)
            errors = []
            # At most one key of each exclusion group may be present.
            for label, group in groups_of_exclusion.items():
                exists = False
                for exclusive in group:
                    if exclusive.schema in data:
                        if exists:
                            msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
                                "two or more values in the same group of exclusion '%s'" % label
                            next_path = path + [VirtualPathComponent(label)]
                            errors.append(er.ExclusiveInvalid(msg, next_path))
                            break
                        exists = True
            if errors:
                raise er.MultipleInvalid(errors)
            # Keys in an inclusion group must appear all together or not
            # at all.
            for label, group in groups_of_inclusion.items():
                included = [node.schema in data for node in group]
                if any(included) and not all(included):
                    msg = "some but not all values in the same group of inclusion '%s'" % label
                    for g in group:
                        if hasattr(g, 'msg') and g.msg:
                            msg = g.msg
                            break
                    next_path = path + [VirtualPathComponent(label)]
                    errors.append(er.InclusiveInvalid(msg, next_path))
                    break
            if errors:
                raise er.MultipleInvalid(errors)
            # Preserve the input's mapping type (e.g. OrderedDict) in the
            # output.
            out = data.__class__()
            return base_validate(path, iteritems(data), out)
        return validate_dict
    def _compile_sequence(self, schema, seq_type):
        """Validate a sequence type.

        This is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        # Compile alternatives once; each element is checked against them
        # in schema order.
        _compiled = [self._compile(s) for s in schema]
        seq_type_name = seq_type.__name__
        def validate_sequence(path, data):
            if not isinstance(data, seq_type):
                raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
            # Empty seq schema, reject any data.
            if not schema:
                if data:
                    raise er.MultipleInvalid([
                        er.ValueInvalid('not a valid value', path if path else data)
                    ])
                return data
            out = []
            invalid = None
            errors = []
            index_path = UNDEFINED
            for i, value in enumerate(data):
                index_path = path + [i]
                invalid = None
                for validate in _compiled:
                    try:
                        cval = validate(index_path, value)
                        if cval is not Remove: # do not include Remove values
                            out.append(cval)
                        break
                    except er.Invalid as e:
                        # Errors deeper than this index come from a nested
                        # schema and are propagated unchanged.
                        if len(e.path) > len(index_path):
                            raise
                        invalid = e
                else:
                    # No alternative accepted this element: report the last
                    # failure.
                    errors.append(invalid)
            if errors:
                raise er.MultipleInvalid(errors)
            # Rebuild the same sequence type; namedtuples take positional
            # args rather than an iterable.
            if _isnamedtuple(data):
                return type(data)(*out)
            else:
                return type(data)(out)
        return validate_sequence
    def _compile_tuple(self, schema):
        """Validate a tuple.

        A tuple is a sequence of valid values or validators tried in order.

        >>> validator = Schema(('one', 'two', int))
        >>> validator(('one',))
        ('one',)
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator((3.5,))
        >>> validator((1,))
        (1,)
        """
        # Delegates to the shared sequence compiler with tuple semantics.
        return self._compile_sequence(schema, tuple)
    def _compile_list(self, schema):
        """Validate a list.

        A list is a sequence of valid values or validators tried in order.

        >>> validator = Schema(['one', 'two', int])
        >>> validator(['one'])
        ['one']
        >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
        ...   validator([3.5])
        >>> validator([1])
        [1]
        """
        # Delegates to the shared sequence compiler with list semantics.
        return self._compile_sequence(schema, list)
def _compile_set(self, schema):
"""Validate a set.
A set is an unordered collection of unique elements.
>>> validator = Schema({int})
>>> validator(set([42])) == set([42])
True
>>> with raises(er.Invalid, 'expected a set'):
... validator(42)
>>> with raises(er.MultipleInvalid, 'invalid value in set'):
... validator(set(['a']))
"""
type_ = type(schema)
type_name = type_.__name__
def validate_set(path, data):
if not isinstance(data, type_):
raise er.Invalid('expected a %s' % type_name, path)
_compiled = [self._compile(s) for s in schema]
errors = []
for value in data:
for validate in _compiled:
try:
validate(path, value)
break
except er.Invalid:
pass
else:
invalid = er.Invalid('invalid value in %s' % type_name, path)
errors.append(invalid)
if errors:
raise er.MultipleInvalid(errors)
return data
return validate_set
    def extend(self, schema, required=None, extra=None):
        """Create a new `Schema` by merging this and the provided `schema`.

        Neither this `Schema` nor the provided `schema` are modified. The
        resulting `Schema` inherits the `required` and `extra` parameters of
        this, unless overridden. Keys from the extension schema win on
        collision (including their markers, e.g. Required vs Optional).

        Both schemas must be dictionary-based.

        :param schema: dictionary to extend this `Schema` with
        :param required: if set, overrides `required` of this `Schema`
        :param extra: if set, overrides `extra` of this `Schema`
        """
        assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
        result = self.schema.copy()
        # returns the key that may have been passed as an argument to Marker constructor
        def key_literal(key):
            return (key.schema if isinstance(key, Marker) else key)
        # build a map that takes the key literals to the needed objects
        # literal -> Required|Optional|literal
        result_key_map = dict((key_literal(key), key) for key in result)
        # for each item in the extension schema, replace duplicates
        # or add new keys
        for key, value in iteritems(schema):
            # if the key is already in the dictionary, we need to replace it
            # transform key to literal before checking presence
            if key_literal(key) in result_key_map:
                result_key = result_key_map[key_literal(key)]
                result_value = result[result_key]
                # if both are dictionaries, we need to extend recursively
                # create the new extended sub schema, then remove the old key and add the new one
                if type(result_value) == dict and type(value) == dict:
                    new_value = Schema(result_value).extend(value).schema
                    del result[result_key]
                    result[key] = new_value
                # one or the other or both are not sub-schemas, simple replacement is fine
                # remove old key and add new one
                else:
                    del result[result_key]
                    result[key] = value
            # key is new and can simply be added
            else:
                result[key] = value
        # recompile and send old object
        # type(self) is used so subclasses of Schema round-trip correctly
        result_cls = type(self)
        result_required = (required if required is not None else self.required)
        result_extra = (extra if extra is not None else self.extra)
        return result_cls(result, required=result_required, extra=result_extra)
def _compile_scalar(schema):
    """A scalar value.

    The schema can either be a value or a type.

    >>> _compile_scalar(int)([], 1)
    1
    >>> with raises(er.Invalid, 'expected float'):
    ...   _compile_scalar(float)([], '1')

    Callables have the same behavior:

    >>> _compile_scalar(lambda v: float(v))([], '1')
    1.0

    As a convenience, ValueError's are trapped:

    >>> with raises(er.Invalid, 'not a valid value'):
    ...   _compile_scalar(lambda v: float(v))([], 'a')
    """
    # A class acts as an isinstance check, never a coercion.
    if inspect.isclass(schema):
        def validate_instance(path, data):
            if isinstance(data, schema):
                return data
            else:
                msg = 'expected %s' % schema.__name__
                raise er.TypeInvalid(msg, path)
        return validate_instance
    # A callable acts as a coercing validator; ValueError is translated
    # into a validation error for convenience.
    if callable(schema):
        def validate_callable(path, data):
            try:
                return schema(data)
            except ValueError:
                raise er.ValueInvalid('not a valid value', path)
            except er.Invalid as e:
                e.prepend(path)
                raise
        return validate_callable
    # Otherwise the schema is a literal value compared by equality.
    def validate_value(path, data):
        if data != schema:
            raise er.ScalarInvalid('not a valid value', path)
        return data
    return validate_value
def _compile_itemsort():
    '''Return the sort-key function used to order mapping schema items.

    Lower priority numbers are matched first. Literal values rank 0
    (highest), then Remove markers, other Markers, callables, types, and
    finally Extra (the catch-all) last.
    '''
    def is_extra(key_):
        return key_ is Extra
    def is_remove(key_):
        return isinstance(key_, Remove)
    def is_marker(key_):
        return isinstance(key_, Marker)
    def is_type(key_):
        return inspect.isclass(key_)
    def is_callable(key_):
        return callable(key_)
    # priority list for map sorting (in order of checking)
    # We want Extra to match last, because it's a catch-all. On the other hand,
    # Remove markers should match first (since invalid values will not
    # raise an Error, instead the validator will check if other schemas match
    # the same value).
    # NOTE: the list order is the CHECK order, not the priority order —
    # is_type must be tested before is_callable because classes are
    # themselves callable, yet should get priority 4, not 3.
    priority = [(1, is_remove),  # Remove highest priority after values
                (2, is_marker),  # then other Markers
                (4, is_type),  # types/classes lowest before Extra
                (3, is_callable),  # callables after markers
                (5, is_extra)]  # Extra lowest priority
    def item_priority(item_):
        key_ = item_[0]
        for i, check_ in priority:
            if check_(key_):
                return i
        # literal values have the highest priority
        return 0
    return item_priority
# Module-level sort key shared by _iterate_mapping_candidates.
_sort_item = _compile_itemsort()
def _iterate_mapping_candidates(schema):
    """Iterate over schema in a meaningful order.

    Yields (schema_key, compiled_pair) items sorted by ``_sort_item`` so
    that literal keys are tried before markers/callables/types and Extra
    is tried last.
    """
    # Without this, Extra might appear first in the iterator, and fail to
    # validate a key even though it's a Required that has its own validation,
    # generating a false positive.
    return sorted(iteritems(schema), key=_sort_item)
def _iterate_object(obj):
    """Return iterator over object attributes. Respect objects with
    defined __slots__.

    Yields (name, value) pairs from the instance ``__dict__`` (or
    ``_asdict()`` for namedtuples), then from ``__slots__`` if present.
    """
    d = {}
    try:
        d = vars(obj)
    except TypeError:
        # maybe we have named tuple here?
        if hasattr(obj, '_asdict'):
            d = obj._asdict()
    for item in iteritems(d):
        yield item
    try:
        slots = obj.__slots__
    except AttributeError:
        pass
    else:
        # Skip the '__dict__' slot itself so it is not reported as a
        # regular attribute.
        for key in slots:
            if key != '__dict__':
                yield (key, getattr(obj, key))
class Msg(object):
    """Report a user-friendly message if a schema fails to validate.

    >>> validate = Schema(
    ...   Msg(['one', 'two', int],
    ...       'should be one of "one", "two" or an integer'))
    >>> with raises(er.MultipleInvalid, 'should be one of "one", "two" or an integer'):
    ...   validate(['three'])

    Messages are only applied to invalid direct descendants of the schema:

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
    >>> with raises(er.MultipleInvalid, 'expected int @ data[0][0]'):
    ...   validate([['three']])

    The type which is thrown can be overridden but needs to be a subclass of Invalid:

    >>> with raises(er.SchemaError, 'Msg can only use subclases of Invalid as custom class'):
    ...   validate = Schema(Msg([int], 'should be int', cls=KeyError))

    If you do use a subclass of Invalid, that error will be thrown (wrapped in a MultipleInvalid):

    >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!', cls=er.RangeInvalid))
    >>> try:
    ...   validate(['three'])
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], er.RangeInvalid)
    """
    def __init__(self, schema, msg, cls=None):
        # Only Invalid subclasses may replace the raised exception type.
        if cls and not issubclass(cls, er.Invalid):
            raise er.SchemaError("Msg can only use subclases of"
                                 " Invalid as custom class")
        self._schema = schema
        self.schema = Schema(schema)
        self.msg = msg
        self.cls = cls
    def __call__(self, v):
        try:
            return self.schema(v)
        except er.Invalid as e:
            # Deeper errors keep their own (more specific) message.
            if len(e.path) > 1:
                raise e
            error_cls = self.cls or er.Invalid
            raise error_cls(self.msg)
    def __repr__(self):
        return 'Msg({}, {}, cls={})'.format(self._schema, self.msg, self.cls)
class Object(dict):
    """Indicate that we should work with attributes, not keys."""
    def __init__(self, schema, cls=UNDEFINED):
        super(Object, self).__init__(schema)
        # Expected class of validated instances; UNDEFINED disables the
        # isinstance check in _compile_object.
        self.cls = cls
class VirtualPathComponent(str):
    """A synthetic error-path element (e.g. an exclusion/inclusion group
    label) that renders as '<label>' to distinguish it from real keys."""
    def __str__(self):
        # str concatenation via join avoids recursing into __str__.
        return ''.join(('<', self, '>'))
    def __repr__(self):
        return self.__str__()
# Markers.py
class Marker(object):
    """Mark nodes for special treatment.

    A Marker wraps a schema fragment plus an optional custom error message
    and description. Subclasses (Optional, Required, Remove, ...) change
    how the wrapped key is treated during mapping validation.
    """
    def __init__(self, schema_, msg=None, description=None):
        self.schema = schema_
        self._schema = Schema(schema_)
        self.msg = msg
        self.description = description
    def __call__(self, v):
        try:
            return self._schema(v)
        except er.Invalid as e:
            # Only replace the message for direct failures; deeper errors
            # keep their own, more specific message.
            if not self.msg or len(e.path) > 1:
                raise
            raise er.Invalid(self.msg)
    def __str__(self):
        return str(self.schema)
    def __repr__(self):
        return repr(self.schema)
    def __lt__(self, other):
        # Sort by the wrapped schema so markers order alongside plain keys.
        if isinstance(other, Marker):
            return self.schema < other.schema
        return self.schema < other
    def __hash__(self):
        # Hash/eq delegate to the wrapped schema so Marker('x') and 'x'
        # collide in dict/set lookups (used by the mapping compiler).
        return hash(self.schema)
    def __eq__(self, other):
        return self.schema == other
    def __ne__(self, other):
        return not(self.schema == other)
class Optional(Marker):
    """Mark a node in the schema as optional, and optionally provide a default

    >>> schema = Schema({Optional('key'): str})
    >>> schema({})
    {}
    >>> schema = Schema({Optional('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Optional('key', default=list): list})
    >>> schema({})
    {'key': []}

    If 'required' flag is set for an entire schema, optional keys aren't required

    >>> schema = Schema({
    ...    Optional('key'): str,
    ...    'key2': str
    ... }, required=True)
    >>> schema({'key2':'value'})
    {'key2': 'value'}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Optional, self).__init__(schema, msg=msg,
                                       description=description)
        # default_factory normalises plain values and callables uniformly;
        # UNDEFINED means "no default".
        self.default = default_factory(default)
class Exclusive(Optional):
    """Mark a node in the schema as exclusive.

    Exclusive keys inherited from Optional:

    >>> schema = Schema({Exclusive('alpha', 'angles'): int, Exclusive('beta', 'angles'): int})
    >>> schema({'alpha': 30})
    {'alpha': 30}

    Keys inside a same group of exclusion cannot be together, it only makes sense for dictionaries:

    >>> with raises(er.MultipleInvalid, "two or more values in the same group of exclusion 'angles' @ data[<angles>]"):
    ...   schema({'alpha': 30, 'beta': 45})

    For example, API can provides multiple types of authentication, but only one works in the same time:

    >>> msg = 'Please, use only one type of authentication at the same time.'
    >>> schema = Schema({
    ... Exclusive('classic', 'auth', msg=msg):{
    ...     Required('email'): basestring,
    ...     Required('password'): basestring
    ...     },
    ... Exclusive('internal', 'auth', msg=msg):{
    ...     Required('secret_key'): basestring
    ...     },
    ... Exclusive('social', 'auth', msg=msg):{
    ...     Required('social_network'): basestring,
    ...     Required('token'): basestring
    ...     }
    ... })
    >>> with raises(er.MultipleInvalid, "Please, use only one type of authentication at the same time. @ data[<auth>]"):
    ...   schema({'classic': {'email': 'foo@example.com', 'password': 'bar'},
    ...           'social': {'social_network': 'barfoo', 'token': 'tEMp'}})
    """
    def __init__(self, schema, group_of_exclusion, msg=None, description=None):
        super(Exclusive, self).__init__(schema, msg=msg,
                                        description=description)
        # Group label used by _compile_dict to enforce mutual exclusion.
        self.group_of_exclusion = group_of_exclusion
class Inclusive(Optional):
    """Mark a node in the schema as inclusive.

    Inclusive keys inherited from Optional:

    >>> schema = Schema({
    ...     Inclusive('filename', 'file'): str,
    ...     Inclusive('mimetype', 'file'): str
    ... })
    >>> data = {'filename': 'dog.jpg', 'mimetype': 'image/jpeg'}
    >>> data == schema(data)
    True

    Keys inside a same group of inclusive must exist together, it only makes sense for dictionaries:

    >>> with raises(er.MultipleInvalid, "some but not all values in the same group of inclusion 'file' @ data[<file>]"):
    ...   schema({'filename': 'dog.jpg'})

    If none of the keys in the group are present, it is accepted:

    >>> schema({})
    {}

    For example, API can return 'height' and 'width' together, but not separately.

    >>> msg = "Height and width must exist together"
    >>> schema = Schema({
    ...     Inclusive('height', 'size', msg=msg): int,
    ...     Inclusive('width', 'size', msg=msg): int
    ... })
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...   schema({'height': 100})
    >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
    ...   schema({'width': 100})
    >>> data = {'height': 100, 'width': 100}
    >>> data == schema(data)
    True
    """
    def __init__(self, schema, group_of_inclusion,
                 msg=None, description=None, default=UNDEFINED):
        super(Inclusive, self).__init__(schema, msg=msg,
                                        default=default,
                                        description=description)
        # Group label used by _compile_dict to enforce all-or-nothing
        # presence of the group's keys.
        self.group_of_inclusion = group_of_inclusion
class Required(Marker):
    """Mark a node in the schema as being required, and optionally provide a default value.

    >>> schema = Schema({Required('key'): str})
    >>> with raises(er.MultipleInvalid, "required key not provided @ data['key']"):
    ...   schema({})
    >>> schema = Schema({Required('key', default='value'): str})
    >>> schema({})
    {'key': 'value'}
    >>> schema = Schema({Required('key', default=list): list})
    >>> schema({})
    {'key': []}
    """
    def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
        super(Required, self).__init__(schema, msg=msg,
                                       description=description)
        # default_factory normalises plain values and callables uniformly;
        # UNDEFINED means "no default".
        self.default = default_factory(default)
class Remove(Marker):
    """Mark a node in the schema to be removed and excluded from the validated
    output. Keys that fail validation will not raise ``Invalid``. Instead, these
    keys will be treated as extras.

    >>> schema = Schema({str: int, Remove(int): str})
    >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data[1]"):
    ...    schema({'keep': 1, 1: 1.0})
    >>> schema({1: 'red', 'red': 1, 2: 'green'})
    {'red': 1}
    >>> schema = Schema([int, Remove(float), Extra])
    >>> schema([1, 2, 3, 4.0, 5, 6.0, '7'])
    [1, 2, 3, 5, '7']
    """
    def __call__(self, v):
        # Validate, then return the Remove class itself as a sentinel so
        # the mapping/sequence compilers know to drop the value.
        super(Remove, self).__call__(v)
        return self.__class__
    def __repr__(self):
        return "Remove(%r)" % (self.schema,)
    def __hash__(self):
        # Identity-based hash (unlike Marker) so distinct Remove markers
        # never collide with the literal keys they wrap.
        return object.__hash__(self)
def message(default=None, cls=None):
    """Convenience decorator to allow functions to provide a message.

    Set a default message:

    >>> @message('not an integer')
    ... def isint(v):
    ...   return int(v)

    >>> validate = Schema(isint())
    >>> with raises(er.MultipleInvalid, 'not an integer'):
    ...   validate('a')

    The message can be overridden on a per validator basis:

    >>> validate = Schema(isint('bad'))
    >>> with raises(er.MultipleInvalid, 'bad'):
    ...   validate('a')

    The class thrown too:

    >>> class IntegerInvalid(er.Invalid): pass
    >>> validate = Schema(isint('bad', clsoverride=IntegerInvalid))
    >>> try:
    ...   validate('a')
    ... except er.MultipleInvalid as e:
    ...   assert isinstance(e.errors[0], IntegerInvalid)
    """
    if cls and not issubclass(cls, er.Invalid):
        raise er.SchemaError("message can only use subclases of Invalid as custom class")
    # Three levels of nesting: `decorator` wraps the validator function,
    # `check` lets callers override msg/class per use, and `wrapper` is
    # the actual validator that translates ValueError.
    def decorator(f):
        @wraps(f)
        def check(msg=None, clsoverride=None):
            @wraps(f)
            def wrapper(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except ValueError:
                    # Precedence: per-call override, decorator-level class,
                    # then the generic ValueInvalid; same order for messages.
                    raise (clsoverride or cls or er.ValueInvalid)(msg or default or 'invalid value')
            return wrapper
        return check
    return decorator
def _args_to_dict(func, args):
"""Returns argument names as values as key-value pairs."""
if sys.version_info >= (3, 0):
arg_count = func.__code__.co_argcount
arg_names = func.__code__.co_varnames[:arg_count]
else:
arg_count = func.func_code.co_argcount
arg_names = func.func_code.co_varnames[:arg_count]
arg_value_list = list(args)
arguments = dict((arg_name, arg_value_list[i])
for i, arg_name in enumerate(arg_names)
if i < len(arg_value_list))
return arguments
def _merge_args_with_kwargs(args_dict, kwargs_dict):
"""Merge args with kwargs."""
ret = args_dict.copy()
ret.update(kwargs_dict)
return ret
def validate(*a, **kw):
    """Decorator for validating arguments of a function against a given schema.

    Set restrictions for arguments:

    >>> @validate(arg1=int, arg2=int)
    ... def foo(arg1, arg2):
    ...   return arg1 * arg2

    Set restriction for returned value:

    >>> @validate(arg=int, __return__=int)
    ... def bar(arg1):
    ...   return arg1 * 2
    """
    # Sentinel kwarg naming the schema applied to the return value.
    RETURNS_KEY = '__return__'
    def validate_schema_decorator(func):
        returns_defined = False
        returns = None
        # Positional schema args are matched to parameter names, then
        # merged with keyword schema args (keywords win).
        schema_args_dict = _args_to_dict(func, a)
        schema_arguments = _merge_args_with_kwargs(schema_args_dict, kw)
        if RETURNS_KEY in schema_arguments:
            returns_defined = True
            returns = schema_arguments[RETURNS_KEY]
            del schema_arguments[RETURNS_KEY]
        # ALLOW_EXTRA lets unvalidated parameters pass through untouched;
        # with no argument schemas at all, validation is a no-op.
        input_schema = (Schema(schema_arguments, extra=ALLOW_EXTRA)
                        if len(schema_arguments) != 0 else lambda x: x)
        output_schema = Schema(returns) if returns_defined else lambda x: x
        @wraps(func)
        def func_wrapper(*args, **kwargs):
            args_dict = _args_to_dict(func, args)
            arguments = _merge_args_with_kwargs(args_dict, kwargs)
            validated_arguments = input_schema(arguments)
            # All arguments are forwarded as keywords after validation.
            output = func(**validated_arguments)
            return output_schema(output)
        return func_wrapper
    return validate_schema_decorator
|
|
import json
from django.contrib.auth import get_user_model
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils.http import urlencode
from wagtail.admin.views.chooser import can_choose_page
from wagtail.core.models import Locale, Page, UserPagePermissionsProxy
from wagtail.tests.testapp.models import (
EventIndex,
EventPage,
SimplePage,
SingleEventPage,
)
from wagtail.tests.utils import WagtailTestUtils
class TestChooserBrowse(TestCase, WagtailTestUtils):
    """Tests for the top-level page chooser browse view."""
    def setUp(self):
        self.root_page = Page.objects.get(id=2)
        # Add child page
        self.child_page = SimplePage(title="foobarbaz", content="hello")
        self.root_page.add_child(instance=self.child_page)
        self.login()
    def get(self, params=None):
        # None instead of a mutable default argument ({}); behavior is
        # unchanged for all existing callers.
        return self.client.get(reverse("wagtailadmin_choose_page"), params or {})
    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/browse.html")
    def test_construct_queryset_hook(self):
        page = SimplePage(title="Test shown", content="hello")
        Page.get_first_root_node().add_child(instance=page)
        page_not_shown = SimplePage(title="Test not shown", content="hello")
        Page.get_first_root_node().add_child(instance=page_not_shown)
        def filter_pages(pages, request):
            # The hook restricts results to exactly one page.
            return pages.filter(id=page.id)
        with self.register_hook("construct_page_chooser_queryset", filter_pages):
            response = self.get()
        self.assertEqual(len(response.context["pages"]), 1)
        self.assertEqual(response.context["pages"][0].specific, page)
class TestCanChooseRootFlag(TestCase, WagtailTestUtils):
    """Tests for the `can_choose_root` query parameter of the page chooser."""
    def setUp(self):
        self.login()
    def get(self, params=None):
        # None instead of a mutable default argument ({}); behavior is
        # unchanged for all existing callers.
        return self.client.get(reverse("wagtailadmin_choose_page"), params or {})
    def test_cannot_choose_root_by_default(self):
        response = self.get()
        self.assertNotContains(response, "/admin/pages/1/edit/")
    def test_can_choose_root(self):
        response = self.get({"can_choose_root": "true"})
        self.assertContains(response, "/admin/pages/1/edit/")
class TestChooserBrowseChild(TestCase, WagtailTestUtils):
    def setUp(self):
        # Browse under the default site root (pk 2) with one simple child
        # page already present.
        self.root_page = Page.objects.get(id=2)
        # Add child page
        self.child_page = SimplePage(title="foobarbaz", content="hello")
        self.root_page.add_child(instance=self.child_page)
        self.login()
def get(self, params={}):
return self.client.get(
reverse("wagtailadmin_choose_page_child", args=(self.root_page.id,)), params
)
def get_invalid(self, params={}):
return self.client.get(
reverse("wagtailadmin_choose_page_child", args=(9999999,)), params
)
    def test_simple(self):
        # The child chooser renders the standard browse template.
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/browse.html")
    def test_get_invalid(self):
        # A chooser URL for a nonexistent parent page id should 404.
        self.assertEqual(self.get_invalid().status_code, 404)
    def test_with_page_type(self):
        """Filtering by a single page_type marks pages choosable and/or
        descendable appropriately."""
        # Add a page that is not a SimplePage
        event_page = EventPage(
            title="event",
            location="the moon",
            audience="public",
            cost="free",
            date_from="2001-01-01",
        )
        self.root_page.add_child(instance=event_page)
        # Add a page with a child page
        event_index_page = EventIndex(
            title="events",
        )
        self.root_page.add_child(instance=event_index_page)
        event_index_page.add_child(
            instance=EventPage(
                title="other event",
                location="the moon",
                audience="public",
                cost="free",
                date_from="2001-01-01",
            )
        )
        # Send request
        response = self.get({"page_type": "tests.simplepage"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/browse.html")
        self.assertEqual(response.context["page_type_string"], "tests.simplepage")
        pages = {page.id: page for page in response.context["pages"].object_list}
        # Child page is a simple page directly underneath root
        # so should appear in the list
        self.assertIn(self.child_page.id, pages)
        self.assertTrue(pages[self.child_page.id].can_choose)
        self.assertFalse(pages[self.child_page.id].can_descend)
        # Event page is not a simple page and is not descendable either
        # so should not appear in the list
        self.assertNotIn(event_page.id, pages)
        # Event index page is not a simple page but has a child and is therefore descendable
        # so should appear in the list
        self.assertIn(event_index_page.id, pages)
        self.assertFalse(pages[event_index_page.id].can_choose)
        self.assertTrue(pages[event_index_page.id].can_descend)
    def test_with_url_extended_page_type(self):
        """Pages overriding their URL path appear with the overridden URL."""
        # Add a page that overrides the url path
        single_event_page = SingleEventPage(
            title="foo",
            location="the moon",
            audience="public",
            cost="free",
            date_from="2001-01-01",
        )
        self.root_page.add_child(instance=single_event_page)
        # Send request
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/browse.html")
        page_urls = [page.url for page in response.context["pages"]]
        self.assertIn("/foo/pointless-suffix/", page_urls)
def test_with_blank_page_type(self):
    """A blank page_type parameter must behave like an absent one."""
    # a blank page_type parameter should be equivalent to an absent parameter
    # (or an explicit page_type of wagtailcore.page)
    response = self.get({"page_type": ""})
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "wagtailadmin/chooser/browse.html")
def test_with_multiple_page_types(self):
    """A comma-separated page_type should make every listed type choosable."""
    # Add a page that is not a SimplePage
    event_page = EventPage(
        title="event",
        location="the moon",
        audience="public",
        cost="free",
        date_from="2001-01-01",
    )
    self.root_page.add_child(instance=event_page)

    # Send request
    response = self.get({"page_type": "tests.simplepage,tests.eventpage"})
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "wagtailadmin/chooser/browse.html")
    self.assertEqual(
        response.context["page_type_string"], "tests.simplepage,tests.eventpage"
    )

    pages = {page.id: page for page in response.context["pages"].object_list}

    # Simple page in results, as before
    self.assertIn(self.child_page.id, pages)
    self.assertTrue(pages[self.child_page.id].can_choose)

    # Event page should now also be choosable
    self.assertIn(event_page.id, pages)
    # BUG FIX: previously re-asserted child_page here, so the event page's
    # can_choose flag was never actually checked
    self.assertTrue(pages[event_page.id].can_choose)
def test_with_unknown_page_type(self):
    """A page_type naming a nonexistent app/model should 404."""
    response = self.get({"page_type": "foo.bar"})
    self.assertEqual(response.status_code, 404)
def test_with_bad_page_type(self):
    """A page_type naming a real model that is not a Page should 404."""
    response = self.get({"page_type": "wagtailcore.site"})
    self.assertEqual(response.status_code, 404)
def test_with_invalid_page_type(self):
    """A page_type not in "app.model" form should 404."""
    response = self.get({"page_type": "foo"})
    self.assertEqual(response.status_code, 404)
def test_with_admin_display_title(self):
    """Listing shows the custom admin display title, while data-title keeps
    the real page title."""
    # Check the display of the child page title when it's a child
    response = self.get({"page_type": "wagtailcore.Page"})
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "wagtailadmin/chooser/browse.html")
    html = response.json().get("html")
    self.assertInHTML("foobarbaz (simple page)", html)

    # The data-title attribute should not use the custom admin display title,
    # because JS code that uses that attribute (e.g. the rich text editor)
    # should use the real page title.
    self.assertIn('data-title="foobarbaz"', html)
def test_parent_with_admin_display_title(self):
    """Child listing under a page with a custom admin display title shows
    that title for both parent and child."""
    # Add another child under child_page so it renders a chooser list
    leaf_page = SimplePage(title="quux", content="goodbye")
    self.child_page.add_child(instance=leaf_page)

    # Use the child page as the chooser parent.
    # BUG FIX: Django's test client ``get()`` has no ``params`` keyword, so
    # the previous ``params={...}`` was silently swallowed by ``**extra`` and
    # the querystring never sent; pass the query dict as the data argument.
    response = self.client.get(
        reverse("wagtailadmin_choose_page_child", args=(self.child_page.id,)),
        {"page_type": "wagtailcore.Page"},
    )
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "wagtailadmin/chooser/browse.html")
    self.assertInHTML("foobarbaz (simple page)", response.json().get("html"))
    self.assertInHTML("quux (simple page)", response.json().get("html"))
def test_admin_display_title_breadcrumb(self):
    """Breadcrumb links should use the custom admin display title."""
    # Add another child under child_page so we get breadcrumbs
    leaf_page = SimplePage(title="quux", content="goodbye")
    self.child_page.add_child(instance=leaf_page)

    # Use the leaf page as the chooser parent, so child is in the breadcrumbs
    response = self.client.get(
        reverse("wagtailadmin_choose_page_child", args=(leaf_page.id,))
    )
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, "wagtailadmin/chooser/browse.html")

    # Look for a link element in the breadcrumbs with the admin title
    # (assertTagInHTML compares parsed HTML, so whitespace is not significant)
    expected = """
        <li class="breadcrumb-item">
            <a href="/admin/choose-page/{page_id}/?" class="breadcrumb-link navigate-pages">{page_title}
                <svg class="icon icon-arrow-right arrow_right_icon" aria-hidden="true">
                    <use href="#icon-arrow-right"></use>
                </svg>
            </a>
        </li>
    """.format(
        page_id=self.child_page.id,
        page_title="foobarbaz (simple page)",
    )
    self.assertTagInHTML(expected, response.json().get("html"))
def setup_pagination_test_data(self):
    """Populate root_page with enough children to span several result pages."""
    # 100 children at 20 results per page -> 5 paginator pages
    for index in range(100):
        self.root_page.add_child(
            instance=SimplePage(
                title="foobarbaz",
                slug="foobarbaz-%d" % index,
                content="hello",
            )
        )
def test_pagination_basic(self):
    """Default view shows page 1 of 5."""
    self.setup_pagination_test_data()

    response = self.get()
    self.assertEqual(response.context["pages"].paginator.num_pages, 5)
    self.assertEqual(response.context["pages"].number, 1)
def test_pagination_another_page(self):
    """The ``p`` query parameter selects the result page."""
    self.setup_pagination_test_data()

    response = self.get({"p": 2})
    self.assertEqual(response.context["pages"].number, 2)
def test_pagination_invalid_page(self):
    """A non-numeric ``p`` falls back to page 1."""
    self.setup_pagination_test_data()

    response = self.get({"p": "foo"})
    self.assertEqual(response.context["pages"].number, 1)
def test_pagination_out_of_range_page(self):
    """An out-of-range ``p`` clamps to the last page."""
    self.setup_pagination_test_data()

    response = self.get({"p": 100})
    self.assertEqual(response.context["pages"].number, 5)
class TestChooserSearch(TestCase, WagtailTestUtils):
    """Tests for the page chooser search view (wagtailadmin_choose_page_search)."""

    def setUp(self):
        self.root_page = Page.objects.get(id=2)

        # Add child page
        self.child_page = SimplePage(title="foobarbaz", content="hello")
        self.root_page.add_child(instance=self.child_page)

        self.login()

    def get(self, params=None):
        # Issue a search request with the given query parameters.
        return self.client.get(reverse("wagtailadmin_choose_page_search"), params or {})

    def test_simple(self):
        response = self.get({"q": "foobarbaz"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/_search_results.html")
        self.assertContains(response, "There is 1 match")
        self.assertContains(response, "foobarbaz")

    def test_result_uses_custom_admin_display_title(self):
        single_event_page = SingleEventPage(
            title="Lunar event",
            location="the moon",
            audience="public",
            cost="free",
            date_from="2001-01-01",
        )
        self.root_page.add_child(instance=single_event_page)

        response = self.get({"q": "lunar"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/_search_results.html")
        self.assertContains(response, "Lunar event (single event)")

    def test_search_no_results(self):
        response = self.get({"q": "quux"})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "There are 0 matches")

    def test_with_page_type(self):
        """Search results should honour the page_type filter."""
        # Add a page that is not a SimplePage
        event_page = EventPage(
            title="foo",
            location="the moon",
            audience="public",
            cost="free",
            date_from="2001-01-01",
        )
        self.root_page.add_child(instance=event_page)

        # Send request
        response = self.get({"q": "foo", "page_type": "tests.simplepage"})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/_search_results.html")
        self.assertEqual(response.context["page_type_string"], "tests.simplepage")

        pages = {page.id: page for page in response.context["pages"]}

        self.assertIn(self.child_page.id, pages)

        # Not a simple page
        self.assertNotIn(event_page.id, pages)

    def test_with_blank_page_type(self):
        # a blank page_type parameter should be equivalent to an absent parameter
        # (or an explicit page_type of wagtailcore.page)
        response = self.get({"q": "foobarbaz", "page_type": ""})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/_search_results.html")
        self.assertContains(response, "There is 1 match")
        self.assertContains(response, "foobarbaz")

    def test_with_multiple_page_types(self):
        """Results should include pages of every type in a comma-separated filter."""
        # Add a page that is not a SimplePage
        event_page = EventPage(
            title="foo",
            location="the moon",
            audience="public",
            cost="free",
            date_from="2001-01-01",
        )
        self.root_page.add_child(instance=event_page)

        # Send request
        response = self.get(
            {"q": "foo", "page_type": "tests.simplepage,tests.eventpage"}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/_search_results.html")
        self.assertEqual(
            response.context["page_type_string"], "tests.simplepage,tests.eventpage"
        )

        pages = {page.id: page for page in response.context["pages"]}

        # Simple page in results, as before
        self.assertIn(self.child_page.id, pages)

        # Event page should now also be choosable
        self.assertIn(event_page.id, pages)

    def test_with_unknown_page_type(self):
        # nonexistent app/model -> 404
        response = self.get({"page_type": "foo.bar"})
        self.assertEqual(response.status_code, 404)

    def test_with_bad_page_type(self):
        # real model that is not a Page subclass -> 404
        response = self.get({"page_type": "wagtailcore.site"})
        self.assertEqual(response.status_code, 404)

    def test_with_invalid_page_type(self):
        # not in "app.model" form -> 404
        response = self.get({"page_type": "foo"})
        self.assertEqual(response.status_code, 404)

    def test_construct_queryset_hook(self):
        """The construct_page_chooser_queryset hook should filter search results."""
        page = SimplePage(title="Test shown", content="hello")
        self.root_page.add_child(instance=page)

        page_not_shown = SimplePage(title="Test not shown", content="hello")
        self.root_page.add_child(instance=page_not_shown)

        # Hook restricting results to a single known page
        def filter_pages(pages, request):
            return pages.filter(id=page.id)

        with self.register_hook("construct_page_chooser_queryset", filter_pages):
            response = self.get({"q": "Test"})
        self.assertEqual(len(response.context["pages"]), 1)
        self.assertEqual(response.context["pages"][0].specific, page)
class TestAutomaticRootPageDetection(TestCase, WagtailTestUtils):
    """Tests that the chooser starts at the deepest page containing all
    instances of the requested page_type ("best root" detection)."""

    def setUp(self):
        self.tree_root = Page.objects.get(id=1)
        self.home_page = Page.objects.get(id=2)

        self.about_page = self.home_page.add_child(
            instance=SimplePage(title="About", content="About Foo")
        )
        self.contact_page = self.about_page.add_child(
            instance=SimplePage(title="Contact", content="Content Foo")
        )
        self.people_page = self.about_page.add_child(
            instance=SimplePage(title="People", content="The people of Foo")
        )

        self.event_index = self.make_event_section("Events")

        self.login()

    def make_event_section(self, name):
        """Create an EventIndex with two EventPage children; return the index."""
        event_index = self.home_page.add_child(instance=EventIndex(title=name))
        event_index.add_child(
            instance=EventPage(
                title="First Event",
                location="Bar",
                audience="public",
                cost="free",
                date_from="2001-01-01",
            )
        )
        event_index.add_child(
            instance=EventPage(
                title="Second Event",
                location="Baz",
                audience="public",
                cost="free",
                date_from="2001-01-01",
            )
        )
        return event_index

    def get_best_root(self, params={}):
        """GET the chooser with the given params and return the detected
        parent page (as its specific subclass)."""
        response = self.client.get(reverse("wagtailadmin_choose_page"), params)
        return response.context["parent_page"].specific

    def test_no_type_filter(self):
        self.assertEqual(self.get_best_root(), self.tree_root)

    def test_type_page(self):
        self.assertEqual(
            self.get_best_root({"page_type": "wagtailcore.Page"}), self.tree_root
        )

    def test_type_eventpage(self):
        """
        The chooser should start at the EventIndex that holds all the
        EventPages.
        """
        self.assertEqual(
            self.get_best_root({"page_type": "tests.EventPage"}), self.event_index
        )

    def test_type_eventpage_two_indexes(self):
        """
        The chooser should start at the home page, as there are two
        EventIndexes with EventPages.
        """
        self.make_event_section("Other events")
        self.assertEqual(
            self.get_best_root({"page_type": "tests.EventPage"}), self.home_page
        )

    def test_type_simple_page(self):
        """
        The chooser should start at the home page, as all SimplePages are
        directly under it

        NOTE(review): the body below is identical to test_type_missing
        (filters on tests.BusinessIndex and expects the tree root), which
        contradicts this docstring — looks like a copy-paste slip; confirm
        whether the filter was meant to be tests.SimplePage.
        """
        self.assertEqual(
            self.get_best_root({"page_type": "tests.BusinessIndex"}), self.tree_root
        )

    def test_type_missing(self):
        """
        The chooser should start at the root, as there are no BusinessIndexes
        """
        self.assertEqual(
            self.get_best_root({"page_type": "tests.BusinessIndex"}), self.tree_root
        )
class TestChooserExternalLink(TestCase, WagtailTestUtils):
    """Tests for the external-link form of the page chooser, including the
    automatic conversion of external URLs to internal page links
    (WAGTAILADMIN_EXTERNAL_LINK_CONVERSION setting)."""

    def setUp(self):
        self.login()
        # An internal page used as the conversion target for localhost URLs
        self.internal_page = SimplePage(title="About", content="About Foo")
        Page.objects.get(pk=2).add_child(instance=self.internal_page)

    def get(self, params={}):
        return self.client.get(
            reverse("wagtailadmin_choose_page_external_link"), params
        )

    def post(self, post_data={}, url_params={}):
        # url_params become the querystring - the link's initial data
        url = reverse("wagtailadmin_choose_page_external_link")
        if url_params:
            url += "?" + urlencode(url_params)
        return self.client.post(url, post_data)

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/external_link.html")

    def test_prepopulated_form(self):
        response = self.get(
            {"link_text": "Torchbox", "link_url": "https://torchbox.com/"}
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Torchbox")
        self.assertContains(response, "https://torchbox.com/")

    def test_create_link(self):
        response = self.post(
            {
                "external-link-chooser-url": "http://www.example.com/",
                "external-link-chooser-link_text": "example",
            }
        )
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        self.assertEqual(response_json["step"], "external_link_chosen")
        self.assertEqual(response_json["result"]["url"], "http://www.example.com/")
        self.assertEqual(
            response_json["result"]["title"], "example"
        )  # When link text is given, it is used
        self.assertIs(response_json["result"]["prefer_this_title_as_link_text"], True)

    def test_create_link_without_text(self):
        response = self.post({"external-link-chooser-url": "http://www.example.com/"})
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        self.assertEqual(response_json["step"], "external_link_chosen")
        self.assertEqual(response_json["result"]["url"], "http://www.example.com/")
        self.assertEqual(
            response_json["result"]["title"], "http://www.example.com/"
        )  # When no text is given, it uses the url
        self.assertIs(response_json["result"]["prefer_this_title_as_link_text"], False)

    def test_notice_changes_to_link_text(self):
        """prefer_this_title_as_link_text is True only when the submitted
        link text differs from the initial link text."""
        response = self.post(
            {
                "external-link-chooser-url": "http://www.example.com/",
                "external-link-chooser-link_text": "example",
            },  # POST data
            {
                "link_url": "http://old.example.com/",
                "link_text": "example",
            },  # GET params - initial data
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "http://www.example.com/")
        self.assertEqual(result["title"], "example")
        # no change to link text, so prefer the existing link/selection content where available
        self.assertIs(result["prefer_this_title_as_link_text"], False)

        response = self.post(
            {
                "external-link-chooser-url": "http://www.example.com/",
                "external-link-chooser-link_text": "new example",
            },  # POST data
            {
                "link_url": "http://old.example.com/",
                "link_text": "example",
            },  # GET params - initial data
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "http://www.example.com/")
        self.assertEqual(result["title"], "new example")
        # link text has changed, so tell the caller to use it
        self.assertIs(result["prefer_this_title_as_link_text"], True)

    def test_invalid_url(self):
        response = self.post(
            {
                "external-link-chooser-url": "ntp://www.example.com",
                "external-link-chooser-link_text": "example",
            }
        )
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        self.assertEqual(
            response_json["step"], "external_link"
        )  # indicates failure / show error message
        self.assertContains(response, "Enter a valid URL.")

    def test_allow_local_url(self):
        response = self.post(
            {
                "external-link-chooser-url": "/admin/",
                "external-link-chooser-link_text": "admin",
            }
        )
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        self.assertEqual(
            response_json["step"], "external_link_chosen"
        )  # indicates success / post back to calling page
        self.assertEqual(response_json["result"]["url"], "/admin/")
        self.assertEqual(response_json["result"]["title"], "admin")

    def test_convert_external_to_internal_link(self):
        response = self.post(
            {
                "external-link-chooser-url": "http://localhost/about/",
                "external-link-chooser-link_text": "about",
            }
        )
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        self.assertEqual(response_json["step"], "external_link_chosen")
        self.assertEqual(response_json["result"]["url"], "/about/")
        self.assertEqual(response_json["result"]["id"], self.internal_page.pk)

    def test_convert_external_link_with_query_parameters_to_internal_link(self):
        response = self.post(
            {
                "external-link-chooser-url": "http://localhost/about?test=1",
                "external-link-chooser-link_text": "about",
            }
        )
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        # Query parameters will get stripped, so the user should get asked to confirm the conversion
        self.assertEqual(response_json["step"], "confirm_external_to_internal")
        self.assertEqual(
            response_json["external"]["url"], "http://localhost/about?test=1"
        )
        self.assertEqual(response_json["internal"]["id"], self.internal_page.pk)

    def test_convert_relative_external_link_to_internal_link(self):
        response = self.post(
            {
                "external-link-chooser-url": "/about/",
                "external-link-chooser-link_text": "about",
            }
        )
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        self.assertEqual(response_json["step"], "external_link_chosen")
        self.assertEqual(response_json["result"]["url"], "/about/")
        self.assertEqual(response_json["result"]["id"], self.internal_page.pk)

    @override_settings(WAGTAILADMIN_EXTERNAL_LINK_CONVERSION="")
    def test_no_conversion_external_to_internal_link_when_disabled(self):
        url = "http://localhost/about/"
        title = "about"
        response = self.post(
            {"external-link-chooser-url": url, "external-link-chooser-link_text": title}
        )
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        self.assertEqual(response_json["step"], "external_link_chosen")
        self.assertEqual(response_json["result"]["url"], url)
        self.assertEqual(response_json["result"]["title"], title)

    @override_settings(WAGTAILADMIN_EXTERNAL_LINK_CONVERSION="exact")
    def test_no_confirm_external_to_internal_link_when_exact(self):
        url = "http://localhost/about?test=1"
        title = "about"
        response = self.post(
            {"external-link-chooser-url": url, "external-link-chooser-link_text": title}
        )
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        # Query parameters will get stripped, so this link should be left as an external url with the 'exact' setting
        self.assertEqual(response_json["step"], "external_link_chosen")
        self.assertEqual(response_json["result"]["url"], url)
        self.assertEqual(response_json["result"]["title"], title)

    @override_settings(WAGTAILADMIN_EXTERNAL_LINK_CONVERSION="confirm")
    def test_convert_external_link_to_internal_link_with_confirm_setting(self):
        url = "http://localhost/about/"
        response = self.post(
            {
                "external-link-chooser-url": url,
                "external-link-chooser-link_text": "about",
            }
        )
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content.decode())
        # The url is identical, but the conversion setting is set to 'confirm'
        # so the user should get asked to confirm the conversion
        self.assertEqual(response_json["step"], "confirm_external_to_internal")
        self.assertEqual(response_json["external"]["url"], url)
        self.assertEqual(response_json["internal"]["id"], self.internal_page.pk)
class TestChooserAnchorLink(TestCase, WagtailTestUtils):
    """Tests for the anchor-link form of the page chooser."""

    def setUp(self):
        self.login()

    def get(self, params=None):
        # Mutable-default fixed: use None sentinel instead of a shared dict.
        return self.client.get(
            reverse("wagtailadmin_choose_page_anchor_link"), params or {}
        )

    def post(self, post_data=None, url_params=None):
        # url_params become the querystring - the link's initial data
        url = reverse("wagtailadmin_choose_page_anchor_link")
        if url_params:
            url += "?" + urlencode(url_params)
        return self.client.post(url, post_data or {})

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/anchor_link.html")

    def test_prepopulated_form(self):
        response = self.get(
            {"link_text": "Example Anchor Text", "link_url": "exampleanchor"}
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Example Anchor Text")
        self.assertContains(response, "exampleanchor")

    def test_create_link(self):
        response = self.post(
            {
                "anchor-link-chooser-url": "exampleanchor",
                "anchor-link-chooser-link_text": "Example Anchor Text",
            }
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "#exampleanchor")
        self.assertEqual(
            result["title"], "Example Anchor Text"
        )  # When link text is given, it is used
        self.assertIs(result["prefer_this_title_as_link_text"], True)

    def test_create_link_without_text(self):
        response = self.post({"anchor-link-chooser-url": "exampleanchor"})
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "#exampleanchor")
        self.assertEqual(
            result["title"], "exampleanchor"
        )  # When no link text is given, it uses anchor
        self.assertIs(result["prefer_this_title_as_link_text"], False)

    def test_notice_changes_to_link_text(self):
        """prefer_this_title_as_link_text is True only when the submitted
        link text differs from the initial link text."""
        # BUG FIX: the first POST previously used the field name
        # "email-link-chooser-link_text" (copy-paste from the email chooser
        # tests), so no link text was actually submitted and the assertions
        # did not match their comments. Use the correct prefix and assert
        # the behaviour consistent with the other link chooser tests:
        # unchanged text -> prefer the existing link/selection content.
        response = self.post(
            {
                "anchor-link-chooser-url": "exampleanchor2",
                "anchor-link-chooser-link_text": "Example Text",
            },  # POST data
            {
                "link_url": "exampleanchor2",
                "link_text": "Example Text",
            },  # GET params - initial data
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "#exampleanchor2")
        self.assertEqual(result["title"], "Example Text")
        # no change to link text, so prefer the existing link/selection content where available
        self.assertIs(result["prefer_this_title_as_link_text"], False)

        response = self.post(
            {
                "anchor-link-chooser-url": "exampleanchor2",
                "anchor-link-chooser-link_text": "Example Anchor Test 2.1",
            },  # POST data
            {
                "link_url": "exampleanchor",
                "link_text": "Example Anchor Text",
            },  # GET params - initial data
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "#exampleanchor2")
        self.assertEqual(result["title"], "Example Anchor Test 2.1")
        # link text has changed, so tell the caller to use it
        self.assertIs(result["prefer_this_title_as_link_text"], True)
class TestChooserEmailLink(TestCase, WagtailTestUtils):
    """Tests for the email-link (mailto:) form of the page chooser."""

    def setUp(self):
        self.login()

    def get(self, params={}):
        return self.client.get(reverse("wagtailadmin_choose_page_email_link"), params)

    def post(self, post_data={}, url_params={}):
        # url_params become the querystring - the link's initial data
        url = reverse("wagtailadmin_choose_page_email_link")
        if url_params:
            url += "?" + urlencode(url_params)
        return self.client.post(url, post_data)

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/email_link.html")

    def test_prepopulated_form(self):
        response = self.get({"link_text": "Example", "link_url": "example@example.com"})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Example")
        self.assertContains(response, "example@example.com")

    def test_create_link(self):
        response = self.post(
            {
                "email-link-chooser-email_address": "example@example.com",
                "email-link-chooser-link_text": "contact",
            }
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "mailto:example@example.com")
        self.assertEqual(
            result["title"], "contact"
        )  # When link text is given, it is used
        self.assertIs(result["prefer_this_title_as_link_text"], True)

    def test_create_link_without_text(self):
        response = self.post(
            {"email-link-chooser-email_address": "example@example.com"}
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "mailto:example@example.com")
        self.assertEqual(
            result["title"], "example@example.com"
        )  # When no link text is given, it uses the email
        self.assertIs(result["prefer_this_title_as_link_text"], False)

    def test_notice_changes_to_link_text(self):
        """prefer_this_title_as_link_text is True only when the submitted
        link text differs from the initial link text."""
        response = self.post(
            {
                "email-link-chooser-email_address": "example2@example.com",
                "email-link-chooser-link_text": "example",
            },  # POST data
            {
                "link_url": "example@example.com",
                "link_text": "example",
            },  # GET params - initial data
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "mailto:example2@example.com")
        self.assertEqual(result["title"], "example")
        # no change to link text, so prefer the existing link/selection content where available
        self.assertIs(result["prefer_this_title_as_link_text"], False)

        response = self.post(
            {
                "email-link-chooser-email_address": "example2@example.com",
                "email-link-chooser-link_text": "new example",
            },  # POST data
            {
                "link_url": "example@example.com",
                "link_text": "example",
            },  # GET params - initial data
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "mailto:example2@example.com")
        self.assertEqual(result["title"], "new example")
        # link text has changed, so tell the caller to use it
        self.assertIs(result["prefer_this_title_as_link_text"], True)
class TestChooserPhoneLink(TestCase, WagtailTestUtils):
    """Tests for the phone-link (tel:) form of the page chooser."""

    def setUp(self):
        self.login()

    def get(self, params={}):
        return self.client.get(reverse("wagtailadmin_choose_page_phone_link"), params)

    def post(self, post_data={}, url_params={}):
        # url_params become the querystring - the link's initial data
        url = reverse("wagtailadmin_choose_page_phone_link")
        if url_params:
            url += "?" + urlencode(url_params)
        return self.client.post(url, post_data)

    def test_simple(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/chooser/phone_link.html")

    def test_prepopulated_form(self):
        response = self.get({"link_text": "Example", "link_url": "+123456789"})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Example")
        self.assertContains(response, "+123456789")

    def test_create_link(self):
        response = self.post(
            {
                "phone-link-chooser-phone_number": "+123456789",
                "phone-link-chooser-link_text": "call",
            }
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "tel:+123456789")
        self.assertEqual(result["title"], "call")
        self.assertIs(result["prefer_this_title_as_link_text"], True)

    def test_create_link_without_text(self):
        response = self.post({"phone-link-chooser-phone_number": "+123456789"})
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "tel:+123456789")
        self.assertEqual(
            result["title"], "+123456789"
        )  # When no link text is given, it uses the phone number
        self.assertIs(result["prefer_this_title_as_link_text"], False)

    def test_notice_changes_to_link_text(self):
        """prefer_this_title_as_link_text is True only when the submitted
        link text differs from the initial link text."""
        response = self.post(
            {
                "phone-link-chooser-phone_number": "+222222222",
                "phone-link-chooser-link_text": "example",
            },  # POST data
            {
                "link_url": "+111111111",
                "link_text": "example",
            },  # GET params - initial data
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "tel:+222222222")
        self.assertEqual(result["title"], "example")
        # no change to link text, so prefer the existing link/selection content where available
        self.assertIs(result["prefer_this_title_as_link_text"], False)

        response = self.post(
            {
                "phone-link-chooser-phone_number": "+222222222",
                "phone-link-chooser-link_text": "new example",
            },  # POST data
            {
                "link_url": "+111111111",
                "link_text": "example",
            },  # GET params - initial data
        )
        result = json.loads(response.content.decode())["result"]
        self.assertEqual(result["url"], "tel:+222222222")
        self.assertEqual(result["title"], "new example")
        # link text has changed, so tell the caller to use it
        self.assertIs(result["prefer_this_title_as_link_text"], True)
class TestCanChoosePage(TestCase, WagtailTestUtils):
    """Unit tests for the can_choose_page helper used by the chooser views."""

    fixtures = ["test.json"]

    def setUp(self):
        self.user = self.login()
        self.permission_proxy = UserPagePermissionsProxy(self.user)
        # Accept any Page subclass by default
        self.desired_classes = (Page,)

    def test_can_choose_page(self):
        homepage = Page.objects.get(url_path="/home/")
        result = can_choose_page(homepage, self.permission_proxy, self.desired_classes)
        self.assertTrue(result)

    def test_with_user_no_permission(self):
        homepage = Page.objects.get(url_path="/home/")
        # event editor does not have permissions on homepage
        event_editor = get_user_model().objects.get(email="eventeditor@example.com")
        permission_proxy = UserPagePermissionsProxy(event_editor)
        result = can_choose_page(
            homepage, permission_proxy, self.desired_classes, user_perm="copy_to"
        )
        self.assertFalse(result)

    def test_with_can_choose_root(self):
        root = Page.objects.get(url_path="/")
        result = can_choose_page(
            root, self.permission_proxy, self.desired_classes, can_choose_root=True
        )
        self.assertTrue(result)

    def test_with_can_not_choose_root(self):
        root = Page.objects.get(url_path="/")
        result = can_choose_page(
            root, self.permission_proxy, self.desired_classes, can_choose_root=False
        )
        self.assertFalse(result)
@override_settings(WAGTAIL_I18N_ENABLED=True)
class TestPageChooserLocaleSelector(TestCase, WagtailTestUtils):
    """Tests for the locale switcher rendered in the page chooser when
    internationalisation is enabled."""

    fixtures = ["test.json"]
    LOCALE_SELECTOR_HTML = '<a href="javascript:void(0)" aria-label="English" class="c-dropdown__button u-btn-current">'
    LOCALE_INDICATOR_HTML = '<use href="#icon-site"></use></svg>\n    English'

    def setUp(self):
        self.root_page = Page.objects.get(id=2)

        # Add child page
        self.child_page = SimplePage(title="foobarbaz", content="hello")
        self.root_page.add_child(instance=self.child_page)

        self.fr_locale = Locale.objects.create(language_code="fr")
        self.root_page_fr = self.root_page.copy_for_translation(self.fr_locale)
        self.root_page_fr.title = "Bienvenue"
        self.root_page_fr.save()
        self.child_page_fr = self.child_page.copy_for_translation(self.fr_locale)
        self.child_page_fr.save()

        switch_to_french_url = self.get_choose_page_url(
            self.fr_locale, parent_page_id=self.child_page_fr.pk
        )
        self.LOCALE_SELECTOR_HTML_FR = f'<a href="{switch_to_french_url}" aria-label="French" class="u-link is-live">'

        self.login()

    def get(self, parent_page_id):
        return self.client.get(
            reverse("wagtailadmin_choose_page_child", args=[parent_page_id])
        )

    def get_choose_page_url(self, locale=None, parent_page_id=None, html=True):
        """Build a choose-page URL for comparison against responses.

        :param locale: locale appended as a querystring param (root level only)
        :param parent_page_id: browse a specific parent rather than the root
        :param html: when True, HTML-escape the querystring separator so the
            URL matches what appears inside rendered attribute values
        """
        if parent_page_id is not None:
            url = reverse("wagtailadmin_choose_page_child", args=[parent_page_id])
        else:
            url = reverse("wagtailadmin_choose_page")
        suffix = ""
        if parent_page_id is None:
            # the locale param should only be appended at the root level
            if locale is None:
                locale = self.fr_locale
            # BUG FIX: both branches previously produced "&", making the
            # ``html`` flag a no-op; rendered HTML escapes the separator,
            # so the HTML form must use "&amp;".
            separator = "&amp;" if html else "&"
            suffix = f"{separator}locale={locale.language_code}"
        return f"{url}?page_type=wagtailcore.page{suffix}"

    def test_locale_selector_present_in_root_view(self):
        response = self.client.get(reverse("wagtailadmin_choose_page"))
        html = response.json().get("html")

        self.assertIn(self.LOCALE_SELECTOR_HTML, html)

        switch_to_french_url = self.get_choose_page_url(locale=self.fr_locale)
        fr_selector = f'<a href="{switch_to_french_url}" aria-label="French" class="u-link is-live">'
        self.assertIn(fr_selector, html)

    def test_locale_selector(self):
        response = self.get(self.child_page.pk)
        html = response.json().get("html")

        self.assertIn(self.LOCALE_SELECTOR_HTML, html)
        self.assertIn(self.LOCALE_SELECTOR_HTML_FR, html)

    def test_locale_selector_without_translation(self):
        # With the translated page deleted, no locale switcher is offered
        self.child_page_fr.delete()

        response = self.get(self.child_page.pk)
        html = response.json().get("html")

        self.assertNotIn(self.LOCALE_SELECTOR_HTML, html)
        self.assertNotIn(self.LOCALE_SELECTOR_HTML_FR, html)

    def test_locale_selector_with_active_locale(self):
        # Browsing in French shows French titles and an English switch link
        switch_to_french_url = self.get_choose_page_url(
            locale=self.fr_locale, html=False
        )
        response = self.client.get(switch_to_french_url)
        html = response.json().get("html")

        self.assertNotIn(self.LOCALE_SELECTOR_HTML, html)
        self.assertNotIn(f'data-title="{self.root_page.title}"', html)
        self.assertIn(self.root_page_fr.title, html)
        self.assertIn(
            '<a href="javascript:void(0)" aria-label="French" class="c-dropdown__button u-btn-current">',
            html,
        )
        switch_to_english_url = self.get_choose_page_url(
            locale=Locale.objects.get(language_code="en")
        )
        self.assertIn(
            f'<a href="{switch_to_english_url}" aria-label="English" class="u-link is-live">',
            html,
        )

    @override_settings(WAGTAIL_I18N_ENABLED=False)
    def test_locale_selector_not_present_when_i18n_disabled(self):
        response = self.get(self.child_page.pk)
        html = response.json().get("html")

        self.assertNotIn(self.LOCALE_SELECTOR_HTML, html)
        self.assertNotIn(self.LOCALE_SELECTOR_HTML_FR, html)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for Federation Extension."""
import re
import jsonschema
import six
from keystone import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
# JSON Schema (draft 4) for a federation mapping document: a mapping is a
# non-empty list of rules, each pairing 'local' attribute templates with
# 'remote' assertion requirements.  Validated by
# validate_mapping_structure() below.
MAPPING_SCHEMA = {
    "type": "object",
    "required": ['rules'],
    "properties": {
        "rules": {
            "minItems": 1,
            "type": "array",
            "items": {
                "type": "object",
                "required": ['local', 'remote'],
                "additionalProperties": False,
                "properties": {
                    "local": {
                        "type": "array"
                    },
                    "remote": {
                        "minItems": 1,
                        "type": "array",
                        "items": {
                            "type": "object",
                            # Every remote requirement must match exactly one
                            # of the three shapes defined below.
                            "oneOf": [
                                {"$ref": "#/definitions/empty"},
                                {"$ref": "#/definitions/any_one_of"},
                                {"$ref": "#/definitions/not_any_of"}
                            ],
                        }
                    }
                }
            }
        }
    },
    "definitions": {
        # Bare requirement: names only an assertion 'type'; its values are
        # used for direct (template) mapping.
        "empty": {
            "type": "object",
            "required": ['type'],
            "properties": {
                "type": {
                    "type": "string"
                },
            },
            "additionalProperties": False,
        },
        # Rule matches if any listed value appears in the assertion;
        # 'regex' switches the comparison to regular-expression matching.
        "any_one_of": {
            "type": "object",
            "additionalProperties": False,
            "required": ['type', 'any_one_of'],
            "properties": {
                "type": {
                    "type": "string"
                },
                "any_one_of": {
                    "type": "array"
                },
                "regex": {
                    "type": "boolean"
                }
            }
        },
        # Rule matches only if none of the listed values appear in the
        # assertion; 'regex' switches to regular-expression matching.
        "not_any_of": {
            "type": "object",
            "additionalProperties": False,
            "required": ['type', 'not_any_of'],
            "properties": {
                "type": {
                    "type": "string"
                },
                "not_any_of": {
                    "type": "array"
                },
                "regex": {
                    "type": "boolean"
                }
            }
        }
    }
}
def validate_mapping_structure(ref):
    """Validate a federation mapping document against MAPPING_SCHEMA.

    :param ref: mapping document to validate
    :type ref: dict
    :raises keystone.exception.ValidationError: if the mapping is invalid;
        the message aggregates every schema violation, one per line.
    """
    v = jsonschema.Draft4Validator(MAPPING_SCHEMA)
    # Collect every violation (sorted for deterministic output) and build
    # the message in one pass instead of quadratic string concatenation.
    messages = ''.join(error.message + "\n"
                       for error in sorted(v.iter_errors(ref), key=str))
    if messages:
        raise exception.ValidationError(messages)
class RuleProcessor(object):
    """A class to process assertions and mapping rules."""

    class _EvalType(object):
        """Mapping rule evaluation types."""
        # Keys that may appear in a remote requirement; they select the
        # comparison semantics used by _evaluate_requirement().
        ANY_ONE_OF = 'any_one_of'
        NOT_ANY_OF = 'not_any_of'

    def __init__(self, rules):
        """Initialize RuleProcessor.

        Example rules can be found at:
        :class:`keystone.tests.mapping_fixtures`

        :param rules: rules from a mapping
        :type rules: dict
        """
        self.rules = rules

    def process(self, assertion_data):
        """Transform assertion to a dictionary of user name and group ids
        based on mapping rules.

        This function will iterate through the mapping rules to find
        assertions that are valid.

        :param assertion_data: an assertion containing values from an IdP
        :type assertion_data: dict

        Example assertion_data::

            {
                'Email': 'testacct@example.com',
                'UserName': 'testacct',
                'FirstName': 'Test',
                'LastName': 'Account',
                'orgPersonType': 'Tester'
            }

        :returns: dictionary with user and group_ids
        :raises keystone.exception.Unauthorized: if no rule yields a user name

        The expected return structure is::

            {
                'name': 'foobar',
                'group_ids': ['abc123', 'def456']
            }
        """
        # Assertions will come in as string key-value pairs, and will use a
        # semi-colon to indicate multiple values, i.e. groups.
        # This will create a new dictionary where the values are arrays, and
        # any multiple values are stored in the arrays.
        # Non-string assertion values are silently dropped.
        assertion = dict((n, v.split(';')) for n, v in assertion_data.items()
                         if isinstance(v, six.string_types))
        identity_values = []
        for rule in self.rules:
            direct_maps = self._verify_all_requirements(rule['remote'],
                                                        assertion)
            # If the compare comes back as None, then the rule did not apply
            # to the assertion data, go on to the next rule
            if direct_maps is None:
                continue
            # If there are no direct mappings, then add the local mapping
            # directly to the array of saved values. However, if there is
            # a direct mapping, then perform variable replacement.
            if not direct_maps:
                identity_values += rule['local']
            else:
                for local in rule['local']:
                    new_local = self._update_local_mapping(local, direct_maps)
                    identity_values.append(new_local)
        mapped_properties = self._transform(identity_values)
        if mapped_properties.get('name') is None:
            raise exception.Unauthorized(_("Could not map user"))
        return mapped_properties

    def _transform(self, identity_values):
        """Transform local mappings, to an easier to understand format.

        Transform the incoming array to generate the return value for
        the process function. Generating content for Keystone tokens will
        be easier if some pre-processing is done at this level.

        :param identity_values: local mapping from valid evaluations
        :type identity_values: array of dict

        Example identity_values::

            [{'group': {'id': '0cd5e9'}, 'user': {'email': 'bob@example.com'}}]

        :returns: dictionary with user name and group_ids.
        """
        # initialize the group_ids as a set to eliminate duplicates
        user_name = None
        group_ids = set()
        for identity_value in identity_values:
            if 'user' in identity_value:
                # if a mapping outputs more than one user name, log it
                # (first one wins; later ones are ignored)
                if user_name is not None:
                    LOG.warning(_('Ignoring user name %s'),
                                identity_value['user']['name'])
                else:
                    user_name = identity_value['user']['name']
            if 'group' in identity_value:
                group_ids.add(identity_value['group']['id'])
        return {'name': user_name, 'group_ids': list(group_ids)}

    def _update_local_mapping(self, local, direct_maps):
        """Replace any {0}, {1} ... values with data from the assertion.

        :param local: local mapping reference that needs to be updated
        :type local: dict
        :param direct_maps: list of identity values, used to update local
        :type direct_maps: list

        Example local::

            {'user': {'name': '{0} {1}', 'email': '{2}'}}

        Example direct_maps::

            ['Bob', 'Thompson', 'bob@example.com']

        :returns: new local mapping reference with replaced values.

        The expected return structure is::

            {'user': {'name': 'Bob Thompson', 'email': 'bob@example.org'}}
        """
        new = {}
        for k, v in six.iteritems(local):
            if isinstance(v, dict):
                # Recurse into nested structures (e.g. {'user': {...}}).
                new_value = self._update_local_mapping(v, direct_maps)
            else:
                new_value = v.format(*direct_maps)
            new[k] = new_value
        return new

    def _verify_all_requirements(self, requirements, assertion):
        """Go through the remote requirements of a rule, and compare against
        the assertion.

        If a value of ``None`` is returned, the rule with this assertion
        doesn't apply.
        If an array of zero length is returned, then there are no direct
        mappings to be performed, but the rule is valid.
        Otherwise, then it will return the values, in order, to be directly
        mapped, again, the rule is valid.

        :param requirements: list of remote requirements from rules
        :type requirements: list

        Example requirements::

            [
                {
                    "type": "UserName"
                },
                {
                    "type": "orgPersonType",
                    "any_one_of": [
                        "Customer"
                    ]
                }
            ]

        :param assertion: dict of attributes from an IdP
        :type assertion: dict

        Example assertion::

            {
                'UserName': ['testacct'],
                'LastName': ['Account'],
                'orgPersonType': ['Tester'],
                'Email': ['testacct@example.com'],
                'FirstName': ['Test']
            }

        :returns: list of direct mappings or None.
        """
        direct_maps = []
        for requirement in requirements:
            requirement_type = requirement['type']
            regex = requirement.get('regex', False)
            any_one_values = requirement.get(self._EvalType.ANY_ONE_OF)
            if any_one_values is not None:
                if self._evaluate_requirement(any_one_values,
                                              requirement_type,
                                              self._EvalType.ANY_ONE_OF,
                                              regex,
                                              assertion):
                    # Condition requirements never contribute direct maps.
                    continue
                else:
                    return None
            not_any_values = requirement.get(self._EvalType.NOT_ANY_OF)
            if not_any_values is not None:
                if self._evaluate_requirement(not_any_values,
                                              requirement_type,
                                              self._EvalType.NOT_ANY_OF,
                                              regex,
                                              assertion):
                    continue
                else:
                    return None
            # If 'any_one_of' or 'not_any_of' are not found, then values are
            # within 'type'. Attempt to find that 'type' within the assertion.
            direct_map_values = assertion.get(requirement_type)
            if direct_map_values:
                direct_maps += direct_map_values
        return direct_maps

    def _evaluate_requirement(self, values, requirement_type,
                              eval_type, regex, assertion):
        """Evaluate the incoming requirement and assertion.

        If the requirement type does not exist in the assertion data, then
        return False. If regex is specified, then compare the values and
        assertion values. Otherwise, grab the intersection of the values
        and use that to compare against the evaluation type.

        :param values: list of allowed values, defined in the requirement
        :type values: list
        :param requirement_type: key to look for in the assertion
        :type requirement_type: string
        :param eval_type: determine how to evaluate requirements
        :type eval_type: string
        :param regex: perform evaluation with regex
        :type regex: boolean
        :param assertion: dict of attributes from the IdP
        :type assertion: dict

        :returns: boolean, whether requirement is valid or not.
        """
        assertion_values = assertion.get(requirement_type)
        if not assertion_values:
            return False
        if regex:
            # NOTE(review): regex mode only tests values[0] against
            # assertion_values[0]; extra values on either side are ignored,
            # and the truthy match object (not a bool) is returned —
            # confirm this is the intended contract.
            return re.search(values[0], assertion_values[0])
        any_match = bool(set(values).intersection(set(assertion_values)))
        if any_match and eval_type == self._EvalType.ANY_ONE_OF:
            return True
        if not any_match and eval_type == self._EvalType.NOT_ANY_OF:
            return True
        return False
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestManagedZone(unittest2.TestCase):
    """Unit tests for ``gcloud.dns.zone.ManagedZone``."""

    # Fixture constants shared by the tests below.
    PROJECT = 'project'
    ZONE_NAME = 'zone-name'
    DESCRIPTION = 'ZONE DESCRIPTION'
    DNS_NAME = 'test.example.com'
def _getTargetClass(self):
    """Return the class under test (imported lazily, per suite convention)."""
    from gcloud.dns.zone import ManagedZone
    return ManagedZone
def _makeOne(self, *args, **kw):
    """Construct a ManagedZone, forwarding all arguments."""
    return self._getTargetClass()(*args, **kw)
def _setUpConstants(self):
    """Set a known creation timestamp (string and aware datetime) and ID."""
    import datetime
    from gcloud._helpers import UTC
    year = 2015
    month = 7
    day = 24
    hour = 19
    minute = 53
    seconds = 19
    micros = 6000
    # RFC 3339 form, as the API would return it.
    self.WHEN_STR = '%d-%02d-%02dT%02d:%02d:%02d.%06dZ' % (
        year, month, day, hour, minute, seconds, micros)
    self.WHEN = datetime.datetime(
        year, month, day, hour, minute, seconds, micros, tzinfo=UTC)
    self.ZONE_ID = 12345
def _makeResource(self):
    """Return a full managed-zone API resource dict for the fixtures."""
    self._setUpConstants()
    return {
        'name': self.ZONE_NAME,
        'dnsName': self.DNS_NAME,
        'description': self.DESCRIPTION,
        'id': self.ZONE_ID,
        'creationTime': self.WHEN_STR,
        'nameServers': [
            'ns-cloud1.googledomains.com',
            'ns-cloud2.googledomains.com',
        ],
    }
def _verifyReadonlyResourceProperties(self, zone, resource):
    """Check the server-assigned (read-only) zone properties."""
    self.assertEqual(zone.zone_id, resource.get('id'))
    # Optional fields default to None when the resource omits them.
    if 'creationTime' in resource:
        self.assertEqual(zone.created, self.WHEN)
    else:
        self.assertEqual(zone.created, None)
    if 'nameServers' in resource:
        self.assertEqual(zone.name_servers, resource['nameServers'])
    else:
        self.assertEqual(zone.name_servers, None)
def _verifyResourceProperties(self, zone, resource):
    """Check all zone properties against an API resource dict."""
    self._verifyReadonlyResourceProperties(zone, resource)
    self.assertEqual(zone.name, resource.get('name'))
    self.assertEqual(zone.dns_name, resource.get('dnsName'))
    self.assertEqual(zone.description, resource.get('description'))
    self.assertEqual(zone.zone_id, resource.get('id'))
    self.assertEqual(zone.name_server_set, resource.get('nameServerSet'))
def test_ctor(self):
    """Constructor wires name, dns_name, client; derived fields start None."""
    client = _Client(self.PROJECT)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    self.assertEqual(zone.name, self.ZONE_NAME)
    self.assertEqual(zone.dns_name, self.DNS_NAME)
    self.assertTrue(zone._client is client)
    self.assertEqual(zone.project, client.project)
    self.assertEqual(
        zone.path,
        '/projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME))
    self.assertEqual(zone.zone_id, None)
    self.assertEqual(zone.created, None)
    self.assertEqual(zone.description, None)
def test_from_api_repr_missing_identity(self):
    """A resource lacking 'name'/'dnsName' raises KeyError."""
    self._setUpConstants()
    client = _Client(self.PROJECT)
    RESOURCE = {}
    klass = self._getTargetClass()
    with self.assertRaises(KeyError):
        klass.from_api_repr(RESOURCE, client=client)
def test_from_api_repr_bare(self):
    """A minimal resource (name + dnsName only) builds a valid zone."""
    self._setUpConstants()
    client = _Client(self.PROJECT)
    RESOURCE = {
        'name': self.ZONE_NAME,
        'dnsName': self.DNS_NAME,
    }
    klass = self._getTargetClass()
    zone = klass.from_api_repr(RESOURCE, client=client)
    self.assertTrue(zone._client is client)
    self._verifyResourceProperties(zone, RESOURCE)
def test_from_api_repr_w_properties(self):
    """A full resource populates every zone property."""
    self._setUpConstants()
    client = _Client(self.PROJECT)
    RESOURCE = self._makeResource()
    klass = self._getTargetClass()
    zone = klass.from_api_repr(RESOURCE, client=client)
    self.assertTrue(zone._client is client)
    self._verifyResourceProperties(zone, RESOURCE)
def test_description_setter_bad_value(self):
    """Assigning a non-string description raises ValueError."""
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME,
                         _Client(self.PROJECT))
    with self.assertRaises(ValueError):
        zone.description = 12345
def test_description_setter(self):
    """A string description is stored and read back unchanged."""
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME,
                         _Client(self.PROJECT))
    zone.description = 'DESCRIPTION'
    self.assertEqual(zone.description, 'DESCRIPTION')
def test_name_server_set_setter_bad_value(self):
    """Assigning a non-string name_server_set raises ValueError."""
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME,
                         _Client(self.PROJECT))
    with self.assertRaises(ValueError):
        zone.name_server_set = 12345
def test_name_server_set_setter(self):
    """A string name_server_set is stored and read back unchanged."""
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME,
                         _Client(self.PROJECT))
    zone.name_server_set = 'NAME_SERVER_SET'
    self.assertEqual(zone.name_server_set, 'NAME_SERVER_SET')
def test_resource_record_set(self):
    """zone.resource_record_set() builds an RRS bound to the zone."""
    from gcloud.dns.resource_record_set import ResourceRecordSet
    RRS_NAME = 'other.example.com'
    RRS_TYPE = 'CNAME'
    TTL = 3600
    RRDATAS = ['www.example.com']
    client = _Client(self.PROJECT)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    rrs = zone.resource_record_set(RRS_NAME, RRS_TYPE, TTL, RRDATAS)
    self.assertTrue(isinstance(rrs, ResourceRecordSet))
    self.assertEqual(rrs.name, RRS_NAME)
    self.assertEqual(rrs.record_type, RRS_TYPE)
    self.assertEqual(rrs.ttl, TTL)
    self.assertEqual(rrs.rrdatas, RRDATAS)
    self.assertTrue(rrs.zone is zone)
def test_changes(self):
    """zone.changes() returns a Changes instance bound to the zone."""
    from gcloud.dns.changes import Changes
    client = _Client(self.PROJECT)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    changes = zone.changes()
    self.assertTrue(isinstance(changes, Changes))
    self.assertTrue(changes.zone is zone)
def test_create_w_bound_client(self):
    """create() POSTs name/dnsName and applies the returned resource."""
    PATH = 'projects/%s/managedZones' % self.PROJECT
    RESOURCE = self._makeResource()
    conn = _Connection(RESOURCE)
    client = _Client(project=self.PROJECT, connection=conn)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    zone.create()
    self.assertEqual(len(conn._requested), 1)
    req = conn._requested[0]
    self.assertEqual(req['method'], 'POST')
    self.assertEqual(req['path'], '/%s' % PATH)
    # Unset optional fields are omitted from the request payload.
    SENT = {
        'name': self.ZONE_NAME,
        'dnsName': self.DNS_NAME,
    }
    self.assertEqual(req['data'], SENT)
    self._verifyResourceProperties(zone, RESOURCE)
def test_create_w_alternate_client(self):
    """create(client=...) uses the passed client, not the bound one,
    and sends the optional description / nameServerSet fields."""
    PATH = 'projects/%s/managedZones' % self.PROJECT
    DESCRIPTION = 'DESCRIPTION'
    NAME_SERVER_SET = 'NAME_SERVER_SET'
    RESOURCE = self._makeResource()
    RESOURCE['nameServerSet'] = NAME_SERVER_SET
    RESOURCE['description'] = DESCRIPTION
    conn1 = _Connection()
    client1 = _Client(project=self.PROJECT, connection=conn1)
    conn2 = _Connection(RESOURCE)
    client2 = _Client(project=self.PROJECT, connection=conn2)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1)
    zone.name_server_set = NAME_SERVER_SET
    zone.description = DESCRIPTION
    zone.create(client=client2)
    # Only the explicitly-passed client's connection is used.
    self.assertEqual(len(conn1._requested), 0)
    self.assertEqual(len(conn2._requested), 1)
    req = conn2._requested[0]
    self.assertEqual(req['method'], 'POST')
    self.assertEqual(req['path'], '/%s' % PATH)
    SENT = {
        'name': self.ZONE_NAME,
        'dnsName': self.DNS_NAME,
        'nameServerSet': NAME_SERVER_SET,
        'description': DESCRIPTION,
    }
    self.assertEqual(req['data'], SENT)
    self._verifyResourceProperties(zone, RESOURCE)
def test_create_w_missing_output_properties(self):
    # In the wild, the resource returned from 'zone.create' sometimes
    # lacks 'creationTime' / 'lastModifiedTime'
    PATH = 'projects/%s/managedZones' % (self.PROJECT,)
    RESOURCE = self._makeResource()
    del RESOURCE['creationTime']
    del RESOURCE['id']
    del RESOURCE['nameServers']
    # _verifyReadonlyResourceProperties compares against self.WHEN.
    self.WHEN = None
    conn = _Connection(RESOURCE)
    client = _Client(project=self.PROJECT, connection=conn)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    zone.create()
    self.assertEqual(len(conn._requested), 1)
    req = conn._requested[0]
    self.assertEqual(req['method'], 'POST')
    self.assertEqual(req['path'], '/%s' % PATH)
    SENT = {
        'name': self.ZONE_NAME,
        'dnsName': self.DNS_NAME,
    }
    self.assertEqual(req['data'], SENT)
    self._verifyResourceProperties(zone, RESOURCE)
def test_exists_miss_w_bound_client(self):
    """exists() returns False when the GET comes back NotFound."""
    PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME)
    # No canned responses: the connection simulates a 404.
    conn = _Connection()
    client = _Client(project=self.PROJECT, connection=conn)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    self.assertFalse(zone.exists())
    self.assertEqual(len(conn._requested), 1)
    req = conn._requested[0]
    self.assertEqual(req['method'], 'GET')
    self.assertEqual(req['path'], '/%s' % PATH)
    self.assertEqual(req['query_params'], {'fields': 'id'})
def test_exists_hit_w_alternate_client(self):
    """exists(client=...) returns True on a hit, via the passed client."""
    PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME)
    conn1 = _Connection()
    client1 = _Client(project=self.PROJECT, connection=conn1)
    conn2 = _Connection({})
    client2 = _Client(project=self.PROJECT, connection=conn2)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1)
    self.assertTrue(zone.exists(client=client2))
    self.assertEqual(len(conn1._requested), 0)
    self.assertEqual(len(conn2._requested), 1)
    req = conn2._requested[0]
    self.assertEqual(req['method'], 'GET')
    self.assertEqual(req['path'], '/%s' % PATH)
    self.assertEqual(req['query_params'], {'fields': 'id'})
def test_reload_w_bound_client(self):
    """reload() GETs the zone resource and applies it."""
    PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME)
    RESOURCE = self._makeResource()
    conn = _Connection(RESOURCE)
    client = _Client(project=self.PROJECT, connection=conn)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    zone.reload()
    self.assertEqual(len(conn._requested), 1)
    req = conn._requested[0]
    self.assertEqual(req['method'], 'GET')
    self.assertEqual(req['path'], '/%s' % PATH)
    self._verifyResourceProperties(zone, RESOURCE)
def test_reload_w_alternate_client(self):
    """reload(client=...) uses the passed client, not the bound one."""
    PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME)
    RESOURCE = self._makeResource()
    conn1 = _Connection()
    client1 = _Client(project=self.PROJECT, connection=conn1)
    conn2 = _Connection(RESOURCE)
    client2 = _Client(project=self.PROJECT, connection=conn2)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1)
    zone.reload(client=client2)
    self.assertEqual(len(conn1._requested), 0)
    self.assertEqual(len(conn2._requested), 1)
    req = conn2._requested[0]
    self.assertEqual(req['method'], 'GET')
    self.assertEqual(req['path'], '/%s' % PATH)
    self._verifyResourceProperties(zone, RESOURCE)
def test_delete_w_bound_client(self):
    """delete() issues a DELETE to the zone path."""
    PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME)
    conn = _Connection({})
    client = _Client(project=self.PROJECT, connection=conn)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    zone.delete()
    self.assertEqual(len(conn._requested), 1)
    req = conn._requested[0]
    self.assertEqual(req['method'], 'DELETE')
    self.assertEqual(req['path'], '/%s' % PATH)
def test_delete_w_alternate_client(self):
    """delete(client=...) uses the passed client, not the bound one."""
    PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME)
    conn1 = _Connection()
    client1 = _Client(project=self.PROJECT, connection=conn1)
    conn2 = _Connection({})
    client2 = _Client(project=self.PROJECT, connection=conn2)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1)
    zone.delete(client=client2)
    self.assertEqual(len(conn1._requested), 0)
    self.assertEqual(len(conn2._requested), 1)
    req = conn2._requested[0]
    self.assertEqual(req['method'], 'DELETE')
    self.assertEqual(req['path'], '/%s' % PATH)
def test_list_resource_record_sets_defaults(self):
    """Default listing parses RRS items and returns the next page token."""
    from gcloud.dns.resource_record_set import ResourceRecordSet
    PATH = 'projects/%s/managedZones/%s/rrsets' % (
        self.PROJECT, self.ZONE_NAME)
    TOKEN = 'TOKEN'
    NAME_1 = 'www.example.com'
    TYPE_1 = 'A'
    TTL_1 = '86400'
    RRDATAS_1 = ['123.45.67.89']
    NAME_2 = 'alias.example.com'
    TYPE_2 = 'CNAME'
    TTL_2 = '3600'
    RRDATAS_2 = ['www.example.com']
    DATA = {
        'nextPageToken': TOKEN,
        'rrsets': [
            {'kind': 'dns#resourceRecordSet',
             'name': NAME_1,
             'type': TYPE_1,
             'ttl': TTL_1,
             'rrdatas': RRDATAS_1},
            {'kind': 'dns#resourceRecordSet',
             'name': NAME_2,
             'type': TYPE_2,
             'ttl': TTL_2,
             'rrdatas': RRDATAS_2},
        ]
    }
    conn = _Connection(DATA)
    client = _Client(project=self.PROJECT, connection=conn)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    rrsets, token = zone.list_resource_record_sets()
    self.assertEqual(len(rrsets), len(DATA['rrsets']))
    for found, expected in zip(rrsets, DATA['rrsets']):
        self.assertTrue(isinstance(found, ResourceRecordSet))
        self.assertEqual(found.name, expected['name'])
        self.assertEqual(found.record_type, expected['type'])
        # API returns TTL as a string; the object coerces it to int.
        self.assertEqual(found.ttl, int(expected['ttl']))
        self.assertEqual(found.rrdatas, expected['rrdatas'])
    self.assertEqual(token, TOKEN)
    self.assertEqual(len(conn._requested), 1)
    req = conn._requested[0]
    self.assertEqual(req['method'], 'GET')
    self.assertEqual(req['path'], '/%s' % PATH)
def test_list_resource_record_sets_explicit(self):
    """Explicit max_results/page_token/client are forwarded as query params."""
    from gcloud.dns.resource_record_set import ResourceRecordSet
    PATH = 'projects/%s/managedZones/%s/rrsets' % (
        self.PROJECT, self.ZONE_NAME)
    TOKEN = 'TOKEN'
    NAME_1 = 'www.example.com'
    TYPE_1 = 'A'
    TTL_1 = '86400'
    RRDATAS_1 = ['123.45.67.89']
    NAME_2 = 'alias.example.com'
    TYPE_2 = 'CNAME'
    TTL_2 = '3600'
    RRDATAS_2 = ['www.example.com']
    DATA = {
        'rrsets': [
            {'kind': 'dns#resourceRecordSet',
             'name': NAME_1,
             'type': TYPE_1,
             'ttl': TTL_1,
             'rrdatas': RRDATAS_1},
            {'kind': 'dns#resourceRecordSet',
             'name': NAME_2,
             'type': TYPE_2,
             'ttl': TTL_2,
             'rrdatas': RRDATAS_2},
        ]
    }
    conn1 = _Connection()
    client1 = _Client(project=self.PROJECT, connection=conn1)
    conn2 = _Connection(DATA)
    client2 = _Client(project=self.PROJECT, connection=conn2)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1)
    rrsets, token = zone.list_resource_record_sets(
        max_results=3, page_token=TOKEN, client=client2)
    self.assertEqual(len(rrsets), len(DATA['rrsets']))
    for found, expected in zip(rrsets, DATA['rrsets']):
        self.assertTrue(isinstance(found, ResourceRecordSet))
        self.assertEqual(found.name, expected['name'])
        self.assertEqual(found.record_type, expected['type'])
        self.assertEqual(found.ttl, int(expected['ttl']))
        self.assertEqual(found.rrdatas, expected['rrdatas'])
    # No 'nextPageToken' in DATA, so the returned token is None.
    self.assertEqual(token, None)
    self.assertEqual(len(conn1._requested), 0)
    self.assertEqual(len(conn2._requested), 1)
    req = conn2._requested[0]
    self.assertEqual(req['method'], 'GET')
    self.assertEqual(req['path'], '/%s' % PATH)
    self.assertEqual(req['query_params'],
                     {'maxResults': 3, 'pageToken': TOKEN})
def test_list_changes_defaults(self):
    """Default listing parses Changes (additions/deletions) and the token."""
    from gcloud._helpers import _datetime_to_rfc3339
    from gcloud.dns.changes import Changes
    from gcloud.dns.resource_record_set import ResourceRecordSet
    self._setUpConstants()
    PATH = 'projects/%s/managedZones/%s/changes' % (
        self.PROJECT, self.ZONE_NAME)
    TOKEN = 'TOKEN'
    NAME_1 = 'www.example.com'
    TYPE_1 = 'A'
    TTL_1 = '86400'
    RRDATAS_1 = ['123.45.67.89']
    NAME_2 = 'alias.example.com'
    TYPE_2 = 'CNAME'
    TTL_2 = '3600'
    RRDATAS_2 = ['www.example.com']
    CHANGES_NAME = 'changeset_id'
    DATA = {
        'nextPageToken': TOKEN,
        'changes': [{
            'kind': 'dns#change',
            'id': CHANGES_NAME,
            'status': 'pending',
            'startTime': _datetime_to_rfc3339(self.WHEN),
            'additions': [
                {'kind': 'dns#resourceRecordSet',
                 'name': NAME_1,
                 'type': TYPE_1,
                 'ttl': TTL_1,
                 'rrdatas': RRDATAS_1}],
            'deletions': [
                {'kind': 'dns#change',
                 'name': NAME_2,
                 'type': TYPE_2,
                 'ttl': TTL_2,
                 'rrdatas': RRDATAS_2}],
        }]
    }
    conn = _Connection(DATA)
    client = _Client(project=self.PROJECT, connection=conn)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client)
    changes, token = zone.list_changes()
    self.assertEqual(len(changes), len(DATA['changes']))
    for found, expected in zip(changes, DATA['changes']):
        self.assertTrue(isinstance(found, Changes))
        self.assertEqual(found.name, CHANGES_NAME)
        self.assertEqual(found.status, 'pending')
        self.assertEqual(found.started, self.WHEN)
        self.assertEqual(len(found.additions), len(expected['additions']))
        for found_rr, expected_rr in zip(found.additions,
                                         expected['additions']):
            self.assertTrue(isinstance(found_rr, ResourceRecordSet))
            self.assertEqual(found_rr.name, expected_rr['name'])
            self.assertEqual(found_rr.record_type, expected_rr['type'])
            self.assertEqual(found_rr.ttl, int(expected_rr['ttl']))
            self.assertEqual(found_rr.rrdatas, expected_rr['rrdatas'])
        self.assertEqual(len(found.deletions), len(expected['deletions']))
        for found_rr, expected_rr in zip(found.deletions,
                                         expected['deletions']):
            self.assertTrue(isinstance(found_rr, ResourceRecordSet))
            self.assertEqual(found_rr.name, expected_rr['name'])
            self.assertEqual(found_rr.record_type, expected_rr['type'])
            self.assertEqual(found_rr.ttl, int(expected_rr['ttl']))
            self.assertEqual(found_rr.rrdatas, expected_rr['rrdatas'])
    self.assertEqual(token, TOKEN)
    self.assertEqual(len(conn._requested), 1)
    req = conn._requested[0]
    self.assertEqual(req['method'], 'GET')
    self.assertEqual(req['path'], '/%s' % PATH)
def test_list_changes_explicit(self):
    """Explicit max_results/page_token/client are forwarded as query params."""
    from gcloud._helpers import _datetime_to_rfc3339
    from gcloud.dns.changes import Changes
    from gcloud.dns.resource_record_set import ResourceRecordSet
    self._setUpConstants()
    PATH = 'projects/%s/managedZones/%s/changes' % (
        self.PROJECT, self.ZONE_NAME)
    TOKEN = 'TOKEN'
    NAME_1 = 'www.example.com'
    TYPE_1 = 'A'
    TTL_1 = '86400'
    RRDATAS_1 = ['123.45.67.89']
    NAME_2 = 'alias.example.com'
    TYPE_2 = 'CNAME'
    TTL_2 = '3600'
    RRDATAS_2 = ['www.example.com']
    CHANGES_NAME = 'changeset_id'
    DATA = {
        'changes': [{
            'kind': 'dns#change',
            'id': CHANGES_NAME,
            'status': 'pending',
            'startTime': _datetime_to_rfc3339(self.WHEN),
            'additions': [
                {'kind': 'dns#resourceRecordSet',
                 'name': NAME_1,
                 'type': TYPE_1,
                 'ttl': TTL_1,
                 'rrdatas': RRDATAS_1}],
            'deletions': [
                {'kind': 'dns#change',
                 'name': NAME_2,
                 'type': TYPE_2,
                 'ttl': TTL_2,
                 'rrdatas': RRDATAS_2}],
        }]
    }
    conn1 = _Connection()
    client1 = _Client(project=self.PROJECT, connection=conn1)
    conn2 = _Connection(DATA)
    client2 = _Client(project=self.PROJECT, connection=conn2)
    zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1)
    changes, token = zone.list_changes(
        max_results=3, page_token=TOKEN, client=client2)
    self.assertEqual(len(changes), len(DATA['changes']))
    for found, expected in zip(changes, DATA['changes']):
        self.assertTrue(isinstance(found, Changes))
        self.assertEqual(found.name, CHANGES_NAME)
        self.assertEqual(found.status, 'pending')
        self.assertEqual(found.started, self.WHEN)
        self.assertEqual(len(found.additions), len(expected['additions']))
        for found_rr, expected_rr in zip(found.additions,
                                         expected['additions']):
            self.assertTrue(isinstance(found_rr, ResourceRecordSet))
            self.assertEqual(found_rr.name, expected_rr['name'])
            self.assertEqual(found_rr.record_type, expected_rr['type'])
            self.assertEqual(found_rr.ttl, int(expected_rr['ttl']))
            self.assertEqual(found_rr.rrdatas, expected_rr['rrdatas'])
        self.assertEqual(len(found.deletions), len(expected['deletions']))
        for found_rr, expected_rr in zip(found.deletions,
                                         expected['deletions']):
            self.assertTrue(isinstance(found_rr, ResourceRecordSet))
            self.assertEqual(found_rr.name, expected_rr['name'])
            self.assertEqual(found_rr.record_type, expected_rr['type'])
            self.assertEqual(found_rr.ttl, int(expected_rr['ttl']))
            self.assertEqual(found_rr.rrdatas, expected_rr['rrdatas'])
    # No 'nextPageToken' in DATA, so the returned token is None.
    self.assertEqual(token, None)
    self.assertEqual(len(conn1._requested), 0)
    self.assertEqual(len(conn2._requested), 1)
    req = conn2._requested[0]
    self.assertEqual(req['method'], 'GET')
    self.assertEqual(req['path'], '/%s' % PATH)
    self.assertEqual(req['query_params'],
                     {'maxResults': 3, 'pageToken': TOKEN})
class _Client(object):
    """Test double for a gcloud client: exposes project and connection."""

    def __init__(self, project='project', connection=None):
        self.project = project
        self.connection = connection
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
from gcloud.exceptions import NotFound
self._requested.append(kw)
try:
response, self._responses = self._responses[0], self._responses[1:]
except:
raise NotFound('miss')
else:
return response
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from collections import defaultdict
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir
from pants.contrib.go.subsystems.fetcher_factory import FetcherFactory
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_task import GoTask
class GoFetch(GoTask):
"""Fetches third-party Go libraries."""
@classmethod
def subsystem_dependencies(cls):
    """Declare FetcherFactory as a required subsystem."""
    inherited = super(GoFetch, cls).subsystem_dependencies()
    return inherited + (FetcherFactory,)
@classmethod
def product_types(cls):
    """This task produces the sources of fetched remote Go libraries."""
    products = ['go_remote_lib_src']
    return products
@classmethod
def register_options(cls, register):
    """Register this task's command-line/config options."""
    # Deprecated: scheduled for removal; the fetcher subsystem option
    # replaces it.
    register('--skip-meta-tag-resolution', advanced=True, type=bool, default=False,
             removal_version='1.2.0',
             removal_hint='Use --disallow-cloning-fetcher on scope go-fetchers instead.',
             help='Whether to ignore meta tag resolution when resolving remote libraries.')
@property
def cache_target_dirs(self):
    """Opt this task into per-target result caching."""
    # TODO(John Sirois): See TODO in _transitive_download_remote_libs, re-consider how artifact
    # caching works for fetches.
    return True
def execute(self):
    """Resolve and fetch all remote Go libraries in the target roots.

    Fails the task if any transitive remote dependency is undeclared.
    """
    self.context.products.safe_create_data('go_remote_lib_src', lambda: defaultdict(str))
    go_remote_libs = self.context.targets(self.is_remote_lib)
    if not go_remote_libs:
        return
    undeclared_deps = self._transitive_download_remote_libs(set(go_remote_libs))
    if undeclared_deps:
        # Log every missing declaration before failing, so users can fix
        # them all in one pass.
        self._log_undeclared_deps(undeclared_deps)
        raise TaskError('Failed to resolve transitive Go remote dependencies.')
def _log_undeclared_deps(self, undeclared_deps):
    """Log, per dependee, each remote dep missing a local declaration.

    :param undeclared_deps: mapping of dependee target -> set of
        (import path, expected Address) tuples.
    """
    for dependee, deps in undeclared_deps.items():
        self.context.log.error('{address} has remote dependencies which require local declaration:'
                               .format(address=dependee.address.reference()))
        for dep_import_path, address in deps:
            self.context.log.error('\t--> {import_path} (expected go_remote_library declaration '
                                   'at {address})'.format(import_path=dep_import_path,
                                                          address=address.reference()))
def _get_fetcher(self, import_path):
    """Return the fetcher the FetcherFactory selects for ``import_path``."""
    return FetcherFactory.global_instance().get_fetcher(import_path)
def _fetch_pkg(self, gopath, pkg, rev):
    """Fetch the package and setup symlinks."""
    fetcher = self._get_fetcher(pkg)
    root = fetcher.root()
    # Fetches are cached per (remote root, revision) under the workdir.
    root_dir = os.path.join(self.workdir, 'fetches', root, rev)
    # Only fetch each remote root once.
    if not os.path.exists(root_dir):
        with temporary_dir() as tmp_fetch_root:
            # Fetch into a temp dir first so a failed fetch never leaves a
            # partially-populated root_dir behind.
            fetcher.fetch(dest=tmp_fetch_root, rev=rev)
            safe_mkdir(root_dir)
            for path in os.listdir(tmp_fetch_root):
                shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))
    # TODO(John Sirois): Circle back and get get rid of this symlink tree.
    # GoWorkspaceTask will further symlink a single package from the tree below into a
    # target's workspace when it could just be linking from the fetch_dir. The only thing
    # standing in the way is a determination of what we want to artifact cache. If we don't
    # want to cache fetched zips, linking straight from the fetch_dir works simply. Otherwise
    # thought needs to be applied to using the artifact cache directly or synthesizing a
    # canonical owner target for the fetched files that 'child' targets (subpackages) can
    # depend on and share the fetch from.
    dest_dir = os.path.join(gopath, 'src', root)
    # We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
    # chroot to avoid collision; thus `clean=True`.
    safe_mkdir(dest_dir, clean=True)
    for path in os.listdir(root_dir):
        os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path))
def _map_fetched_remote_source(self, go_remote_lib, gopath, all_known_remote_libs, resolved_remote_libs, undeclared_deps):
    """Resolve (or flag) each remote import of an already-fetched remote library.

    For every remote import of `go_remote_lib`, either finds/synthesizes the owning
    GoRemoteLibrary target and records it in `resolved_remote_libs` /
    `all_known_remote_libs` (both mutated in place), or records the missing
    declaration in `undeclared_deps`. Also injects each resolved dependency edge
    into the build graph.
    """
    for remote_import_path in self._get_remote_import_paths(go_remote_lib.import_path, gopath=gopath):
        fetcher = self._get_fetcher(remote_import_path)
        remote_root = fetcher.root()
        spec_path = os.path.join(go_remote_lib.target_base, remote_root)
        package_path = GoRemoteLibrary.remote_package_path(remote_root, remote_import_path)
        # For the root package itself, package_path is empty; name after the root dir.
        target_name = package_path or os.path.basename(remote_root)
        address = Address(spec_path, target_name)
        if not any(address == lib.address for lib in all_known_remote_libs):
            try:
                # If we've already resolved a package from this remote root, its ok to define an
                # implicit synthetic remote target for all other packages in the same remote root.
                same_remote_libs = [lib for lib in all_known_remote_libs if spec_path == lib.address.spec_path]
                implicit_ok = any(same_remote_libs)
                # If we're creating a synthetic remote target, we should pin it to the same
                # revision as the rest of the library.
                rev = None
                if implicit_ok:
                    rev = same_remote_libs[0].rev
                remote_lib = self._resolve(go_remote_lib, address, package_path, rev, implicit_ok)
                resolved_remote_libs.add(remote_lib)
                all_known_remote_libs.add(remote_lib)
            except self.UndeclaredRemoteLibError as e:
                undeclared_deps[go_remote_lib].add((remote_import_path, e.address))
        # NOTE: The edge is injected even when resolution failed above, so the graph
        # still reflects the attempted dependency.
        self.context.build_graph.inject_dependency(go_remote_lib.address, address)
def _transitive_download_remote_libs(self, go_remote_libs, all_known_remote_libs=None):
    """Recursively attempt to resolve / download all remote transitive deps of go_remote_libs.

    Returns a dict<GoRemoteLibrary, set<tuple<str, Address>>>, which maps a go remote library to a
    set of unresolved remote dependencies, each dependency expressed as a tuple containing the
    the import path of the dependency and the expected target address. If all transitive
    dependencies were successfully resolved, returns an empty dict.

    Downloads as many invalidated transitive dependencies as possible, and returns as many
    undeclared dependencies as possible. However, because the dependencies of a remote library
    can only be determined _after_ it has been downloaded, a transitive dependency of an undeclared
    remote library will never be detected.

    Because go_remote_libraries do not declare dependencies (rather, they are inferred), injects
    all successfully resolved transitive dependencies into the build graph.
    """
    if not go_remote_libs:
        # Recursion bottoms out here: nothing new to resolve.
        return {}
    all_known_remote_libs = all_known_remote_libs or set()
    all_known_remote_libs.update(go_remote_libs)
    resolved_remote_libs = set()
    undeclared_deps = defaultdict(set)
    go_remote_lib_src = self.context.products.get_data('go_remote_lib_src')
    with self.invalidated(go_remote_libs) as invalidation_check:
        for vt in invalidation_check.all_vts:
            go_remote_lib = vt.target
            gopath = vt.results_dir
            if not vt.valid:
                # Only actually fetch targets whose results dirs are stale/missing.
                self._fetch_pkg(gopath, go_remote_lib.import_path, go_remote_lib.rev)
            # Regardless of validity, (re)discover this lib's remote imports and
            # record where its sources live for downstream tasks.
            self._map_fetched_remote_source(go_remote_lib, gopath, all_known_remote_libs,
                                            resolved_remote_libs, undeclared_deps)
            go_remote_lib_src[go_remote_lib] = os.path.join(gopath, 'src', go_remote_lib.import_path)
    # Recurse after the invalidated block, so the libraries we downloaded are now "valid"
    # and thus we don't try to download a library twice.
    trans_undeclared_deps = self._transitive_download_remote_libs(resolved_remote_libs,
                                                                  all_known_remote_libs)
    undeclared_deps.update(trans_undeclared_deps)
    return undeclared_deps
class UndeclaredRemoteLibError(Exception):
    """Raised when a remote package's owning target address has no BUILD-file declaration.

    Carries the `address` where a go_remote_library declaration was expected.
    """
    def __init__(self, address):
        self.address = address
def _resolve(self, dependent_remote_lib, address, pkg, rev, implicit_ok):
    """Resolves the GoRemoteLibrary at `address` defining the given `pkg`.

    If `implicit_ok` is True, then a GoRemoteLibrary to own `pkg` is always synthesized if it does
    not already exist; otherwise the address must already exist in the build graph (a BUILD file
    must exist on disk that owns the given `pkg` and declares a `rev` for it).

    :param dependent_remote_lib: The remote library that depends on the remote `pkg`.
    :type: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
    :param address: The address of the remote library that should own `pkg`.
    :type: :class:`pants.base.Address`
    :param string pkg: The remote package path whose owning target needs to be resolved.
    :param string rev: The revision of the package. None defaults to `master`.
    :param bool implicit_ok: `False` if the given `address` must be defined in a BUILD file on disk;
                             otherwise a remote library to own `pkg` will always be created and
                             returned.
    :returns: The resulting resolved remote library after injecting it in the build graph.
    :rtype: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
    :raises: :class:`GoFetch.UndeclaredRemoteLibError`: If no BUILD file exists for the remote root
             `pkg` lives in.
    """
    try:
        # EAFP: try the on-disk BUILD-file declaration first.
        self.context.build_graph.inject_address_closure(address)
    except AddressLookupError:
        if implicit_ok:
            # Synthesize an owning target pinned to `rev` (None means default branch).
            self.context.add_new_target(address=address,
                                        target_base=dependent_remote_lib.target_base,
                                        target_type=GoRemoteLibrary,
                                        pkg=pkg,
                                        rev=rev)
        else:
            raise self.UndeclaredRemoteLibError(address)
    return self.context.build_graph.get_target(address)
@staticmethod
def _is_relative(import_path):
return import_path.startswith('.')
def _get_remote_import_paths(self, pkg, gopath=None):
    """Returns the remote import paths declared by the given remote Go `pkg`.

    NB: This only includes production code imports, no test code imports.

    :param string pkg: The import path of the package to inspect.
    :param string gopath: Optional GOPATH to resolve `pkg` within.
    :returns: Import paths that are neither Go-internal nor relative.
    :rtype: list of string
    """
    import_listing = self.import_oracle.list_imports(pkg, gopath=gopath)
    return [imp for imp in import_listing.imports
            if (not self.import_oracle.is_go_internal_import(imp) and
                # We assume relative imports are local to the package and skip attempts to
                # recursively resolve them.
                not self._is_relative(imp))]
|
|
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# BUG nnnn: 2012.09.26: Search for a route from DT Mpls to DT St Paul (i.e.,
# the 94) at 9 AM and you may have to bike to a slow bus -- why
# isn't the express bus suggested?
import os
# FIXME: Bug NNNN: Replace sqlite3 table with postgres table, and just keep a
# copy of the transit db alongside the other cyclopath tables, but in a
# different schema.
# ^^^ Maybe this doesn't matter. Is the sqlite3 db closed once the route finder
# is loaded? Is the load time so slow that we should bother importing into
# Postgres? Is there an easy way to import a sqlite3 file into Postgres?
import sqlite3
import time
import traceback
# 2013.11.18: This is new:
# /ccp/dev/cp/pyserver/planner/routed_p2/route_finder.py:24: UserWarning:
# "Module osgeo was already imported from
# /ccp/opt/usr/lib/python2.7/site-packages/GDAL-1.10.1-py2.7-linux-x86_64.egg
# /osgeo/__init__.py, but /usr/lib64/python2.7/site-packages is being added
# to sys.path"
from pkg_resources import require
require("Graphserver>=1.0.0")
from graphserver.core import Crossing
from graphserver.core import GenericPyPayload
from graphserver.core import Link
from graphserver.core import State
from graphserver.core import TripAlight
from graphserver.core import TripBoard
from graphserver.core import WalkOptions
from graphserver.ext.gtfs.gtfsdb import GTFSDatabase
import conf
import g
from gwis.exception.gwis_error import GWIS_Error
from item.feat import byway
from item.feat import node_endpoint
from item.feat import route_step
from item.feat import route_stop
from item.util.item_query_builder import Item_Query_Builder
from planner.problem_base import Problem_Base
from planner.travel_mode import Travel_Mode
from planner.routed_p2.payload_byway import Payload_Byway
from util_ import db_glue
from util_ import geometry
from util_ import gml
from util_ import misc
# Public API of this module: only the Problem class is exported.
__all__ = ['Problem']

# Module-level logger for the p2 (multimodal bike + transit) route finder.
log = g.log.getLogger('route_finder/p2')
class Problem(Problem_Base):
    """A multimodal (bicycle + transit) route-finding problem solved via Graphserver.

    Instances are built per route request and hold the request endpoints, the
    user's transit preference, and handles to the Graphserver graph and the
    GTFS transit database used while solving.
    """

    # __slots__ fixes the attribute set (no per-instance __dict__); every
    # attribute the class uses must be declared here.
    __slots__ = (
        'req',                      # the GWIS request object
        'gserver',                  # Graphserver graph handle
        'beg_addr',                 # human-readable start address
        'fin_addr',                 # human-readable end address
        'beg_vertex_id',            # graph vertex id at the route start (str)
        'fin_vertex_id',            # graph vertex id at the route end (str)
        'beg_xy',                   # (x, y) of the route start
        'fin_xy',                   # (x, y) of the route end
        'rating_func',              # callable mapping byway -> rating
        'rating_min',               # minimum acceptable rating (FIXME: unused?)
        'p1_priority',              # p1 planner priority carried along
        'xy_crow_flies',            # straight-line distance start->end (meters)
        'p2_depart_at',             # requested departure time (string)
        'p2_transit_pref',          # transit-vs-bike preference (-4..6, even)
        'is_reverse',               # FIXME: Not implemented.
        'depart_time',              # departure time in seconds-since-epoch (+ slack)
        'walk_opts',                # Graphserver WalkOptions C-object
        'phase_change_grade',       # grade at which riders "settle in for a slog"
        'phase_change_velocity_factor',  # quadratic speed-vs-grade coefficient
        'spt_vertex_id',            # vertex the shortest-path-tree is read from
        'db_gtfs',                  # GTFSDatabase handle (sqlite3-backed)
        'qb',                       # Item_Query_Builder for this request
    )
# *** Constructor
#
def __init__(self, req, graph, rt,
             beg_vertex_id, fin_vertex_id, rating_func, rating_min,
             beg_xy, fin_xy, is_reverse=False):
    '''Finds a bike/transit route between the start and end vertices using
       the given graph. Uses the departure time and transit preference to
       determine when to route using transit, and when to route using
       bicycling.

       :param req: the GWIS request.
       :param graph: transit graph wrapper exposing a Graphserver `gserver`.
       :param rt: the route item carrying addresses and p2 preferences.
       :param beg_vertex_id: graph vertex id of the route start.
       :param fin_vertex_id: graph vertex id of the route end.
       :param rating_func: callable mapping a byway to its rating.
       :param rating_min: minimum acceptable rating (FIXME: not used?).
       :param beg_xy: (x, y) coordinate of the route start.
       :param fin_xy: (x, y) coordinate of the route end.
       :param is_reverse: reverse (arrive-by) routing; not implemented.
       :raises GWIS_Error: if the route lacks a p2_depart_at time.'''
    self.req = req # FIXME: Use to check branch, rev, and username?
    self.gserver = graph.gserver
    # Graphserver expects string vertex labels.
    self.beg_vertex_id = str(beg_vertex_id)
    self.fin_vertex_id = str(fin_vertex_id)
    self.rating_func = rating_func
    self.rating_min = rating_min # FIXME: Not used?
    self.p1_priority = rt.p1_priority
    self.beg_xy = beg_xy
    self.beg_addr = rt.beg_addr
    self.fin_xy = fin_xy
    self.fin_addr = rt.fin_addr
    self.xy_crow_flies = 0.0
    self.p2_depart_at = rt.p2_depart_at
    if not self.p2_depart_at:
        # A multimodal route is meaningless without a departure time.
        raise GWIS_Error('Please specify p2_depart_at')
    self.p2_transit_pref = rt.p2_transit_pref
    # Computed later, in solve()/get_walk_options().
    self.depart_time = None
    self.walk_opts = None
    self.phase_change_grade = None
    self.phase_change_velocity_factor = None
    # FIXME: Carol has this coded but it's never True
    g.assurt(not is_reverse) # FIXME: Not implemented
    self.is_reverse = is_reverse
    # Internal members
    self.spt_vertex_id = None
    self.db_gtfs = None
    #
    self.qb = None
# *** Public interface
#
def solve(self, qb):
    '''Solves the problem of getting from point A to point B.

       :param qb: the Item_Query_Builder for this request.
       :returns: tuple of (route_steps, route_stops, path_cost, path_len);
                 path_cost is a placeholder -1.0 (computed later by callers).
       :raises GWIS_Error: if Graphserver finds no path.'''
    time_0 = time.time()
    log.debug('solve: solving...')
    # FIXME_2013_06_14:
    # FIXME/EXPLAIN: Why is qb passed to fcns. when also set as instance var?
    # oh, also: up until 2013.06.14 no one noticed solve(qb) for p2 but solve() p1
    self.qb = qb
    # Get handles to the two databases
    self.db_gtfs = GTFSDatabase(conf.transitdb_filename)
    # Get the walk options used to calculate costs.
    # FIXME: Most walk options are currently hard-coded.
    self.walk_opts = self.get_walk_options()
    # The walk opts members are immutable, so save a ref to us for the Edge
    # (keyed by the C-object's address; removed again below).
    Payload_Byway.outstanding_problems[self.walk_opts.soul] = self
    # Convert the departure time string into seconds-since-epoch.
    depart_time = Problem.date_flashclient_mktime(self.p2_depart_at)
    # FIXME: Make this adjustment settable.
    slack = 5 * 60 # Number of seconds to wait at the first transit stop.
    self.depart_time = depart_time + slack
    # Ask Graphserver for the shortest path tree.
    spt = self.graphserver_get_spt()
    # Ask Graphserver for the lists of vertices and edges.
    (vertices, edges) = self.graphserver_get_path(spt)
    rsteps = route_step.Many()
    rstops = route_stop.Many()
    path_len = 0.0
    if len(vertices) > 0:
        # Convert the path into route steps we can send back to the client.
        (rsteps, rstops, path_len,) = self.path_convert(qb, vertices, edges)
        # Adjust bicycle route steps' times according to transit edges.
        # (That is, don't have the user get to the transit stop 30 mins.
        #  early.)
        self.steps_adjust_jit_arrival(rsteps, slack)
    # else, we'll raise in a moment, after cleaning up.
    del Payload_Byway.outstanding_problems[self.walk_opts.soul]
    # Destroy Graphserver C-objects (not garbage collected by Python).
    spt.destroy()
    self.walk_opts.destroy()
    self.walk_opts = None
    log.debug('solve: route of %d steps found in %s'
              % (len(rsteps), misc.time_format_elapsed(time_0),))
    if not rsteps:
        log.error('solve: route not found?: %s' % (self,))
        # FIXME: This error message is not really "Help"ful. Why did the
        #        request fail? What specifically can the user do to fix the problem?
        # SYNC_ME: This error message shared with routed_p1/route_finder.py.
        #raise GWIS_Error(
        #   'No route exists. Click "Help" for ideas on what to do next.')
        raise GWIS_Error(Problem_Base.error_msg_basic)
    # The path cost returned here is just for debugging. We'll compute it
    # later, anyway, so just toss back a negative.
    path_cost = -1.0
    return (rsteps, rstops, path_cost, path_len,)
# *** First tier solve() helpers
#
def get_walk_options(self):
    '''Builds and returns the Graphserver WalkOptions for this problem.

       Also computes and caches self.phase_change_grade,
       self.phase_change_velocity_factor and self.xy_crow_flies as side
       effects. The caller owns the returned C-object and must destroy() it.'''
    # FIXME: WalkOptions is immutable and not completed wired into Python.
    #        We have to set some options that Graphserver uses (FIXME:
    #        Enumerate those options), but some of the options are only used
    #        by core/edgetypes/street.c, which we don't use. So I [lb] think
    #        we should probably make our own object and not worry about this
    #        one.
    # See graphserver/core.py, where all these options are defined.
    # See also graphserver/core/walkoption.c for the defaults:
    # FIXME: Magic Numbers. These should be user-choosable. See Cycloplan 2.
    walkoptions = WalkOptions()
    # The transfer_penalty is the no. of seconds penalty for each boarding.
    # Increase if routes contain frivolous transfers, or decrease if routes
    # avoiding all buses/trains/transit.
    walkoptions.transfer_penalty = 5000
    # NOTE: We don't care about turn_penalty; we have our own alg.
    walkoptions.turn_penalty = 120
    walkoptions.walking_speed = 4.5 # in meters per sec; approx 10 mph
    walkoptions.uphill_slowness = 0.05
    walkoptions.downhill_fastness = -12.1
    #walkoptions.phase_change_grade = 0.045;
    walkoptions.hill_reluctance = 0.0
    walkoptions.max_walk = 10000 # meters
    walkoptions.walking_overage = 0.1
    log.debug('get_walk_options: walk_ops.soul: %s' % (walkoptions.soul))
    # Graphserver defines these in walkoptions.c, but not in core.py. Hrm.
    # And we can't attach them to walkoptions because that's a C object.
    # And I [lb] quote: "Grade. An interesting thing happens at a
    # particular grade, when they settle in for a long slog."
    self.phase_change_grade = 0.045;
    # From graphserver: "velocity between 0 grade and the phase change grade
    # is Ax^2+Bx+C, where A is the phase_change_velocity_factor, B is the
    # downhill fastness, and C is the average speed"
    # FIXME: See speed_from_grade: this is almost the same calculation,
    # except speed_from_grade uses whatever the grade really is, and this
    # uses a static value for the grade....
    phase_change_speed = ((walkoptions.uphill_slowness
                           * walkoptions.walking_speed)
                          / (walkoptions.uphill_slowness
                             + self.phase_change_grade))
    self.phase_change_velocity_factor = (
        (phase_change_speed
         - (walkoptions.downhill_fastness * self.phase_change_grade)
         - walkoptions.walking_speed)
        / (self.phase_change_grade * self.phase_change_grade))
    log.debug('get_walk_options: phase_change_grade: %s'
              % (self.phase_change_grade,))
    log.debug('get_walk_options: phase_change_velocity_factor: %s'
              % (self.phase_change_velocity_factor,))
    # FIXME: Why waste time with SQL? If crow_flies_sql and crow_flies_raw
    #        return same results, use latter (_raw) (or maybe timeit first).
    crow_flies_sql = self.get_straightline_geom_len_sql(self.beg_xy,
                                                        self.fin_xy)
    crow_flies_raw = self.get_straightline_geom_len_raw(self.beg_xy,
                                                        self.fin_xy)
    # Sanity-check the two implementations against each other (1 cm slop).
    if abs(crow_flies_raw - crow_flies_sql) > 0.01:
        log.warning(
            'Unexpectd diffr: xy: beg: %s / fin: %s // crow: sql: %s / raw: %s'
            % (self.beg_xy, self.fin_xy, crow_flies_sql, crow_flies_raw,))
    self.xy_crow_flies = crow_flies_raw
    log.debug('get_walk_options: xy_crow_flies: %s' % (self.xy_crow_flies,))
    # Map the client's transit preference (-4 favors biking ... 6 favors
    # transit) onto walking_reluctance / max_walk / walking_overage.
    # NOTE: "walking" here means bicycling; Graphserver's terminology.
    if self.p2_transit_pref == -4:
        walkoptions.walking_reluctance = 0.5
        walkoptions.max_walk = Payload_Byway.ABSOLUTE_MAX_WALK
        # Don't go negative, unless you want a century spaghetti ride. It
        # means the further from the start you are, the more favored the edge.
        #walkoptions.walking_overage = -0.1 # favors walking
        walkoptions.walking_overage = 0.0
    elif self.p2_transit_pref == -2:
        walkoptions.walking_reluctance = 0.75
        walkoptions.max_walk = Payload_Byway.ABSOLUTE_MAX_WALK
        walkoptions.walking_overage = 0.0
    elif self.p2_transit_pref == 0:
        # Default value. Don't pref. either transit or biking.
        walkoptions.walking_reluctance = 1.0
        walkoptions.max_walk = Payload_Byway.ABSOLUTE_MAX_WALK # 1,000 km
        walkoptions.walking_overage = 0.0
    elif self.p2_transit_pref == 2:
        walkoptions.walking_reluctance = 1.0
        walkoptions.max_walk = int(self.xy_crow_flies * 1.33)
        walkoptions.walking_overage = 0.1
    elif self.p2_transit_pref == 4:
        walkoptions.walking_reluctance = 2.0
        walkoptions.max_walk = int(self.xy_crow_flies * 0.66)
        walkoptions.walking_overage = 0.1
    elif self.p2_transit_pref == 6:
        walkoptions.walking_reluctance = 2.0
        walkoptions.max_walk = 0
        walkoptions.walking_overage = 0.1
    else:
        g.assurt(False)
    log.debug('get_walk_options: relucance: %s / max_bike: %s / overage: %s'
              % (walkoptions.walking_reluctance, walkoptions.max_walk,
                 walkoptions.walking_overage,))
    return walkoptions
#
def graphserver_get_spt(self):
    '''Asks Graphserver for the shortest path tree for this problem.

       Picks the forward or retrograde tree builder based on is_reverse and
       records (in self.spt_vertex_id) which vertex the path should later be
       read from. Caller must destroy() the returned tree.'''
    started_at = time.time()
    if self.is_reverse:
        # Arrive-by routing: build the tree backwards from the start vertex.
        log.debug('graphserver_get_spt: reverse / shortest_path_tree_retro')
        self.spt_vertex_id = self.beg_vertex_id
        build_tree = self.gserver.shortest_path_tree_retro
    else:
        # Depart-at routing: build the tree forwards toward the end vertex.
        log.debug('graphserver_get_spt: forward / shortest_path_tree')
        self.spt_vertex_id = self.fin_vertex_id
        build_tree = self.gserver.shortest_path_tree
    spt = build_tree(self.beg_vertex_id, self.fin_vertex_id,
                     State(1, self.depart_time), self.walk_opts)
    log.debug('graphserver_get_spt: %s / spt: %s'
              % (misc.time_format_elapsed(started_at),
                 spt,))
    return spt
#
def graphserver_get_path(self, spt):
    '''Extracts the (vertices, edges) path from the shortest path tree.

       :param spt: the shortest path tree from graphserver_get_spt().
       :returns: tuple of (vertices, edges); both empty only if spt.path
                 raised (in which case GWIS_Error is raised instead).
       :raises GWIS_Error: if Graphserver cannot find a path.'''
    time_0 = time.time()
    log.debug('graphserver_get_path: calling spt.path...')
    vertices = []
    edges = []
    try:
        (vertices, edges) = spt.path(self.spt_vertex_id)
        #for vertex in vertices:
        #   log.debug('graphserver_get_path: vertex.label: %s' % vertex.label)
        #for edge in edges:
        #   log.debug('graphserver_get_path: edge: %s' % (edge,))
    except Exception, e:
        # BUG 2286: If Graphserver cannot find a path, e.g., to "Ridgedale
        # Mall", it raises, e.g., "Exception: A path to 1302090 could not be
        # found".
        log.error('Unable to find a route: "%s" / %s'
                  % (str(e), traceback.format_exc(),))
        #raise GWIS_Error('Unable to find a route: %s' % (str(e),))
        raise GWIS_Error(Problem_Base.error_msg_basic)
    finally:
        # Logged on both success and failure paths.
        log.debug('graphserver_get_path: %s / v. cnt: %d / e. cnt: %d'
                  % (misc.time_format_elapsed(time_0),
                     len(vertices), len(edges),))
    return (vertices, edges,)
#
# FIXME: This fcn. is obnoxiously long: split into into multiple fcns.
def path_convert(self, qb, vertices, edges):
    '''Converts a Graphserver (vertices, edges) path into Cyclopath route
       steps and route stops.

       :param qb: the Item_Query_Builder for this request.
       :param vertices: path vertices from graphserver_get_path (len(edges)+1).
       :param edges: path edges from graphserver_get_path.
       :returns: tuple of (route_steps, route_stops, path_len) where path_len
                 is the total bicycle distance in meters.

       Edge payloads are handled per type: Payload_Byway becomes a bicycle
       step; Link becomes a transit step bridging station and network;
       TripAlight/TripBoard are folded into route stops; Crossing becomes the
       transit step that actually rides the vehicle.'''
    # FIXME: should these be route_step.Many() and route_stop.Many()?
    route_steps = []
    route_stops = []
    last_alight = None
    last_board = None
    path_len = 0.0
    time_0 = time.time()
    log.debug('path_convert: making route steps...')
    for i in xrange(0, len(edges)):
        # NOTE: See class Edge in graphserver/core.py.
        edge = edges[i]
        beg_node = vertices[i]
        fin_node = vertices[i+1]
        # Handle byway.
        if isinstance(edge.payload, Payload_Byway):
            # FIXME: 2012.09.26: Is this still true?
            # Should not get a byway after a board edge without another edge.
            g.assurt(last_board is None)
            byway_step = self.make_route_step_bicycle(
                edge.payload, beg_node, fin_node)
            log.debug('path_convert: Byway step: beg_nid: %d / fin_nid: %d'
                      % (byway_step.beg_node_id, byway_step.fin_node_id,))
            if i == 0:
                # For the first step, create a beginning stop.
                log.debug('path_convert: Adding first stop for byway step.')
                stop = route_stop.One(
                    qb, row={'name': self.beg_addr,
                             'is_pass_through': False,
                             'is_transit_stop': False,})
                stop.fit_route_step(byway_step, True)
                route_stops.append(stop)
            else:
                g.assurt(len(route_stops) > 0) # must have at least 1 by now
                if last_alight is not None:
                    log.debug('Adding stop between TripAlight and Payload_Byway.')
                    # Handle the case where we have a TripAlight and then
                    # BywayPayload: we have to create the missing link (route stop).
                    stop = route_stop.One(
                        qb, row={'name': last_alight.fin_sta_name,
                                 'is_pass_through': False,
                                 'is_transit_stop': True,})
                    stop.fit_route_step(byway_step, True)
                    route_stops.append(stop)
                    last_alight = None
                # else: the last step was not an alight, so no stop missing.
                # Make any previous transit step's node id match up with this step
                # and repair the node_id of the last transit stop.
                if ((len(route_steps) > 0)
                        and (route_steps[-1].travel_mode == Travel_Mode.transit)):
                    node_id = (byway_step.beg_node_id if byway_step.forward else
                               byway_step.fin_node_id)
                    if route_steps[-1].forward:
                        route_steps[-1].fin_node_id = node_id
                    else:
                        route_steps[-1].beg_node_id = node_id
                    if ((route_stops[-1].is_transit_stop)
                            and (route_stops[-1].node_id is None)):
                        route_stops[-1].node_id = node_id
            path_len += byway_step.edge_length
            # Push the byway step onto the list of steps.
            route_steps.append(byway_step)
        # Handle link (which links a transit edge and a Cyclopath edge, i.e.,
        # byway->transit and transit->byway transitions).
        elif isinstance(edge.payload, Link):
            # FIXME: 2012.09.26: Is this still true?
            # We should not get a link after a board edge without a transit
            # edge in between.
            g.assurt(last_board is None)
            link_step = self.make_route_step_link(qb, edge, beg_node, fin_node)
            log.debug('Encountered link step')
            if last_alight is not None:
                # We are a link after the alight, so steal its metadata.
                # EXPLAIN: When does Link follow TripAlight
                #          vs. when does Payload_Byway follow TripAlight?
                link_step.step_name = last_alight.step_name
                # Create a stop for the previous alight.
                stop = route_stop.One(
                    qb, row={'name': last_alight.step_name,
                             'is_pass_through': False,
                             'is_transit_stop': True,})
                log.debug('Adding stop after last TripAlight.')
                if ((i < (len(edges) - 1))
                        and (isinstance(edges[i + 1].payload, TripBoard))):
                    # Put stop at the start of the link.
                    stop.fit_route_step(link_step, True)
                else:
                    # Put stop at the end of the link.
                    stop.fit_route_step(link_step, False)
                route_stops.append(stop)
                last_alight = None
            # repair node_ids of the step if possible
            if ((len(route_steps) > 0)
                    and (route_steps[-1].travel_mode == Travel_Mode.bicycle)):
                # grab a node id from the previous bike step
                if route_steps[-1].forward:
                    node_id = route_steps[-1].fin_node_id
                else:
                    node_id = route_steps[-1].beg_node_id
                if link_step.forward:
                    link_step.beg_node_id = node_id
                else:
                    link_step.fin_node_id = node_id
            # Push the link step onto the list of steps.
            route_steps.append(link_step)
        # Handle non-Link transit: TripAlight, TripBoard, and Crossing.
        else:
            tstep = self.make_route_step_transit(qb, edge, beg_node, fin_node)
            # Handle TripAlights.
            if isinstance(edge.payload, TripAlight):
                log.debug('Encountered TripAlight, storing for later')
                # Just store the alight in last_alight. A stop will be created
                # at the end of the loop or at the next byway/link encountered.
                last_alight = tstep
            # Handle TripBoards.
            elif isinstance(edge.payload, TripBoard):
                log.debug('Encountered TripBoard step')
                # Create a stop at the end of the previous step's geometry,
                # or at the start of the next if this is the first edge.
                stop = route_stop.One(
                    qb, row={'name': tstep.step_name,
                             'is_pass_through': False,
                             'is_transit_stop': True,})
                if len(route_steps) > 0:
                    log.debug('Creating stop for TripBoard.')
                    # If the previous step is a bicycle step, place at the end.
                    # If the previous step is a link, it's a weird situation
                    # where (Alight - Link - Board) so place at the end.
                    # And node_id is only set if the last step was a bike step.
                    # FIXME: Are we showing transit boardings at the Cyclopath
                    #        node_endpoint or at the transit stop's actual x,y
                    #        coordinates?
                    stop.fit_route_step(route_steps[-1], False)
                    route_stops.append(stop)
                    # Push metadata onto previous link.
                    if ((route_steps[-1].step_name is None)
                            and (route_steps[-1].travel_mode
                                 == Travel_Mode.transit)):
                        route_steps[-1].step_name = tstep.step_name
                else:
                    # Store the TripBoard to be processed by the next Crossing
                    # step.
                    # FIXME: Make this verbose...
                    log.debug('Storing tripBoard for later use.')
                    last_board = tstep
            # Handle Crossings.
            elif isinstance(edge.payload, Crossing):
                # FIXME: log.verbose
                log.debug('Encountered Crossing edge')
                if last_board is not None:
                    # Add the stop.
                    # FIXME: log.verbose
                    log.debug('Adding stop from previously stored TripBoard.')
                    stop = route_stop.One(
                        qb, row={'name': last_board.step_name,
                                 'is_pass_through': False,
                                 'is_transit_stop': True,})
                    stop.fit_route_step(tstep, True)
                    route_stops.append(stop)
                    last_board = None
                # Repair the node_ids of the step if possible.
                if ((len(route_steps) > 0)
                        and (route_steps[-1].travel_mode == Travel_Mode.bicycle)):
                    # Grab a node id from the previous bike step.
                    if route_steps[-1].forward:
                        node_id = route_steps[-1].fin_node_id
                    else:
                        node_id = route_steps[-1].beg_node_id
                    # BUGFIX: was assigning to link_step (a stale -- or unbound --
                    # variable from an earlier Link iteration); the step being
                    # repaired here is the current Crossing step, tstep.
                    if tstep.forward:
                        tstep.beg_node_id = node_id
                    else:
                        tstep.fin_node_id = node_id
                route_steps.append(tstep)
            # No other edge types.
            else:
                # This code should be unreachable.
                g.assurt(False)
    # We're done processing edges. See if we're missing the finishing stop.
    if route_steps[-1].travel_mode == Travel_Mode.bicycle:
        log.debug('Adding last stop: after a bicycle step.')
        # Add a last stop for the path.
        stop = route_stop.One(
            qb, row={'name': self.fin_addr,
                     'is_pass_through': False,
                     'is_transit_stop': False,})
        stop.fit_route_step(route_steps[-1], False)
        route_stops.append(stop)
    else:
        g.assurt(route_steps[-1].travel_mode == Travel_Mode.transit)
        if last_alight is not None:
            log.debug('Adding last stop: after a transit stop.')
            # We had a transit stop at the very end but no ending link to
            # process it, so add a transit stop.
            stop = route_stop.One(
                qb, row={'name': last_alight.step_name,
                         'is_pass_through': False,
                         'is_transit_stop': True,})
            stop.fit_route_step(route_steps[-1], False)
            route_stops.append(stop)
        else:
            # EXPLAIN: We're all good?
            log.warning('Not adding last stop for after a transit stop?')
    # Fix route steps with null node IDs. I.e., transit stops should be
    # aligned with their byway neighbors so that we have a connected path.
    self.repair_node_ids(route_steps, route_stops)
    log.debug('Path conversion completed')
    # DEVS: Uncomment this if you want a lot of output...
    #for rs in route_steps:
    #   log.debug('  >> step: %s %s to %s %s'
    #             % (rs.transit_type,
    #                str(rs.beg_time),
    #                str(rs.fin_time),
    #                str(rs.transit_name)))
    log.debug('path_convert: %.2f secs / rs. cnt: %d'
              % ((time.time() - time_0), len(route_steps),))
    return (route_steps, route_stops, path_len,)
# Replace null node_ids and beg_node_id/fin_node_id with unique negative
# IDs, so that it's still possible to identify where a stop exists in a
# route, even if the steps aren't part of the byway graph (since we don't
# support editing multimodal routes... yet).
# BUG nnnn: Routed p2: Editable routes.
def repair_node_ids(self, route_steps, route_stops):
    '''Fills in missing node ids on route steps and route stops (in place).

       Stops are matched to steps by comparing the stop's x,y to the first
       and last points of each step's geometry, scanning forward from the
       last matched step (steps and stops are in path order).'''
    # Repair any null step node ids.
    route_step.Many.repair_node_ids(self.qb, route_steps)
    # Iterate through the stops.
    last_found_step = -1
    for stop in route_stops:
        if not stop.node_id:
            # must find a route_step that fits this geometry and assign
            # the appropriate node id
            step_i = last_found_step + 1
            while step_i < len(route_steps):
                step = route_steps[step_i]
                # FIXED?: Was: geom = gml.flat_to_xys(step.geometry[2:])
                geom = gml.flat_to_xys(step.geometry)
                # FIXME: MAGIC NUMBERS. FIXME: Use Ccp precision of 0.01?
                #fixme: do something like this?
                #existing_xy = geometry.wkt_point_to_xy(rows[0]['existing_xy_wkt'],
                #                                       precision=conf.node_precision)
                #proposed_xy = geometry.wkt_point_to_xy(self.endpoint_wkt,
                #                                       precision=conf.node_precision)
                #
                # FIXME: How does these node IDs relate to node_endpoint, etc.?
                if ((abs(stop.x - geom[0][0]) < .0001)
                        and (abs(stop.y - geom[0][1]) < .0001)):
                    # matches start of the step
                    stop.node_id = step.beg_node_id
                    last_found_step = step_i
                    break
                elif ((abs(stop.x - geom[-1][0]) < .0001)
                        and (abs(stop.y - geom[-1][1]) < .0001)):
                    # matches end of the step
                    stop.node_id = step.fin_node_id
                    last_found_step = step_i
                    break
                step_i = step_i + 1
            # end while.
            # Since we moved transit edges' endpoints to match Cyclopath
            # node_endpoints, we should have found a matching step by now.
            g.assurt(stop.node_id)
        # else, stop.node_id is nonzero, so nothing to do.
# else, stop.node_id is nonzero, so nothing to do.
# Graphserver returns a route that leaves at the start time and will wait
# at the transit stop for however long is necessary. Working back from the
# first transit stop's departure, adjust start time of cycling steps to
# minimize wait.
def steps_adjust_jit_arrival(self, route_steps, slack):
    '''Retimes the initial bicycle steps for just-in-time transit arrival.

       :param route_steps: the full list of route steps (mutated in place).
       :param slack: seconds to wait at the first transit stop.'''
    time_0 = time.time()
    log.debug('steps_adjust_jit_arrival: len(route_steps): %d'
              % (len(route_steps),))
    first_transit_step = None
    prev_transit_step = None
    steps_to_adjust = []
    # Look for the first transit edge.
    for step in route_steps:
        # NOTE: In Pre-Route Sharing, travel_mode was called transit_type
        #       and in this fcn., we checked that it was 'board_bus' or
        #       'board_train'. Now, Travel_Mode.transit also includes the Crossing
        #       and Link types (see also TripAlight and TripBoard), but Crossing
        #       not Link should be the first step...
        if step.travel_mode == Travel_Mode.transit:
            # FIXME: g.assurt this is TripBoard?
            if first_transit_step is None:
                first_transit_step = step
            prev_transit_step = step
        else:
            # We trace back from the first transit stop, so assemble a list of
            # bicycle edges in reverse order.
            # FIXME: Only applies to init. bike edges? I.e., not after alight?
            #        What about transfers?
            if first_transit_step is None:
                steps_to_adjust.insert(0, step)
            if prev_transit_step is not None:
                # BUG 2296 Correct the board edges' end times: set to the
                # Crossing edge's start time and subtract a minute.
                prev_transit_step.fin_time = step.beg_time - 60
                prev_transit_step = None
    log.debug('steps_adjust_jit_arrival: found: %s (%d edges)'
              % (first_transit_step, len(steps_to_adjust),))
    # Bug 2293: If the first step is a transit edge, it's a Board edge, and
    # it's start time is the time for which the user requested the route
    # and for which we configured the State() object when we submitted the
    # problem to Graphserver. Correct the Board edge here, otherwise you get
    # wonky results -- e.g., if you request a route at 4 AM but the first bus
    # isn't until 6 AM, you'll get told to board the bus at 6 AM.
    # If there are no transit edges, this is an all-cycling route, so the
    # route steps do not need adjustment. Otherwise, go through route steps
    # and adjust the cycling edges' start and end times.
    if first_transit_step is not None:
        transit_departs = first_transit_step.fin_time
        total_duration = slack # No. seconds to wait at first transit stop
        first_transit_step.beg_time = transit_departs - total_duration
        # Walk backwards from the first transit departure, preserving each
        # bike step's duration but shifting its window later.
        for step in steps_to_adjust:
            duration = step.fin_time - step.beg_time
            step.fin_time = transit_departs - total_duration
            total_duration += duration
            step.beg_time = transit_departs - total_duration
    log.debug('steps_adjust_jit_arrival: %s / adjust cnt: %d'
              % (misc.time_format_elapsed(time_0),
                 len(steps_to_adjust),))
# *** Route Step support routines
#
def make_route_step_bicycle(self, payload, beg_node, fin_node):
    '''Builds a bicycle-mode route step from a Payload_Byway path edge.

       :param payload: the Payload_Byway edge payload.
       :param beg_node: the Graphserver vertex at the step's start.
       :param fin_node: the Graphserver vertex at the step's end.
       :returns: a new route_step.One.'''
    # FIXME: Does route_step.forward and payload.reverse match up?
    #        Does it affect beg_node_id and fin_node_id?
    step = route_step.One()
    step.travel_mode = Travel_Mode.bicycle
    step.init_from_byway(payload.byway)
    step.forward = payload.forward
    # This is the generic rating; the caller (route.py) overwrites it with
    # the logged-in user's own rating, if any -- byway.user_rating is not
    # usable here because the Transit_Graph holds anonymous-user byways.
    step.rating = payload.byway.generic_rating
    # Transit-only attrs (beg_sta_name, fin_sta_name, duration, transit_name)
    # are left unset for bicycle edges.
    # FIXME: Return 'duration'? It wouldn't be that hard to calculate
    #        (we already do so in Payload_Cyclopath.cost_graphserver_passable)
    # Timing comes from Graphserver's per-vertex states.
    step.beg_time = beg_node.state.time
    step.fin_time = fin_node.state.time
    return step
#
def make_route_step_link(self, qb, edge, beg_node, fin_node):
    """Build a route_step for a Link edge, which joins a street-network
    vertex to a transit-station vertex (in either direction)."""
    # In old CcpV1, there was no travel_mode but instead here we used:
    #    'transit_type': 'link',
    rs = route_step.One(
        qb, row={'travel_mode': Travel_Mode.transit,
                 'forward': True,
                 'beg_time': beg_node.state.time,
                 'fin_time': fin_node.state.time,})
    # Get start and end points.
    # Graphserver station labels start with 'sta-'; whichever end is not
    # a station is assumed to be a Cyclopath network node.
    if beg_node.label.startswith('sta'):
        sql_beg_pt_wkt = self.get_xy_wkt_from_station_node(beg_node)
        sql_fin_pt_wkt = self.get_xy_wkt_from_network_node(fin_node)
    else:
        g.assurt(fin_node.label.startswith('sta')) # Is this right?
        sql_beg_pt_wkt = self.get_xy_wkt_from_network_node(beg_node)
        sql_fin_pt_wkt = self.get_xy_wkt_from_station_node(fin_node)
    # Get straightline geometry
    # FIXME: Can graph calculate this when it loads? (a) So we don't waste
    # time calculating it now, and (b) so we don't waste time re-calculating
    # every time someone makes a route request.
    # EXPLAIN: Why are we using SVG here? We usually use E/WKT and xy...
    rs.geometry_svg = self.get_straightline_geom(sql_beg_pt_wkt,
                                                 sql_fin_pt_wkt)
    return rs
# A note about the difference between PSV and STA nodes. I'm still not quite
# sure I get it. =)
#
# From Brandon Martin-Anderson (Graphserver Hero Extraordinaire):
#
# "sta-" vertices are "station" vertices and correspond to physical
# transit stops. "psv-" are Pattern-Stop Vertices, or PSVs. These
# vertices represent the state of being on a transit vehicle traveling
# on a particular pattern at a particular stop. For example, a class of
# edges called "TripBoard" edges model moving between station vertices
# which model being at a station and _off_ a vehicle to pattern-stop
# vertices which model being at a station _on_ a vehicle. Then a class
# of edges called "Crossing" edges model going between two PSVs. It's
# a little contrived, but it's necessary to work around the dreaded
# Pro-Active Transferring Bug.
#
# Reference:
#
# https://groups.google.com/group/graphserver/msg/7a18e62fdccf0722
# https://github.com/bmander/graphserver/wiki/Board---Alight---Crossing-Transit-Graphs
#
# Definition:
#
# alighting: Descend from a train, bus, or other form of transportation.
#
def make_route_step_transit(self, qb, edge, beg_node, fin_node):
    """Build a route_step for a transit edge (TripBoard, TripAlight, or
    Crossing).

    Resolves the station ('sta-') vertices adjacent to the edge's PSV
    ('psv-') vertices so the step can be named after the stop(s) and the
    transit route; for Crossing edges, also attaches a straight-line
    geometry between the two stations.
    """
    rs = route_step.One(
        qb, row={'travel_mode': Travel_Mode.transit,
                 'forward': True,
                 'beg_time': beg_node.state.time,
                 'fin_time': fin_node.state.time,})
    transit_name = None
    beg_sta_name = None
    fin_sta_name = None
    beg_sta_node = None
    fin_sta_node = None
    # NOTE: Graphserver hard-codes the node labels with 'sta-' and 'psv-'
    #       prefixes. There's no other mechanism to determine the type of
    #       vertex other than doing a string compare on its label.
    if beg_node.label.startswith('psv'):
        # Get beg_sta_name and beg_sta_node.
        transit_name = self.get_transit_route_name(beg_node)
        beg_sta_node = self.transit_get_station_vertex(beg_node.label,
                                                       True)
        # BUG 2287: See below
        if beg_sta_node is None:
            log.info(
                'make_route_step_transit: no beg_sta_node: %s (%s / %s)'
                % (edge.payload, beg_node, fin_node,))
            # Well, if we go the other way, we should find a transit station.
            beg_sta_node = self.transit_get_station_vertex(beg_node.label,
                                                           False)
        if beg_sta_node is not None:
            beg_sta_name = self.get_stop_name(beg_sta_node)
        else:
            # BUG nnnn: l10n
            beg_sta_name = 'Start Here'
            log.warning('...step_transit: no beg_sta_node: beg_node: %s'
                        % (beg_node,))
    # MAGIC_NUMBER: 'psv': ...
    if fin_node.label.startswith('psv'):
        transit_name = self.get_transit_route_name(fin_node)
        fin_sta_node = self.transit_get_station_vertex(fin_node.label,
                                                       False)
        # BUG 2287: Some transit stops' PSV nodes only have incoming or
        #           outgoing Crossing edges, but do not have incoming or
        #           outgoing Alight or Board edges.
        #
        #           E.g. search for a multimodal route from 'cs building' to
        #           'moa' with Minimize Biking and More Busing, 6/29/2011 at
        #           8:45 AM. The last edge, before the Alight, is a Crossing
        #           whose vertices are both PSVs, but one of the PSVs is only
        #           attached to a Crossing edge, and not to an Alight or
        #           Board.
        #
        #           For now, this seems to do the trick: just look at the
        #           other direction (so, if the incoming edges is just a
        #           Crossing edge, look at the outgoing edges).
        #
        #           I'm not [lb isn't] 100% sure this is the proper solution,
        #           but it works for now....
        if fin_sta_node is None:
            log.info(
                'make_route_step_transit: no fin_sta_node: %s (%s / %s)'
                % (edge.payload, beg_node, fin_node,))
            fin_sta_node = self.transit_get_station_vertex(fin_node.label,
                                                           True)
        if fin_sta_node is not None:
            fin_sta_name = self.get_stop_name(fin_sta_node)
        else:
            # BUG nnnn: l10n
            fin_sta_name = 'End Here'
            log.warning('...step_transit: no fin_sta_node: fin_node: %s'
                        % (fin_node,))
    # FIXME: MAGIC_NUMBER: hack to identify light rail...
    # FIXME: What's the long term solution here? Is this data indicated in
    #        the GTFSDB?
    transit_type_name = 'Bus'
    if transit_name == '55':
        transit_type_name = 'Train'
    # MAYBE: Do we (in route_step) or does flashclient care about board v.
    #        alight?
    if isinstance(edge.payload, TripBoard):
        # TripBoard only needs station name and route name
        rs.step_name = '%s %s at %s' % (transit_type_name,
                                        transit_name,
                                        beg_sta_name or fin_sta_name,)
    elif isinstance(edge.payload, TripAlight):
        # TripAlight only needs station name and route name
        rs.step_name = '%s %s at %s' % (transit_type_name,
                                        transit_name,
                                        fin_sta_name or beg_sta_name,)
    elif isinstance(edge.payload, Crossing):
        # Crossing only needs route name
        rs.step_name = '%s %s' % (transit_type_name, transit_name,)
        # If Crossing, get straightline geometry from start to end station.
        if (beg_sta_node is not None) and (fin_sta_node is not None):
            sql_beg_pt_wkt = self.get_xy_wkt_from_station_node(beg_sta_node)
            sql_fin_pt_wkt = self.get_xy_wkt_from_station_node(fin_sta_node)
            rs.geometry_svg = self.get_straightline_geom(sql_beg_pt_wkt,
                                                         sql_fin_pt_wkt)
        else:
            # Crossing should have PSV endpoints, but not always...
            log.warning('...step_transit: messed up Crossing?: %s / %s / %s'
                        % (edge, beg_sta_node, fin_sta_node,))
    else:
        # Unrecognized payload type: leave the step without geometry.
        log.warning('EXPLAIN: Why no geometry for this transit step?')
        rs.geometry_svg = None
    return rs
# ***
# FIXME: This works for now, but it needs cleaning up
#
def transit_get_station_vertex(self, psv_label, is_outgoing):
    '''Search for a station vertex (i.e. its label starts with sta-)
    so that we can use its id to look up stop information. PSV vertices
    do not contain the stop id unfortunately, so we can't use them.
    NOTE: It appears that some stops cannot be found when using certain
    values for is_outgoing, so if one value doesn't work, the other
    should be used as a fallback.'''
    vertex = self.gserver.get_vertex(psv_label)
    edge_list = vertex.outgoing if is_outgoing else vertex.incoming
    for an_edge in edge_list:
        edge_payload = an_edge.payload
        if isinstance(edge_payload, TripAlight):
            # This is the last route step when routing to a transit stop
            # as the final destination.
            g.assurt(is_outgoing)
            return an_edge.to_v
        if isinstance(edge_payload, TripBoard):
            # If you route from a transit stop, look at incoming edges...
            # I guess.
            g.assurt(not is_outgoing)
            return an_edge.from_v
        # Otherwise it's a Crossing; keep scanning -- if there's a next
        # edge in the list, it's an Alight or Board edge.
    return None
# *** Static Support Routines
# FIXME: BUG 2291: This fcn. does not respect daylight savings time
# FIXME: If date outside (before or after) GTFSDB calendar, warn user
#
@staticmethod
def date_flashclient_mktime(date_str):
    """Convert a flashclient date string to seconds since the epoch.

    Flashclient sends ActionScript Date.toString() values, e.g.,
    "Fri Mar 25 03:30:00 GMT-0500 2011". Python's strptime has no
    directive for the "GMT-0500" token, so remove the offset token
    before parsing.

    FIXME: BUG 2291: the GMT offset is discarded, so this fcn. does not
           respect daylight savings time (time.mktime interprets the
           remaining fields in the server's local time zone).
    """
    log.debug('date_flashclient_mktime: date_str (1): %s' % (date_str,))
    # Drop any "GMT[+-]HHMM"-style token. (The old code only recognized
    # Minneapolis's GMT-0500/GMT-0600 and asserted on anything else,
    # which crashed on requests from clients in other time zones.)
    all_tokens = date_str.split()
    kept_tokens = [tok for tok in all_tokens if not tok.startswith('GMT')]
    if len(kept_tokens) == len(all_tokens):
        log.warning('date_flashclient_mktime: no GMT offset found: %s'
                    % (date_str,))
    date_str = ' '.join(kept_tokens)
    log.debug('date_flashclient_mktime: date_str: %s' % (date_str,))
    secs_since_epoch = time.mktime(time.strptime(date_str,
                                   '%a %b %d %H:%M:%S %Y'))
    log.debug('date_flashclient_mktime: secs_since_epoch: %s'
              % (secs_since_epoch,))
    return secs_since_epoch
# *** SQL Support Routines
#
def get_xy_wkt_from_network_node(self, node_endpoint):
    """Return the point geometry, as (E)WKT, of a street-network node.

    The Graphserver vertex label is the Cyclopath node id; look up the
    node_endpoint row by that id and return its endpoint geometry.
    """
    node_id = node_endpoint.label
    ndpt = node_endpoint.Many.node_endpoint_get(self.qb, node_id, pt_xy=None)
    g.assurt(ndpt is not None)
    try:
        if ndpt.endpoint_wkt:
            geom = ndpt.endpoint_wkt
        else:
            g.assurt(False) # 2012.08.02: Deprecated. See ndpt.endpoint_wkt.
            # E.g., "ST_GeomFromEWKT('SRID=%d;POINT(%.6f %.6f)')"
            point_sql = geometry.xy_to_raw_point_lossless(ndpt.endpoint_xy)
            sql_points = self.qb.db.sql("SELECT %s" % (point_sql,))
            log.debug('get_xy_wkt_from_network_node: rows: %s'
                      % (sql_points,))
            geom = sql_points[0]['st_asewkt']
    except IndexError:
        # BUG FIX: the old format string had a single %s but was given two
        # arguments, so logging itself raised TypeError and masked the
        # IndexError being handled.
        log.warning(
            'get_xy_wkt_from_network_node: missing geom: %s / node_id: %s'
            % (node_endpoint, node_id,))
        # I'm [lb's] not sure how best to propagate this error, so let's
        # just assume it'll never happen, cool beans?
        g.assurt(False)
        geom = None
    return geom
#
def get_straightline_geom(self, beg_pt_wkt, fin_pt_wkt):
    """Return an SVG path for the straight line between two WKT points.

    The Y axis is flipped (ST_Scale ..., 1, -1, 1) because SVG's Y axis
    grows downward, opposite the map's.
    """
    rows = self.qb.db.sql(
        """
        SELECT
        ST_AsSVG(ST_Scale(ST_MakeLine(('%s'), ('%s')), 1, -1, 1), 0, %d)
        AS geometry
        """ % (beg_pt_wkt, fin_pt_wkt, conf.db_fetch_precision,))
    return rows[0]['geometry']
#
# MAYBE: Rename from_ and to_ to beg_ and fin_.
def get_straightline_geom_len_sql(self, beg_xy, fin_xy):
    """Return the straight-line distance between two xy points, computed
    via SQL (compare get_straightline_geom_len_raw, which computes it
    in Python)."""
    # FIXME: Why not use wkt_point_to_xy, et al.?
    # E.g., "ST_GeomFromEWKT('SRID=%d;POINT(%.6f %.6f)')"
    point_lhs = geometry.xy_to_raw_point_lossless(beg_xy)
    point_rhs = geometry.xy_to_raw_point_lossless(fin_xy)
    # NOTE(review): ST_AsEWKT returns text, but ST_MakeLine expects
    # geometry arguments -- this looks like it relies on an implicit
    # cast (or is broken); verify against the _raw variant.
    rows = self.qb.db.sql(
        "SELECT ST_Length(ST_MakeLine(ST_AsEWKT(%s), ST_AsEWKT(%s)))"
        % (point_lhs, point_rhs,))
    return rows[0]['st_length']
#
def get_straightline_geom_len_raw(self, beg_xy, fin_xy):
    """Return the straight-line distance between two xy points, computed
    in Python (no SQL round-trip)."""
    return geometry.distance(beg_xy, fin_xy)
# TRANSITDB
#
def get_stop_xy(self, station_node):
    """Return the (stop_lat, stop_lon) row for a station ('sta-') vertex.

    The Graphserver station label is "sta-<stop_id>"; slice off the
    four-character prefix to recover the GTFS stop id.
    """
    cursor = self.db_gtfs.conn.cursor()
    # MAGIC_NUMBER: 4: len('sta-').
    station_id = station_node.label[4:]
    # BUG FIX: quote the stop id, like the trips/routes queries do: the
    # GTFS spec defines stop_id as text, so an unquoted literal breaks
    # on non-numeric ids.
    # FIXME: Prefer a parameterized query (the id comes from graph
    #        labels, but string-built SQL is still fragile).
    cursor.execute(
        "SELECT stop_lat, stop_lon FROM stops WHERE stop_id = '%s'"
        % (station_id,))
    row = cursor.fetchone()
    return row
#
def get_stop_map_xy(self, station_node):
    """Return the stop's position as [x, y] in map (projected) coords.

    The GTFS feed stores stops as lat/lon, so build a lat/lon point and
    transform it into the map's SRID.
    """
    stop_xy = self.get_stop_xy(station_node)
    log.debug('get_stop_map_xy: stop_xy: %s' % (stop_xy,))
    # Convert to map coordinates.
    # NOTE: stop_xy is (lat, lon), so swap to (lon, lat), i.e., (x, y).
    # E.g., "ST_GeomFromEWKT('SRID=%d;POINT(%.6f %.6f)')"
    point_sql = geometry.xy_to_raw_point_lossless((stop_xy[1], stop_xy[0],),
                                                  srid=conf.srid_latlon)
    rows = self.qb.db.sql(
        """
        SELECT
        ST_X(ST_Transform(%s, %d))
        AS xcoord,
        ST_Y(ST_Transform(%s, %d))
        AS ycoord
        """ % (point_sql, conf.default_srid,
               point_sql, conf.default_srid,))
    log.debug('get_stop_map_xy: %s' % (rows[0],))
    return [rows[0]['xcoord'], rows[0]['ycoord']]
#
def get_xy_wkt_from_station_node(self, station_node):
    """Return the stop's position, as EWKT in the map's SRID, for a
    station ('sta-') vertex."""
    lat_lon = self.get_stop_xy(station_node)
    # The GTFS row is (lat, lon); a point wants (x, y) == (lon, lat).
    # E.g., "ST_GeomFromEWKT('SRID=%d;POINT(%.6f %.6f)')"
    point_sql = geometry.xy_to_raw_point_lossless(
        (lat_lon[1], lat_lon[0],), srid=conf.srid_latlon)
    transform_sql = ("SELECT ST_AsEWKT(ST_Transform(%s, %d))"
                     % (point_sql, conf.default_srid,))
    rows = self.qb.db.sql(transform_sql)
    log.debug('get_xy_wkt_from_station_node: rows: %s' % (rows,))
    return rows[0]['st_asewkt']
#
def get_stop_name(self, station_node):
    """Return the GTFS stop_name for a station ('sta-') vertex."""
    cursor = self.db_gtfs.conn.cursor()
    # MAGIC_NUMBER: 4: len('sta-').
    station_id = station_node.label[4:]
    # BUG FIX: quote the stop id, like the trips/routes queries do: the
    # GTFS spec defines stop_id as text, so an unquoted literal breaks
    # on non-numeric ids.
    cursor.execute(
        "SELECT stop_name FROM stops WHERE stop_id = '%s'"
        % (station_id,))
    row = cursor.fetchone()
    # EXPLAIN: When is the value not a string?
    # (Presumably numeric-looking names come back as numbers from the
    #  feed importer -- TODO confirm.)
    stop_name = str(row[0])
    return stop_name
#
def get_transit_route_name(self, psv_node):
    """Return the GTFS route_short_name (e.g., "55") of the trip that a
    PSV (pattern-stop) vertex's state is riding."""
    # Fetch route_short_name from the transit database.
    cursor = self.db_gtfs.conn.cursor()
    cursor.execute(
        "SELECT route_id FROM trips WHERE trip_id = '%s'"
        % (psv_node.state.trip_id,))
    row = cursor.fetchone()
    rte_id = row[0]
    # NOTE: This is a transit route, not a Cyclopath route.
    cursor.execute(
        "SELECT route_short_name FROM routes WHERE route_id = '%s'"
        % (rte_id,))
    row = cursor.fetchone()
    # EXPLAIN: When is the value not a string?
    # (Presumably numeric route names come back as numbers -- TODO confirm.)
    route_name = str(row[0])
    return route_name
# ***
# *** Unit tests
def unit_test_01():
    """Smoke-test: parse a flashclient-style CDT date string."""
    test_date = "Fri Mar 25 03:30:00 GMT-0500 2011"
    Problem.date_flashclient_mktime(test_date)

if __name__ == '__main__':
    unit_test_01()
|
|
import collections
import copy
import logging
import os
import Queue
import socket
import sys
import threading
import time
import ujson
import bisect
import zmq
import utils
logger = logging.getLogger(__name__)
def _gauge_pair():
""" Track gauge as a pair of (sum, count) """
return [0, 0]
class Speakeasy(object):
    """Aggregate metrics received over a ZeroMQ PULL socket (and an
    optional legacy UNIX datagram socket) and periodically emit them;
    also supports live querying via a REP command socket.

    Three worker threads cooperate (started by start()):
      * poll_thread    -- polls the metric/command/legacy sockets
      * process_thread -- drains metrics_queue into self.metrics
      * emit_thread    -- snapshots and emits every emission_interval secs
    Shared state (self.metrics) is guarded by metrics_lock.
    """

    def __init__(self, host, metric_socket, cmd_port, pub_port, emitter_name,
                 emitter_args=None, emission_interval=60, legacy=None,
                 hwm=20000, socket_mod=None):
        """ Aggregate metrics and emit. Also support live data querying.

        host -- hostname reported with published metrics
        metric_socket -- filesystem path for the ipc:// PULL socket
        cmd_port -- TCP port for the REP command socket
        pub_port -- TCP port for the PUB socket (falsy disables publishing)
        emitter_name -- emitter module name (see import_emitter)
        emitter_args -- iterable of 'key=value' strings for the emitter
        emission_interval -- seconds between emissions
        legacy -- optional path for a legacy UNIX datagram socket
        hwm -- ZeroMQ high-water mark for the PULL socket
        socket_mod -- optional chmod mode applied to metric_socket
        """
        self.metric_socket = metric_socket
        self.pub_port = pub_port
        self.cmd_port = cmd_port
        self.emitter_name = emitter_name
        self.emission_interval = emission_interval
        self.hostname = host
        self.legacy = legacy
        # Percentiles published/emitted for PERCENTILE metrics.
        self.percentiles = [0.5, 0.75, 0.95, 0.99]
        self.metrics_queue = Queue.Queue()
        self.metrics_lock = threading.RLock()

        # Setup legacy socket if needed
        self.legacy_socket = None
        if self.legacy:
            if os.path.exists(self.legacy):
                logger.warn('Remove existing legacy socket "{0}" and recreating'.format(self.legacy))
                os.remove(self.legacy)
            self.legacy_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
            self.legacy_socket.bind(self.legacy)
            # zmq.Poller reports plain (non-zmq) sockets by their file
            # descriptor, so remember it for lookup in poll_sockets().
            self.legacy_socket_fno = self.legacy_socket.fileno()

        # Process the args for emitter
        self.emitter_args = {}
        if emitter_args:
            for arg in emitter_args:
                k, v = arg.split('=')
                self.emitter_args[k] = v

        # Setup the emitter
        self.emitter = import_emitter(self.emitter_name, **self.emitter_args)
        if not self.emitter:
            logger.warn("No emitter found")

        self.context = zmq.Context()
        # Listen for metrics
        self.recv_socket = self.context.socket(zmq.PULL)
        # Increase the HWM
        self.recv_socket.set_hwm(hwm)
        self.recv_socket.bind('ipc://{0}'.format(self.metric_socket))
        if socket_mod:
            os.chmod(self.metric_socket, socket_mod)

        # Listen for commands
        self.cmd_socket = self.context.socket(zmq.REP)
        self.cmd_socket.bind('tcp://*:{0}'.format(self.cmd_port))

        # Publish metrics
        if self.pub_port:
            self.pub_socket = self.context.socket(zmq.PUB)
            self.pub_socket.bind('tcp://*:{0}'.format(self.pub_port))

        # Register sockets for polling
        self.poller = zmq.Poller()
        self.poller.register(self.recv_socket, zmq.POLLIN)
        self.poller.register(self.cmd_socket, zmq.POLLIN)
        if self.legacy_socket:
            self.poller.register(self.legacy_socket, zmq.POLLIN)

        # Setup poll and emit thread
        # NOTE(review): self.running is first assigned in __start(), so
        # calling shutdown() before start() raises AttributeError.
        self.poll_thread = threading.Thread(target=self.poll_sockets, args=())
        self.emit_thread = threading.Thread(target=self.emit_metrics, args=())
        self.process_thread = threading.Thread(target=self.process_metrics_queue, args=())

        # Init metrics
        # Index metrics by appname
        self.metrics = {}

    def process_metrics_queue(self):
        """Thread target: drain metrics_queue into self.metrics."""
        logger.info("Start processing metrics queue")
        while self.running:
            try:
                metric, legacy = self.metrics_queue.get(block=False)
            except Queue.Empty:
                # Nothing queued; back off briefly instead of busy-spinning.
                time.sleep(0.01)
                continue
            try:
                self.process_metric(metric, legacy=legacy)
            except Exception as e:
                logger.warn("Failed to process metric: {0}".format(e))
            self.metrics_queue.task_done()

    def gauge_append(self, lst, value):
        # lst is a [sum, count] pair (see _gauge_pair).
        lst[0] += value
        lst[1] += 1

    def gauge_sum(self, lst):
        # NOTE: returns the running average (sum / count), despite the name.
        return float(lst[0])/lst[1]

    def process_gauge_metric(self, app_name, metric_name, value):
        """Fold value into the named gauge; return the running average."""
        with self.metrics_lock:
            dp = self.metrics[app_name]['GAUGE'][metric_name]
            self.gauge_append(dp, value)
            return self.gauge_sum(dp)

    def process_counter_metric(self, app_name, metric_name, value):
        """Add value to the named counter; return the running total."""
        with self.metrics_lock:
            self.metrics[app_name]['COUNTER'][metric_name] += value
            return self.metrics[app_name]['COUNTER'][metric_name]

    def process_metric(self, metric, legacy=False):
        """ Process metrics and store and publish """
        if legacy:
            # Legacy format for metrics is slightly different...
            # Index them under same "app name"
            app_name = '__LEGACY__'
            metric_name, value, metric_type = metric.split('|')
        else:
            app_name, metric_name, metric_type, value = metric
        try:
            value = float(value)
        except ValueError:
            logger.warn("Failed to cast metric value to float - {0}".format(metric))
            return
        if app_name not in self.metrics:
            self.init_app_metrics(app_name)
        dp = None
        if self.pub_port:
            pub_metrics = []
        else:
            pub_metrics = None
        pub_val = None
        if metric_type == 'GAUGE':
            pub_val = self.process_gauge_metric(app_name, metric_name, value)
            # Publish the current running average
        elif metric_type == 'PERCENTILE' or metric_type == 'HISTOGRAM':
            # Kill off the HISTOGRAM type!!
            metric_type = 'PERCENTILE'
            # Track average value separately
            avg_pub_val = self.process_gauge_metric(app_name, metric_name + 'average', value)
            with self.metrics_lock:
                dp = self.metrics[app_name][metric_type][metric_name]
                # dp must be sorted before passing to utils.percentile
                bisect.insort(dp, value)
                # Publish the current running percentiles
                if self.pub_port:
                    cur_time = time.time()
                    for p in self.percentiles:
                        pub_metrics.append((self.hostname, app_name,
                                            '{0}{1}_percentile'.format(metric_name, int(p*100)),
                                            'GAUGE', utils.percentile(dp, p),
                                            cur_time))
                    pub_metrics.append((self.hostname, app_name, metric_name + 'average', 'GAUGE', avg_pub_val, cur_time))
        elif metric_type == 'COUNTER':
            pub_val = self.process_counter_metric(app_name, metric_name, value)
            # Publish the running count
        else:
            logger.warn("Unrecognized metric type - {0}".format(metric))
            return
        if self.pub_port:
            # PERCENTILE metrics were already appended above.
            if metric_type != 'PERCENTILE':
                pub_metrics.append((self.hostname, app_name, metric_name,
                                    metric_type, pub_val, time.time()))
            msg = ujson.dumps(pub_metrics)
            self.pub_socket.send(msg)

    def process_command(self, cmd):
        """ Process command and reply """
        # TODO: Do something here
        pass

    def poll_sockets(self):
        """ Poll metrics socket and cmd socket for data """
        logger.info("Start polling")
        while self.running:
            # 1000 ms timeout so self.running is re-checked regularly.
            socks = dict(self.poller.poll(1000))
            if socks.get(self.recv_socket) == zmq.POLLIN:
                try:
                    metric = ujson.loads(self.recv_socket.recv())
                    # Put metric on metrics queue
                    self.metrics_queue.put((metric, False))
                except ValueError as e:
                    logger.warn("Error receving metric: {0}".format(e))
            if socks.get(self.cmd_socket) == zmq.POLLIN:
                cmd = ujson.loads(self.cmd_socket.recv())
                # Process command
                self.process_command(cmd)
            if self.legacy and socks.get(self.legacy_socket_fno) == zmq.POLLIN:
                # Process legacy format
                try:
                    data, addr = self.legacy_socket.recvfrom(8192)
                    self.metrics_queue.put((data, True))
                except socket.error, e:
                    logger.error('Error on legacy socket - {0}'.format(e))
        logger.info("Stop polling")

    def emit_metrics(self):
        """ Send snapshot of metrics through emitter """
        while self.running:
            logger.info("Emit metrics")
            # Grab "this is what the world looks like now" snapshot
            metrics_ss = self.snapshot()
            e_start = time.time()
            if self.emitter:
                self.emitter.emit(metrics_ss)
            e_end = time.time()
            # Sleep for 1 second interval until time to emit again
            # (sleeping in <=1s slices keeps shutdown responsive).
            if (e_end - e_start) < self.emission_interval:
                sleep_until = time.time() + (self.emission_interval - (e_end - e_start))
                while self.running:
                    ct = time.time()
                    if ct > sleep_until:
                        break
                    else:
                        if sleep_until - ct < 1:
                            time.sleep(sleep_until - ct)
                        else:
                            time.sleep(1)
        logger.info("Stop emitting")

    def snapshot(self):
        """
        Return a snapshot of current metrics
        [(app, metric, val, type, timestamp), ...]
        """
        metrics = []
        with self.metrics_lock:
            logger.debug("Inside of metrics lock")
            ss = copy.deepcopy(self.metrics)
            # Reset metrics
            # NOTE: resets GAUGE/PERCENTILE only; COUNTERs are cumulative.
            self.reset_metrics()
        for app in ss:
            for m, val in ss[app]['COUNTER'].iteritems():
                metrics.append((app, m, val, 'COUNTER', time.time()))
            for m, vals in ss[app]['GAUGE'].iteritems():
                if vals[1] == 0:
                    logger.debug("No values for metric: {0}".format(m))
                    continue
                if vals:
                    # Gauge pairs are [sum, count]; emit the average.
                    metrics.append((app, m, vals[0] / float(vals[1]),
                                    'GAUGE', time.time()))
            for m, vals in ss[app]['PERCENTILE'].iteritems():
                if len(vals) == 0:
                    logger.debug("No values for metric: {0}".format(m))
                    continue
                # Emit 50%, 75%, 95%, 99% as GAUGE
                for p in self.percentiles:
                    # Assume the metric name has a trailing separator to append
                    # the percentile to
                    metrics.append((app, '{0}{1}_percentile'.format(m, int(p*100)),
                                    utils.percentile(vals, p), 'GAUGE', time.time()))
        return metrics

    def reset_metrics(self):
        """ Reset metrics for next interval """
        # COUNTER is deliberately left alone (cumulative across intervals).
        for app in self.metrics:
            with self.metrics_lock:
                self.metrics[app]['GAUGE'] = collections.defaultdict(_gauge_pair)
                self.metrics[app]['PERCENTILE'] = collections.defaultdict(list)

    def init_app_metrics(self, app):
        """ Setup initial metric structure for new app """
        if app not in self.metrics:
            with self.metrics_lock:
                self.metrics[app] = {
                    'GAUGE': collections.defaultdict(_gauge_pair),
                    'COUNTER': collections.defaultdict(int),
                    'PERCENTILE': collections.defaultdict(list)
                }

    def start(self):
        # Public wrapper; see __start().
        self.__start()

    def shutdown(self):
        # Public wrapper; see __stop(). Only valid after start().
        self.__stop()

    def __start(self):
        # running is the flag checked by all three worker loops.
        self.running = True
        self.poll_thread.start()
        self.emit_thread.start()
        self.process_thread.start()

    def __stop(self):
        self.running = False
        logger.info("Shutting down")
        if self.poll_thread:
            logger.info("Waiting for poll thread to stop...")
            self.poll_thread.join()
        if self.emit_thread:
            logger.info("Waiting for emit thread to stop...")
            self.emit_thread.join()
        if self.process_thread:
            logger.info("Waiting for process thread to stop...")
            self.process_thread.join()
        self.__cleanup()

    def __cleanup(self):
        # Remove the UNIX socket files this process created.
        if self.legacy:
            if os.path.exists(self.legacy):
                logger.info('Cleaning up legacy socket')
                os.remove(self.legacy)
        os.remove(self.metric_socket)
def import_emitter(name, **kwargs):
    """Import the emitter module 'speakeasy.emitter.<name>' and return an
    instance of its Emitter class, or None if the module cannot be found.

    name may be either the bare emitter name (e.g. 'simple') or the fully
    qualified module name; kwargs are passed to the Emitter constructor.
    """
    namespace = 'speakeasy.emitter.'
    if namespace not in name:
        name = namespace + name
    try:
        __import__(name)
    except ImportError as e:
        # BUG FIX: the old bare `except Exception` also swallowed real
        # errors raised while the module executed (syntax errors in the
        # emitter, etc.); only a missing module means "no such emitter".
        logging.getLogger(__name__).warn(
            'Emitter module "{0}" not found: {1}'.format(name, e))
        return None
    module = sys.modules[name]
    return module.Emitter(**kwargs)
if __name__ == '__main__':
    # Manual-test entry point: listen on all interfaces, emit to a file
    # via the 'simple' emitter every 60s, with a legacy socket enabled
    # (the final positional arg binds to the `legacy` parameter).
    server = Speakeasy('0.0.0.0', '/var/tmp/metrics_socket', '55001', '55002',
                       'simple', ['filename=/var/tmp/metrics.out'], 60,
                       '/var/tmp/metric_socket2')
    server.start()
    while True:
        try:
            time.sleep(1)
        except:
            # Any interruption (e.g. Ctrl-C / KeyboardInterrupt): shut
            # down the worker threads cleanly before exiting.
            print "Exception... exiting"
            server.shutdown()
            break
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import google.cloud.proto.pubsub.v1.pubsub_pb2 as google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2
import google.protobuf.empty_pb2 as google_dot_protobuf_dot_empty__pb2
class SubscriberStub(object):
  """The service that an application uses to manipulate subscriptions and to
  consume messages from a subscription via the `Pull` method.
  """

  # NOTE: generated by the gRPC protoc plugin -- do not hand-edit; each
  # attribute is a callable gRPC method stub bound to the given channel.

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    self.CreateSubscription = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/CreateSubscription',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Subscription.SerializeToString,
        response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Subscription.FromString,
        )
    self.GetSubscription = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/GetSubscription',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.GetSubscriptionRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Subscription.FromString,
        )
    self.UpdateSubscription = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/UpdateSubscription',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateSubscriptionRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Subscription.FromString,
        )
    self.ListSubscriptions = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/ListSubscriptions',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListSubscriptionsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListSubscriptionsResponse.FromString,
        )
    self.DeleteSubscription = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/DeleteSubscription',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteSubscriptionRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.ModifyAckDeadline = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/ModifyAckDeadline',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ModifyAckDeadlineRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.Acknowledge = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/Acknowledge',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.AcknowledgeRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.Pull = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/Pull',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PullRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PullResponse.FromString,
        )
    # StreamingPull is the only bidirectional-streaming RPC on this service.
    self.StreamingPull = channel.stream_stream(
        '/google.pubsub.v1.Subscriber/StreamingPull',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.StreamingPullRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.StreamingPullResponse.FromString,
        )
    self.ModifyPushConfig = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/ModifyPushConfig',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ModifyPushConfigRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.ListSnapshots = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/ListSnapshots',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListSnapshotsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListSnapshotsResponse.FromString,
        )
    self.CreateSnapshot = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/CreateSnapshot',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.CreateSnapshotRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.FromString,
        )
    self.DeleteSnapshot = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/DeleteSnapshot',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteSnapshotRequest.SerializeToString,
        response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
    self.Seek = channel.unary_unary(
        '/google.pubsub.v1.Subscriber/Seek',
        request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.SeekRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.SeekResponse.FromString,
        )
class SubscriberServicer(object):
"""The service that an application uses to manipulate subscriptions and to
consume messages from a subscription via the `Pull` method.
"""
def CreateSubscription(self, request, context):
"""Creates a subscription to a given topic.
If the subscription already exists, returns `ALREADY_EXISTS`.
If the corresponding topic doesn't exist, returns `NOT_FOUND`.
If the name is not provided in the request, the server will assign a random
name for this subscription on the same project as the topic, conforming
to the
[resource name format](https://cloud.google.com/pubsub/docs/overview#names).
The generated name is populated in the returned Subscription object.
Note that for REST API requests, you must specify a name in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSubscription(self, request, context):
"""Gets the configuration details of a subscription.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateSubscription(self, request, context):
"""Updates an existing subscription. Note that certain properties of a
subscription, such as its topic, are not modifiable.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListSubscriptions(self, request, context):
"""Lists matching subscriptions.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteSubscription(self, request, context):
"""Deletes an existing subscription. All messages retained in the subscription
are immediately dropped. Calls to `Pull` after deletion will return
`NOT_FOUND`. After a subscription is deleted, a new one may be created with
the same name, but the new one has no association with the old
subscription or its topic unless the same topic is specified.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyAckDeadline(self, request, context):
"""Modifies the ack deadline for a specific message. This method is useful
to indicate that more time is needed to process a message by the
subscriber, or to make the message available for redelivery if the
processing was interrupted. Note that this does not modify the
subscription-level `ackDeadlineSeconds` used for subsequent messages.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Acknowledge(self, request, context):
"""Acknowledges the messages associated with the `ack_ids` in the
`AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages
from the subscription.
Acknowledging a message whose ack deadline has expired may succeed,
but such a message may be redelivered later. Acknowledging a message more
than once will not result in an error.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Pull(self, request, context):
"""Pulls messages from the server. Returns an empty list if there are no
messages available in the backlog. The server may return `UNAVAILABLE` if
there are too many concurrent pull requests pending for the given
subscription.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamingPull(self, request_iterator, context):
"""(EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will
respond with UNIMPLEMENTED errors unless you have been invited to test
this feature. Contact cloud-pubsub@google.com with any questions.
Establishes a stream with the server, which sends messages down to the
client. The client streams acknowledgements and ack deadline modifications
back to the server. The server will close the stream and return the status
on any error. The server may close the stream with status `OK` to reassign
server-side resources, in which case, the client should re-establish the
stream. `UNAVAILABLE` may also be returned in the case of a transient error
(e.g., a server restart). These should also be retried by the client. Flow
control can be achieved by configuring the underlying RPC channel.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyPushConfig(self, request, context):
"""Modifies the `PushConfig` for a specified subscription.
This may be used to change a push subscription to a pull one (signified by
an empty `PushConfig`) or vice versa, or change the endpoint URL and other
attributes of a push subscription. Messages will accumulate for delivery
continuously through the call regardless of changes to the `PushConfig`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListSnapshots(self, request, context):
    """Lists the existing snapshots.
    """
    # Auto-generated default stub: concrete servicers override this method.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def CreateSnapshot(self, request, context):
    """Creates a snapshot from the requested subscription.

    If the snapshot already exists, returns `ALREADY_EXISTS`.
    If the requested subscription doesn't exist, returns `NOT_FOUND`.
    If the name is not provided in the request, the server will assign a random
    name for this snapshot on the same project as the subscription, conforming
    to the
    [resource name format](https://cloud.google.com/pubsub/docs/overview#names).
    The generated name is populated in the returned Snapshot object.
    Note that for REST API requests, you must specify a name in the request.
    """
    # Auto-generated default stub: concrete servicers override this method.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def DeleteSnapshot(self, request, context):
    """Removes an existing snapshot. All messages retained in the snapshot
    are immediately dropped. After a snapshot is deleted, a new one may be
    created with the same name, but the new one has no association with the old
    snapshot or its subscription, unless the same subscription is specified.
    """
    # Auto-generated default stub: concrete servicers override this method.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def Seek(self, request, context):
    """Seeks an existing subscription to a point in time or to a given snapshot,
    whichever is provided in the request.
    """
    # Auto-generated default stub: concrete servicers override this method.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_SubscriberServicer_to_server(servicer, server):
    """Registers all Subscriber RPC handlers from `servicer` on `server`.

    Generated by the gRPC Python protoc plugin: each map entry wires one RPC
    name to the corresponding servicer method together with the protobuf
    (de)serializers for its request/response messages.
    """
    rpc_method_handlers = {
        'CreateSubscription': grpc.unary_unary_rpc_method_handler(
            servicer.CreateSubscription,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Subscription.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Subscription.SerializeToString,
        ),
        'GetSubscription': grpc.unary_unary_rpc_method_handler(
            servicer.GetSubscription,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.GetSubscriptionRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Subscription.SerializeToString,
        ),
        'UpdateSubscription': grpc.unary_unary_rpc_method_handler(
            servicer.UpdateSubscription,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.UpdateSubscriptionRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Subscription.SerializeToString,
        ),
        'ListSubscriptions': grpc.unary_unary_rpc_method_handler(
            servicer.ListSubscriptions,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListSubscriptionsRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListSubscriptionsResponse.SerializeToString,
        ),
        'DeleteSubscription': grpc.unary_unary_rpc_method_handler(
            servicer.DeleteSubscription,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteSubscriptionRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'ModifyAckDeadline': grpc.unary_unary_rpc_method_handler(
            servicer.ModifyAckDeadline,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ModifyAckDeadlineRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'Acknowledge': grpc.unary_unary_rpc_method_handler(
            servicer.Acknowledge,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.AcknowledgeRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'Pull': grpc.unary_unary_rpc_method_handler(
            servicer.Pull,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PullRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PullResponse.SerializeToString,
        ),
        # StreamingPull is the only bidirectional-streaming RPC in the set.
        'StreamingPull': grpc.stream_stream_rpc_method_handler(
            servicer.StreamingPull,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.StreamingPullRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.StreamingPullResponse.SerializeToString,
        ),
        'ModifyPushConfig': grpc.unary_unary_rpc_method_handler(
            servicer.ModifyPushConfig,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ModifyPushConfigRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'ListSnapshots': grpc.unary_unary_rpc_method_handler(
            servicer.ListSnapshots,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListSnapshotsRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListSnapshotsResponse.SerializeToString,
        ),
        'CreateSnapshot': grpc.unary_unary_rpc_method_handler(
            servicer.CreateSnapshot,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.CreateSnapshotRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Snapshot.SerializeToString,
        ),
        'DeleteSnapshot': grpc.unary_unary_rpc_method_handler(
            servicer.DeleteSnapshot,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteSnapshotRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'Seek': grpc.unary_unary_rpc_method_handler(
            servicer.Seek,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.SeekRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.SeekResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'google.pubsub.v1.Subscriber', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
class PublisherStub(object):
    """The service that an application uses to manipulate topics, and to send
    messages to a topic.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # One client callable per Publisher RPC; serializers come from the
        # generated pubsub protobuf module. All Publisher RPCs are unary-unary.
        self.CreateTopic = channel.unary_unary(
            '/google.pubsub.v1.Publisher/CreateTopic',
            request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.SerializeToString,
            response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.FromString,
        )
        self.Publish = channel.unary_unary(
            '/google.pubsub.v1.Publisher/Publish',
            request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PublishRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PublishResponse.FromString,
        )
        self.GetTopic = channel.unary_unary(
            '/google.pubsub.v1.Publisher/GetTopic',
            request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.GetTopicRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.FromString,
        )
        self.ListTopics = channel.unary_unary(
            '/google.pubsub.v1.Publisher/ListTopics',
            request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListTopicsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListTopicsResponse.FromString,
        )
        self.ListTopicSubscriptions = channel.unary_unary(
            '/google.pubsub.v1.Publisher/ListTopicSubscriptions',
            request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListTopicSubscriptionsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListTopicSubscriptionsResponse.FromString,
        )
        self.DeleteTopic = channel.unary_unary(
            '/google.pubsub.v1.Publisher/DeleteTopic',
            request_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteTopicRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
class PublisherServicer(object):
    """The service that an application uses to manipulate topics, and to send
    messages to a topic.

    Auto-generated server-side base class: every method answers UNIMPLEMENTED
    until a concrete subclass overrides it.
    """

    def CreateTopic(self, request, context):
        """Creates the given topic with the given name.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Publish(self, request, context):
        """Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic
        does not exist. The message payload must not be empty; it must contain
        either a non-empty data field, or at least one attribute.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTopic(self, request, context):
        """Gets the configuration of a topic.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListTopics(self, request, context):
        """Lists matching topics.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListTopicSubscriptions(self, request, context):
        """Lists the name of the subscriptions for this topic.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteTopic(self, request, context):
        """Deletes the topic with the given name. Returns `NOT_FOUND` if the topic
        does not exist. After a topic is deleted, a new topic may be created with
        the same name; this is an entirely new topic with none of the old
        configuration or subscriptions. Existing subscriptions to this topic are
        not deleted, but their `topic` field is set to `_deleted-topic_`.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_PublisherServicer_to_server(servicer, server):
    """Registers all Publisher RPC handlers from `servicer` on `server`.

    Generated by the gRPC Python protoc plugin: each map entry wires one RPC
    name to the corresponding servicer method together with the protobuf
    (de)serializers for its request/response messages.
    """
    rpc_method_handlers = {
        'CreateTopic': grpc.unary_unary_rpc_method_handler(
            servicer.CreateTopic,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.SerializeToString,
        ),
        'Publish': grpc.unary_unary_rpc_method_handler(
            servicer.Publish,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PublishRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.PublishResponse.SerializeToString,
        ),
        'GetTopic': grpc.unary_unary_rpc_method_handler(
            servicer.GetTopic,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.GetTopicRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.Topic.SerializeToString,
        ),
        'ListTopics': grpc.unary_unary_rpc_method_handler(
            servicer.ListTopics,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListTopicsRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListTopicsResponse.SerializeToString,
        ),
        'ListTopicSubscriptions': grpc.unary_unary_rpc_method_handler(
            servicer.ListTopicSubscriptions,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListTopicSubscriptionsRequest.FromString,
            response_serializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.ListTopicSubscriptionsResponse.SerializeToString,
        ),
        'DeleteTopic': grpc.unary_unary_rpc_method_handler(
            servicer.DeleteTopic,
            request_deserializer=google_dot_cloud_dot_proto_dot_pubsub_dot_v1_dot_pubsub__pb2.DeleteTopicRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'google.pubsub.v1.Publisher', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
|
# -*- coding: utf-8 -*-
from rest_framework import permissions
from rest_framework import exceptions
from addons.base.models import BaseAddonSettings
from osf.models import AbstractNode, OSFUser as User, Institution, DraftRegistration, PrivateLink, PreprintService, NodeRelation
from website.project.metadata.utils import is_prereg_admin
from website.util import permissions as osf_permissions
from api.base.utils import get_user_auth, is_deprecated
class ContributorOrPublic(permissions.BasePermission):
    """Safe methods require a public or viewable target; writes require edit."""

    def has_object_permission(self, request, view, obj):
        # Unwrap addon settings and preprints down to the underlying node.
        target = obj.owner if isinstance(obj, BaseAddonSettings) else obj
        if isinstance(target, PreprintService):
            target = target.node
        assert isinstance(target, (AbstractNode, NodeRelation)), 'obj must be an Node, NodeRelation, PreprintService, or AddonSettings; got {}'.format(target)
        auth = get_user_auth(request)
        if request.method not in permissions.SAFE_METHODS:
            return target.can_edit(auth)
        return target.is_public or target.can_view(auth)
class IsPublic(permissions.BasePermission):
    """Grant access when the node is public or viewable by the requester."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode)), 'obj must be an Node got {}'.format(obj)
        requester_auth = get_user_auth(request)
        return obj.is_public or obj.can_view(requester_auth)
class IsAdmin(permissions.BasePermission):
    """Grant access only to admins of the node named by 'node_id' in the URL."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, DraftRegistration, PrivateLink)), 'obj must be an Node, Draft Registration, or PrivateLink, got {}'.format(obj)
        auth = get_user_auth(request)
        target_node = AbstractNode.load(request.parser_context['kwargs']['node_id'])
        return target_node.has_permission(auth.user, osf_permissions.ADMIN)
class IsAdminOrReviewer(permissions.BasePermission):
    """
    Prereg admins can update draft registrations.
    """

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, DraftRegistration, PrivateLink)), 'obj must be an Node, Draft Registration, or PrivateLink, got {}'.format(obj)
        auth = get_user_auth(request)
        node = AbstractNode.load(request.parser_context['kwargs']['node_id'])
        # Prereg reviewers may do anything except DELETE; otherwise fall back
        # to a regular admin check on the node.
        reviewer_allowed = request.method != 'DELETE' and is_prereg_admin(auth.user)
        return reviewer_allowed or node.has_permission(auth.user, osf_permissions.ADMIN)
class AdminOrPublic(permissions.BasePermission):
    """Reads allowed on public/viewable nodes; writes require node admin."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, User, Institution, BaseAddonSettings, DraftRegistration, PrivateLink)), 'obj must be an Node, User, Institution, Draft Registration, PrivateLink, or AddonSettings; got {}'.format(obj)
        auth = get_user_auth(request)
        node = AbstractNode.load(request.parser_context['kwargs'][view.node_lookup_url_kwarg])
        if request.method not in permissions.SAFE_METHODS:
            return node.has_permission(auth.user, osf_permissions.ADMIN)
        return node.is_public or node.can_view(auth)
class ExcludeWithdrawals(permissions.BasePermission):
    """Deny all access once a registration has been withdrawn (retracted)."""

    def has_object_permission(self, request, view, obj):
        url_kwargs = request.parser_context['kwargs']
        node = AbstractNode.load(url_kwargs[view.node_lookup_url_kwarg])
        return not node.is_retracted
class ContributorDetailPermissions(permissions.BasePermission):
    """Permissions for contributor detail page."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, User)), 'obj must be User or Node, got {}'.format(obj)
        auth = get_user_auth(request)
        url_kwargs = request.parser_context['kwargs']
        node = AbstractNode.load(url_kwargs[view.node_lookup_url_kwarg])
        contributor = User.load(url_kwargs['user_id'])
        if request.method in permissions.SAFE_METHODS:
            return node.is_public or node.can_view(auth)
        if request.method == 'DELETE':
            # Admins may remove anyone; contributors may remove themselves.
            return node.has_permission(auth.user, osf_permissions.ADMIN) or auth.user == contributor
        return node.has_permission(auth.user, osf_permissions.ADMIN)
class ContributorOrPublicForPointers(permissions.BasePermission):
    """Node links are readable when public or when both ends are viewable;
    modifying a link requires edit rights on the parent node."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, NodeRelation)), 'obj must be an Node or NodeRelation, got {}'.format(obj)
        auth = get_user_auth(request)
        url_kwargs = request.parser_context['kwargs']
        parent_node = AbstractNode.load(url_kwargs['node_id'])
        pointer_node = NodeRelation.load(url_kwargs['node_link_id']).child
        if request.method not in permissions.SAFE_METHODS:
            return parent_node.can_edit(auth)
        parent_viewable = parent_node.can_view(auth)
        pointer_viewable = pointer_node.can_view(auth)
        return pointer_node.is_public or (parent_viewable and pointer_viewable)
class ContributorOrPublicForRelationshipPointers(permissions.BasePermission):
    """Relationship endpoint for node links: reads need view access on the
    parent, DELETE needs edit access, and creating links additionally requires
    every pointed-to node to be viewable."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, dict)
        auth = get_user_auth(request)
        parent_node = obj['self']
        if request.method in permissions.SAFE_METHODS:
            return parent_node.can_view(auth)
        if request.method == 'DELETE':
            return parent_node.can_edit(auth)
        if not parent_node.can_edit(auth):
            return False
        # Resolve every pointer in the payload before checking access so a
        # bad id produces a 404 rather than a 403.
        pointer_nodes = []
        for pointer in request.data.get('data', []):
            node = AbstractNode.load(pointer['id'])
            if not node or node.is_collection:
                raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(pointer['id']))
            pointer_nodes.append(node)
        return all(node.can_view(auth) for node in pointer_nodes)
class RegistrationAndPermissionCheckForPointers(permissions.BasePermission):
    """Node-link rules around registrations/collections: links on registrations
    are read-only, and deleting a link requires edit rights on the node."""

    def has_object_permission(self, request, view, obj):
        url_kwargs = request.parser_context['kwargs']
        node_link = NodeRelation.load(url_kwargs['node_link_id'])
        node = AbstractNode.load(url_kwargs[view.node_lookup_url_kwarg])
        auth = get_user_auth(request)
        if request.method == 'DELETE' and node.is_registration:
            raise exceptions.MethodNotAllowed(method=request.method)
        if node.is_collection or node.is_registration:
            raise exceptions.NotFound
        if node_link.node.is_registration and request.method not in permissions.SAFE_METHODS:
            raise exceptions.MethodNotAllowed
        if node != node_link.parent:
            raise exceptions.NotFound
        if request.method == 'DELETE' and not node.can_edit(auth):
            return False
        return True
class WriteOrPublicForRelationshipInstitutions(permissions.BasePermission):
    """Reads allowed on public/viewable nodes; changes require write access."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, dict)
        auth = get_user_auth(request)
        node = obj['self']
        if request.method not in permissions.SAFE_METHODS:
            return node.has_permission(auth.user, osf_permissions.WRITE)
        return node.is_public or node.can_view(auth)
class ReadOnlyIfRegistration(permissions.BasePermission):
    """Makes PUT and POST forbidden for registrations."""

    def has_object_permission(self, request, view, obj):
        node = obj if isinstance(obj, AbstractNode) else AbstractNode.load(
            request.parser_context['kwargs'][view.node_lookup_url_kwarg])
        assert isinstance(node, AbstractNode), 'obj must be an Node'
        if not node.is_registration:
            return True
        return request.method in permissions.SAFE_METHODS
class ShowIfVersion(permissions.BasePermission):
    """Hide the endpoint (404) when the request's API version falls outside
    the supported [min_version, max_version] window."""

    def __init__(self, min_version, max_version, deprecated_message):
        super(ShowIfVersion, self).__init__()
        self.min_version, self.max_version = min_version, max_version
        self.deprecated_message = deprecated_message

    def has_object_permission(self, request, view, obj):
        if is_deprecated(request.version, self.min_version, self.max_version):
            raise exceptions.NotFound(detail=self.deprecated_message)
        return True
class NodeLinksShowIfVersion(ShowIfVersion):
    """Node-links endpoints only exist in API version 2.0."""

    def __init__(self):
        super(NodeLinksShowIfVersion, self).__init__(
            min_version='2.0',
            max_version='2.0',
            deprecated_message='This feature is deprecated as of version 2.1',
        )
|
|
#!/usr/bin/env python
from __future__ import division, print_function
import numpy as np
import argparse
import re
import os, sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.axes_grid1 import make_axes_locatable
from ase.data import covalent_radii
from ase.data.colors import cpk_colors
import asetk.format.cube as cube
import asetk.format.qe as qe
import asetk.format.igor as igor
# Define command line parser
parser = argparse.ArgumentParser(
description='Plots movie from Gaussian Cube file.')
parser.add_argument('--version', action='version', version='%(prog)s 31.01.2017')
parser.add_argument(
'--cubes',
nargs='+',
metavar='FILENAME',
default=[],
help='Cube files')
parser.add_argument(
'--qe_cubes',
nargs='+',
metavar='FILENAME',
default=[],
help='Files in QE intermediate cube file format as written by pp.x')
parser.add_argument(
'--sts_cubes',
nargs='+',
metavar='FILENAME',
default=[],
help='STS cube files')
parser.add_argument(
'--normal',
metavar='DIRECTION',
default='z',
help='Direction along which to make the movie. May be "x", "y" or "z".')
#parser.add_argument(
# '--replicate',
# default=None,
# nargs=2,
# type=int,
# metavar='INT',
# help='Number of replica along x and y.\
# If just one number is specified, it is taken for both x and y.')
#parser.add_argument(
# '--stride',
# default=(1,1),
# nargs=2,
# type=float,
# metavar='INT',
# help='If specified, the data will be resampled on a cartesian grid. \
# --stride 0.5 0.5 will result in a grid twice as fine as the \
# original grid of the cube file.')
#parser.add_argument(
# '--resample',
# default=None,
# nargs=2,
# type=int,
# metavar='INT',
# help='If specified, the data will be resampled on a cartesian grid of \
# nx x ny points.')
parser.add_argument(
'--format',
metavar='STRING',
default='png',
help='Specifies format of output. Can be \'png\' (collection of pngs) or\
\'igor\' (igor text format of Igor Pro) or \'mp4\' (movie).'
)
parser.add_argument(
'--plotrange',
nargs=2,
metavar='VALUE',
default=None,
type=float,
help='If specified, color scale in plot will range from 1st value \
to 2nd value.')
parser.add_argument(
'--ffmpeg_path',
metavar='FILEPATH',
default='/opt/local/bin/ffmpeg',
help='Specify path to ffmpeg (required for mp4 movies).')
parser.add_argument(
'--atom_sketch',
dest='atom_sketch',
action='store_true',
help='If specified, overlay sketch of atoms on top of images.'
)
parser.add_argument(
'--atom_zcut',
metavar='DISTANCE',
default=None,
type=float,
help='If specified, cuts atoms below zcut for the atomic sketch.'
)
args = parser.parse_args()

# Validate the requested output format before doing any work.
# Fix: the old message omitted 'igor' even though it is accepted above.
if args.format not in ['png', 'mp4', 'igor']:
    raise ValueError("Only png, mp4 and igor formats are currently supported.")

# argparse yields bytes-like str on Python 2 but unicode str on Python 3;
# str has no .decode() on Python 3, so only decode when we received bytes.
ffmpeg_path = args.ffmpeg_path
if isinstance(ffmpeg_path, bytes):
    ffmpeg_path = ffmpeg_path.decode('utf8')
plt.rcParams[u'animation.ffmpeg_path'] = ffmpeg_path

# Axis index (0/1/2) of the direction we step through to build the movie.
dir_index = cube.Cube.dir_indices[args.normal]

# Iterate over supplied cube files
for fname in args.cubes + args.qe_cubes + args.sts_cubes:
    print("\nReading {n} ".format(n=fname))

    # Pick the reader matching the argument list the file name came from.
    if fname in args.cubes:
        cformat = 'cube'
        c = cube.Cube.from_file(fname, read_data=True)
    elif fname in args.sts_cubes:
        cformat = 'sts_cube'
        c = cube.STSCube.from_file(fname, read_data=True)
    elif fname in args.qe_cubes:
        cformat = 'qe_cube'
        tmp = qe.QECube.from_file(fname, read_data=True)
        c = tmp.to_cube()

    name = os.path.splitext(fname)[0]

    # One global color scale across all planes of this cube.
    vmax = np.amax(c.data)
    vmin = np.amin(c.data)

    fig, (ax) = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(6, 5))
    plt.subplots_adjust(left=0.10)
    ax.set_xlabel('x [$\mathrm{\AA}$]')
    ax.set_ylabel('y [$\mathrm{\AA}$]')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    ax.set_title("{}".format(c.title))

    # STS cubes scan bias voltage along the normal; plain cubes scan position.
    if cformat == 'sts_cube':
        label_text = "U = {:.3f} V"
    else:
        label_text = "z = {:.3f}"

    z0 = c.origin[dir_index]
    nz = c.data.shape[dir_index]
    dz = np.linalg.norm(c.cell[dir_index]) / nz

    if args.atom_sketch:
        # Overlay atom outlines (covalent radii, CPK colors), optionally
        # dropping atoms below the z cutoff.
        if args.atom_zcut is None:
            draw_atoms = c.atoms
        else:
            draw_atoms = [at for at in c.atoms if at.position[2] > args.atom_zcut]
        for at in draw_atoms:
            x, y, z = at.position
            n = at.number
            ax.add_artist(plt.Circle((x, y), covalent_radii[n],
                                     color=cpk_colors[n], fill=False, clip_on=True))

    ims = []
    for i in range(nz):
        p = c.get_plane(args.normal, i, return_object=True)
        if args.format in ['png', 'mp4']:
            im = ax.imshow(p.imdata, norm=plt.Normalize(vmin, vmax),
                           extent=p.extent, cmap=matplotlib.cm.bwr)
            label = ax.text(0.8, 0.9, label_text.format(z0 + i * dz),
                            horizontalalignment='center', verticalalignment='center',
                            transform=ax.transAxes)
            if i == 0:
                plt.colorbar(im, cax=cax)
            if args.format == 'png':
                outname = "{}_{}_{:04d}.png".format(name, args.normal, i)
                print("Saving {}".format(outname), end='\r')
                sys.stdout.flush()
                plt.savefig(outname, dpi=200)
                # Remove per-frame artists so the next frame starts clean.
                label.remove()
                im.remove()
            elif args.format == 'mp4':
                # Collect the frame's artists for ArtistAnimation below.
                ims.append((im, label,))
        elif args.format == 'igor':
            igorwave = igor.Wave2d(
                data=p.data,
                xmin=p.extent[0],
                xdelta=np.linalg.norm(p.dx),
                xlabel='x [Angstroms]',
                ymin=p.extent[2],
                ydelta=np.linalg.norm(p.dy),
                ylabel='y [Angstroms]',
            )
            outname = "{}_{}_{:04d}.itx".format(name, args.normal, i)
            print("Writing {} ".format(outname), end='\r')
            igorwave.write(outname)

    if args.format == 'mp4':
        # Assemble the collected frames into a movie via ffmpeg.
        im_ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=3000,
                                           blit=True)
        mfile = "{}_{}.mp4".format(name, args.normal)
        print("Making movie {}".format(mfile))
        writer = animation.FFMpegWriter(fps=15, metadata=dict(artist='Me'), bitrate=1800)
        im_ani.save(mfile, writer=writer)
|
|
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package manipulates Docker image layer metadata."""
from collections import namedtuple
import copy
import json
import os
import os.path
import sys
from tools.build_defs.docker import utils
from third_party.py import gflags
# Command-line flags describing the layer being written and the metadata
# overrides/augmentations it applies on top of its parent image's config.
gflags.DEFINE_string(
    'name', None, 'The name of the current layer')

gflags.DEFINE_string(
    'base', None, 'The parent image')

gflags.DEFINE_string(
    'output', None, 'The output file to generate')

gflags.DEFINE_string(
    'layer', None, 'The current layer tar')

gflags.DEFINE_list(
    'entrypoint', None,
    'Override the "Entrypoint" of the previous layer')

gflags.DEFINE_list(
    'command', None,
    'Override the "Cmd" of the previous layer')

gflags.DEFINE_string(
    'user', None, 'The username to run commands under')

gflags.DEFINE_list('labels', None, 'Augment the "Label" of the previous layer')

gflags.DEFINE_list(
    'ports', None,
    'Augment the "ExposedPorts" of the previous layer')

gflags.DEFINE_list(
    'volumes', None,
    'Augment the "Volumes" of the previous layer')

gflags.DEFINE_string(
    'workdir', None,
    'Set the working directory for the layer')

gflags.DEFINE_list(
    'env', None,
    'Augment the "Env" of the previous layer')

FLAGS = gflags.FLAGS
# Underlying field container for MetadataOptions. 'name'/'parent'/'size'
# describe the layer chain; the remaining fields mirror Docker config keys.
_MetadataOptionsT = namedtuple('MetadataOptionsT',
                               ['name', 'parent', 'size', 'entrypoint', 'cmd',
                                'env', 'labels', 'ports', 'volumes', 'workdir',
                                'user'])


class MetadataOptions(_MetadataOptionsT):
    """Docker image layer metadata options."""

    def __new__(cls,
                name=None,
                parent=None,
                size=None,
                entrypoint=None,
                cmd=None,
                user=None,
                labels=None,
                env=None,
                ports=None,
                volumes=None,
                workdir=None):
        """Constructor."""
        # Thin wrapper over the namedtuple so every field is optional and
        # defaults to None (meaning "no change" for RewriteMetadata).
        return super(MetadataOptions, cls).__new__(cls,
                                                   name=name,
                                                   parent=parent,
                                                   size=size,
                                                   entrypoint=entrypoint,
                                                   cmd=cmd,
                                                   user=user,
                                                   labels=labels,
                                                   env=env,
                                                   ports=ports,
                                                   volumes=volumes,
                                                   workdir=workdir)
# Constants stamped into every generated layer's metadata.
_DOCKER_VERSION = '1.5.0'
_PROCESSOR_ARCHITECTURE = 'amd64'
_OPERATING_SYSTEM = 'linux'
def Resolve(value, environment):
    """Resolves environment variables embedded in the given value.

    Args:
      value: string possibly containing $VAR / ${VAR} references.
      environment: dict used as the variable namespace during expansion.

    Returns:
      value with every resolvable reference substituted.
    """
    # os.path.expandvars only consults os.environ, so temporarily swap in
    # the supplied mapping and always restore the real environment after.
    saved_environ = os.environ
    os.environ = environment
    try:
        return os.path.expandvars(value)
    finally:
        os.environ = saved_environ
def DeepCopySkipNull(data):
    """Do a deep copy, skipping null entry.

    Dicts are rebuilt recursively with every key whose value is None dropped;
    any other object is copied verbatim via copy.deepcopy.

    Args:
      data: arbitrary JSON-like structure.

    Returns:
      A deep copy of data with None-valued dict entries removed.
    """
    # isinstance replaces the `type(data) == type(dict())` anti-pattern (and
    # now also handles dict subclasses), while items() works on both Python 2
    # and Python 3, unlike the original iteritems().
    if isinstance(data, dict):
        return dict((DeepCopySkipNull(k), DeepCopySkipNull(v))
                    for k, v in data.items() if v is not None)
    return copy.deepcopy(data)
def KeyValueToDict(pair):
    """Converts an iterable object of key=value pairs to dictionary.

    Args:
      pair: iterable of 'key=value' strings, or None/empty.

    Returns:
      dict mapping each key to its value (values may themselves contain '=').
    """
    # Robustness: gflags DEFINE_list flags default to None when not given on
    # the command line; treat that (and any empty iterable) as "no pairs"
    # instead of raising TypeError.
    if not pair:
        return {}
    # Split only on the first '=' so values can contain '=' characters.
    return dict(kv.split('=', 1) for kv in pair)
def RewriteMetadata(data, options):
    """Rewrite and return a copy of the input data according to options.

    Args:
      data: The dict of Docker image layer metadata we're copying and rewriting.
      options: The changes this layer makes to the overall image's metadata, which
               first appears in this layer's version of the metadata

    Returns:
      A deep copy of data, which has been updated to reflect the metadata
      additions of this layer.

    Raises:
      Exception: a required option was missing.
    """
    output = DeepCopySkipNull(data)

    if not options.name:
        raise Exception('Missing required option: name')
    output['id'] = options.name

    if options.parent:
        output['parent'] = options.parent
    elif data:
        # A layer with no parent must start from empty base metadata.
        raise Exception('Expected empty input object when parent is omitted')

    if options.size:
        output['Size'] = options.size
    elif 'Size' in output:
        # The parent's size does not describe this layer; drop it.
        del output['Size']

    if 'config' not in output:
        output['config'] = {}

    # Entrypoint/Cmd/User fully replace the parent's values when given.
    if options.entrypoint:
        output['config']['Entrypoint'] = options.entrypoint
    if options.cmd:
        output['config']['Cmd'] = options.cmd
    if options.user:
        output['config']['User'] = options.user

    output['docker_version'] = _DOCKER_VERSION
    output['architecture'] = _PROCESSOR_ARCHITECTURE
    output['os'] = _OPERATING_SYSTEM

    def Dict2ConfigValue(d):
        # Docker stores Env/Label as a sorted list of 'key=value' strings.
        return ['%s=%s' % (k, d[k]) for k in sorted(d.keys())]

    if options.env:
        # Build a dictionary of existing environment variables (used by Resolve).
        environ_dict = KeyValueToDict(output['config'].get('Env', []))
        # Merge in new environment variables, resolving references.
        for k, v in options.env.iteritems():
            # Resolve handles scenarios like "PATH=$PATH:...".
            environ_dict[k] = Resolve(v, environ_dict)
        output['config']['Env'] = Dict2ConfigValue(environ_dict)

    if options.labels:
        # Labels merge with (and override) the parent's labels.
        label_dict = KeyValueToDict(output['config'].get('Label', []))
        for k, v in options.labels.iteritems():
            label_dict[k] = v
        output['config']['Label'] = Dict2ConfigValue(label_dict)

    if options.ports:
        if 'ExposedPorts' not in output['config']:
            output['config']['ExposedPorts'] = {}
        for p in options.ports:
            if '/' in p:
                # The port spec has the form 80/tcp, 1234/udp
                # so we simply use it as the key.
                output['config']['ExposedPorts'][p] = {}
            else:
                # Assume tcp
                output['config']['ExposedPorts'][p + '/tcp'] = {}

    if options.volumes:
        if 'Volumes' not in output['config']:
            output['config']['Volumes'] = {}
        for p in options.volumes:
            output['config']['Volumes'][p] = {}

    if options.workdir:
        output['config']['WorkingDir'] = options.workdir

    # TODO(mattmoor): comment, created, container_config

    # container_config contains information about the container
    # that was used to create this layer, so it shouldn't
    # propagate from the parent to child. This is where we would
    # annotate information that can be extract by tools like Blubber
    # or Quay.io's UI to gain insight into the source that generated
    # the layer. A Dockerfile might produce something like:
    #   # (nop) /bin/sh -c "apt-get update"
    # We might consider encoding the fully-qualified bazel build target:
    #   //tools/build_defs/docker:image
    # However, we should be sensitive to leaking data through this field.
    if 'container_config' in output:
        del output['container_config']

    return output
def GetParentIdentifier(f):
  """Extract the identifier of the parent image from a docker tarball.

  The 'top' file (written by our rule) is consulted first ('./top', 'top').
  Failing that, the 'repositories' file is parsed and the first declared
  repository tag is returned, which also handles archives produced by
  'docker save'.

  Args:
    f: the input tar file.

  Returns:
    The identifier of the docker image, or None if no identifier was found.
  """
  # TODO(dmarting): Maybe we could drop the 'top' file all together?
  top_content = utils.GetTarFile(f, 'top')
  if top_content:
    return top_content.strip()
  repositories_content = utils.GetTarFile(f, 'repositories')
  if not repositories_content:
    return None
  repo_map = json.loads(repositories_content)
  for repo_name in repo_map:
    for tag in repo_map[repo_name]:
      # The first tag encountered wins.
      return repo_map[repo_name][tag].strip()
  return None
def main(unused_argv):
  """Rewrite the metadata of a docker layer and emit it as sorted JSON."""

  def _Dereference(value):
    # A leading '@' means the actual value lives in the named file.
    if value.startswith('@'):
      with open(value[1:], 'r') as f:
        return f.read()
    return value

  parent = ''
  base_json = '{}'
  if FLAGS.base:
    parent = GetParentIdentifier(FLAGS.base)
    if parent:
      base_json = utils.GetTarFile(FLAGS.base, '%s/json' % parent)
  base_metadata = json.loads(base_json)

  name = _Dereference(FLAGS.name)

  labels = KeyValueToDict(FLAGS.labels)
  for label, value in labels.iteritems():
    labels[label] = _Dereference(value)

  rewritten = RewriteMetadata(base_metadata,
                              MetadataOptions(name=name,
                                              parent=parent,
                                              size=os.path.getsize(FLAGS.layer),
                                              entrypoint=FLAGS.entrypoint,
                                              cmd=FLAGS.command,
                                              user=FLAGS.user,
                                              labels=labels,
                                              env=KeyValueToDict(FLAGS.env),
                                              ports=FLAGS.ports,
                                              volumes=FLAGS.volumes,
                                              workdir=FLAGS.workdir))

  with open(FLAGS.output, 'w') as out:
    json.dump(rewritten, out, sort_keys=True)
    out.write('\n')
# Script entry point. FLAGS(sys.argv) presumably parses the command line and
# returns the remaining positional arguments (ignored by main) -- TODO confirm
# against the flags library this tool is built with.
if __name__ == '__main__':
  main(FLAGS(sys.argv))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.group_by_window()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
# NOTE(mrry): These tests are based on the tests in bucket_ops_test.py.
# Currently, they use a constant batch size, though should be made to use a
# different batch size per key.
class GroupByWindowTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for the `tf.data.experimental.group_by_window()` transformation."""

  def _dynamicPad(self, bucket, window, window_size):
    """Zip the bucket key with a padded batch of the window's elements."""
    # TODO(mrry): To match `tf.contrib.training.bucket()`, implement a
    # generic form of padded_batch that pads every component
    # dynamically and does not rely on static shape information about
    # the arguments.
    return dataset_ops.Dataset.zip(
        (dataset_ops.Dataset.from_tensors(bucket),
         window.padded_batch(
             32, (tensor_shape.TensorShape([]), tensor_shape.TensorShape(
                 [None]), tensor_shape.TensorShape([3])))))

  @combinations.generate(test_base.default_test_combinations())
  def testSingleBucket(self):
    """All elements map to one bucket and come back as a single batch."""

    def _map_fn(v):
      return (v, array_ops.fill([v], v),
              array_ops.fill([3], string_ops.as_string(v)))

    input_dataset = dataset_ops.Dataset.from_tensor_slices(
        math_ops.range(32)).map(_map_fn)

    bucketed_dataset = input_dataset.apply(
        grouping.group_by_window(
            lambda x, y, z: 0,
            lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))
    get_next = self.getNext(bucketed_dataset)

    which_bucket, bucketed_values = self.evaluate(get_next())

    self.assertEqual(0, which_bucket)

    expected_scalar_int = np.arange(32, dtype=np.int64)
    expected_unk_int64 = np.zeros((32, 31)).astype(np.int64)
    for i in range(32):
      expected_unk_int64[i, :i] = i
    expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T

    self.assertAllEqual(expected_scalar_int, bucketed_values[0])
    self.assertAllEqual(expected_unk_int64, bucketed_values[1])
    self.assertAllEqual(expected_vec3_str, bucketed_values[2])

  @combinations.generate(test_base.default_test_combinations())
  def testEvenOddBuckets(self):
    """Elements split into an even bucket and an odd bucket, padded each."""

    def _map_fn(v):
      return (v, array_ops.fill([v], v),
              array_ops.fill([3], string_ops.as_string(v)))

    input_dataset = dataset_ops.Dataset.from_tensor_slices(
        math_ops.range(64)).map(_map_fn)

    bucketed_dataset = input_dataset.apply(
        grouping.group_by_window(
            lambda x, y, z: math_ops.cast(x % 2, dtypes.int64),
            lambda k, bucket: self._dynamicPad(k, bucket, 32), 32))

    get_next = self.getNext(bucketed_dataset)

    # Get two minibatches (one containing even values, one containing odds)
    which_bucket_even, bucketed_values_even = self.evaluate(get_next())
    which_bucket_odd, bucketed_values_odd = self.evaluate(get_next())

    # Count number of bucket_tensors.
    self.assertEqual(3, len(bucketed_values_even))
    self.assertEqual(3, len(bucketed_values_odd))

    # Ensure bucket 0 was used for all minibatch entries.
    self.assertAllEqual(0, which_bucket_even)
    self.assertAllEqual(1, which_bucket_odd)

    # Test the first bucket outputted, the evens starting at 0
    expected_scalar_int = np.arange(0, 32 * 2, 2, dtype=np.int64)
    expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64)
    for i in range(0, 32):
      expected_unk_int64[i, :2 * i] = 2 * i
    expected_vec3_str = np.vstack(
        3 * [np.arange(0, 32 * 2, 2).astype(bytes)]).T

    self.assertAllEqual(expected_scalar_int, bucketed_values_even[0])
    self.assertAllEqual(expected_unk_int64, bucketed_values_even[1])
    self.assertAllEqual(expected_vec3_str, bucketed_values_even[2])

    # Test the second bucket outputted, the odds starting at 1
    expected_scalar_int = np.arange(1, 32 * 2 + 1, 2, dtype=np.int64)
    expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64)
    for i in range(0, 32):
      expected_unk_int64[i, :2 * i + 1] = 2 * i + 1
    expected_vec3_str = np.vstack(
        3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T

    self.assertAllEqual(expected_scalar_int, bucketed_values_odd[0])
    self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1])
    self.assertAllEqual(expected_vec3_str, bucketed_values_odd[2])

  @combinations.generate(test_base.default_test_combinations())
  def testEvenOddBucketsFilterOutAllOdd(self):
    """Buckets whose elements are filtered out upstream never appear."""

    def _map_fn(v):
      return {
          "x": v,
          "y": array_ops.fill([v], v),
          "z": array_ops.fill([3], string_ops.as_string(v))
      }

    def _dynamic_pad_fn(bucket, window, _):
      return dataset_ops.Dataset.zip(
          (dataset_ops.Dataset.from_tensors(bucket),
           window.padded_batch(
               32, {
                   "x": tensor_shape.TensorShape([]),
                   "y": tensor_shape.TensorShape([None]),
                   "z": tensor_shape.TensorShape([3])
               })))

    input_dataset = dataset_ops.Dataset.from_tensor_slices(math_ops.range(
        128)).map(_map_fn).filter(lambda d: math_ops.equal(d["x"] % 2, 0))

    bucketed_dataset = input_dataset.apply(
        grouping.group_by_window(
            lambda d: math_ops.cast(d["x"] % 2, dtypes.int64),
            lambda k, bucket: _dynamic_pad_fn(k, bucket, 32), 32))

    get_next = self.getNext(bucketed_dataset)

    # Get two minibatches ([0, 2, ...] and [64, 66, ...])
    which_bucket0, bucketed_values_even0 = self.evaluate(get_next())
    which_bucket1, bucketed_values_even1 = self.evaluate(get_next())

    # Ensure that bucket 1 was completely filtered out
    self.assertAllEqual(0, which_bucket0)
    self.assertAllEqual(0, which_bucket1)
    self.assertAllEqual(
        np.arange(0, 64, 2, dtype=np.int64), bucketed_values_even0["x"])
    self.assertAllEqual(
        np.arange(64, 128, 2, dtype=np.int64), bucketed_values_even1["x"])

  @combinations.generate(test_base.default_test_combinations())
  def testDynamicWindowSize(self):
    """The window size may be chosen per key via window_size_func."""
    components = np.arange(100).astype(np.int64)

    # Key fn: even/odd
    # Reduce fn: batches of 5
    # Window size fn: even=5, odd=10

    def window_size_func(key):
      window_sizes = constant_op.constant([5, 10], dtype=dtypes.int64)
      return window_sizes[key]

    dataset = dataset_ops.Dataset.from_tensor_slices(components).apply(
        grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(20),
                                 None, window_size_func))
    get_next = self.getNext(dataset)
    with self.assertRaises(errors.OutOfRangeError):
      batches = 0
      while True:
        result = self.evaluate(get_next())
        is_even = all(x % 2 == 0 for x in result)
        is_odd = all(x % 2 == 1 for x in result)
        self.assertTrue(is_even or is_odd)
        expected_batch_size = 5 if is_even else 10
        self.assertEqual(expected_batch_size, result.shape[0])
        batches += 1

    # 50 evens in windows of 5 plus 50 odds in windows of 10 -> 15 batches.
    self.assertEqual(batches, 15)

  @combinations.generate(test_base.default_test_combinations())
  def testSimple(self):
    """Each emitted batch is homogeneously even or homogeneously odd."""
    components = np.random.randint(100, size=(200,)).astype(np.int64)
    dataset = dataset_ops.Dataset.from_tensor_slices(
        components).map(lambda x: x * x).apply(
            grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4),
                                     4))
    get_next = self.getNext(dataset)
    counts = []
    with self.assertRaises(errors.OutOfRangeError):
      while True:
        result = self.evaluate(get_next())
        # BUG FIX: the second `all(...)` previously lacked its iterable
        # (`... or all(x % 2 == 1) for x in result`), which made the whole
        # argument a bare generator expression. assertTrue received a
        # generator object -- always truthy -- so the parity check never
        # actually ran.
        self.assertTrue(
            all(x % 2 == 0 for x in result) or
            all(x % 2 == 1 for x in result))
        counts.append(result.shape[0])

    self.assertEqual(len(components), sum(counts))
    num_full_batches = len([c for c in counts if c == 4])
    self.assertGreaterEqual(num_full_batches, 24)
    self.assertTrue(all(c == 4 for c in counts[:num_full_batches]))

  @combinations.generate(test_base.default_test_combinations())
  def testImmediateOutput(self):
    """Full windows are emitted without consuming the (infinite) input."""
    components = np.array(
        [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64)
    dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(
        -1).apply(
            grouping.group_by_window(lambda x: x % 3, lambda _, xs: xs.batch(4),
                                     4))
    get_next = self.getNext(dataset)
    # The input is infinite, so this test demonstrates that:
    # 1. We produce output without having to consume the entire input,
    # 2. Different buckets can produce output at different rates, and
    # 3. For deterministic input, the output is deterministic.
    for _ in range(3):
      self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
      self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next()))
      self.assertAllEqual([2, 2, 2, 2], self.evaluate(get_next()))
      self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))

  @combinations.generate(test_base.default_test_combinations())
  def testSmallGroups(self):
    """Undersized leftover groups are emitted in key order at the end."""
    components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64)
    dataset = dataset_ops.Dataset.from_tensor_slices(components).apply(
        grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4), 4))
    get_next = self.getNext(dataset)
    self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next()))
    self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next()))
    # The small outputs at the end are deterministically produced in key
    # order.
    self.assertAllEqual([0, 0, 0], self.evaluate(get_next()))
    self.assertAllEqual([1], self.evaluate(get_next()))

  @combinations.generate(test_base.default_test_combinations())
  def testEmpty(self):
    """A zero window size is rejected at iteration time."""
    dataset = dataset_ops.Dataset.range(4).apply(
        grouping.group_by_window(lambda _: 0, lambda _, xs: xs, 0))
    get_next = self.getNext(dataset)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Window size must be greater than zero, but got 0."):
      print(self.evaluate(get_next()))

  @combinations.generate(test_base.default_test_combinations())
  def testReduceFuncError(self):
    """An invalid padded shape in reduce_func surfaces as a runtime error."""
    components = np.random.randint(100, size=(200,)).astype(np.int64)

    def reduce_func(_, xs):
      # Introduce an incorrect padded shape that cannot (currently) be
      # detected at graph construction time.
      return xs.padded_batch(
          4,
          padded_shapes=(tensor_shape.TensorShape([]),
                         constant_op.constant([5], dtype=dtypes.int64) * -1))

    dataset = dataset_ops.Dataset.from_tensor_slices(
        components).map(lambda x: (x, ops.convert_to_tensor([x * x]))).apply(
            grouping.group_by_window(lambda x, _: x % 2, reduce_func, 32))
    get_next = self.getNext(dataset)
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(get_next())

  @combinations.generate(test_base.default_test_combinations())
  def testConsumeWindowDatasetMoreThanOnce(self):
    """A window dataset may be consumed multiple times inside reduce_func."""
    components = np.random.randint(50, size=(200,)).astype(np.int64)

    def reduce_func(key, window):
      # Apply two different kinds of padding to the input: tight
      # padding, and quantized (to a multiple of 10) padding.
      return dataset_ops.Dataset.zip((
          window.padded_batch(
              4, padded_shapes=tensor_shape.TensorShape([None])),
          window.padded_batch(
              4, padded_shapes=ops.convert_to_tensor([(key + 1) * 10])),
      ))

    dataset = dataset_ops.Dataset.from_tensor_slices(
        components
    ).map(lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x)).apply(
        grouping.group_by_window(
            lambda x: math_ops.cast(array_ops.shape(x)[0] // 10, dtypes.int64),
            reduce_func, 4))

    get_next = self.getNext(dataset)
    counts = []
    with self.assertRaises(errors.OutOfRangeError):
      while True:
        tight_result, multiple_of_10_result = self.evaluate(get_next())
        self.assertEqual(0, multiple_of_10_result.shape[1] % 10)
        self.assertAllEqual(tight_result,
                            multiple_of_10_result[:, :tight_result.shape[1]])
        counts.append(tight_result.shape[0])
    self.assertEqual(len(components), sum(counts))

  @combinations.generate(test_base.default_test_combinations())
  def testShortCircuit(self):
    """An identity key function round-trips every element."""
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(
        grouping.group_by_window(lambda x: x, lambda _, window: window.batch(1),
                                 1))
    self.assertDatasetProduces(
        dataset, expected_output=[[i] for i in range(10)])

  @combinations.generate(test_base.default_test_combinations())
  def testGroupByWindowWithAutotune(self):
    """group_by_window composes with an autotuned parallel map."""
    dataset = dataset_ops.Dataset.range(1000).apply(
        grouping.group_by_window(
            lambda x: x // 10,
            lambda key, window: dataset_ops.Dataset.from_tensors(key), 4))
    dataset = dataset.map(lambda x: x + 1, num_parallel_calls=-1)
    get_next = self.getNext(dataset)
    self.evaluate(get_next())

  @combinations.generate(test_base.default_test_combinations())
  def testGroupByWindowCardinality(self):
    """Grouping an infinite dataset reports infinite cardinality."""
    dataset = dataset_ops.Dataset.range(1).repeat().apply(
        grouping.group_by_window(
            lambda x: x % 2,
            lambda key, window: dataset_ops.Dataset.from_tensors(key), 4))
    self.assertEqual(self.evaluate(dataset.cardinality()), dataset_ops.INFINITE)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import io
import fixtures as fx
import futurist
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
from oslotest import output
import sqlalchemy
import testtools
from nova.compute import rpcapi as compute_rpcapi
from nova import conductor
from nova import context
from nova.db.sqlalchemy import api as session
from nova import exception
from nova.network import neutron as neutron_api
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import test
from nova.tests import fixtures
from nova.tests.unit import conf_fixture
from nova.tests.unit import fake_instance
from nova import utils
CONF = cfg.CONF
class TestLogging(testtools.TestCase):
    """Exercise the StandardLogging fixture's handler and level setup."""

    def test_default_logging(self):
        """Without OS_DEBUG, info is captured but debug is suppressed."""
        stdlog = self.useFixture(fixtures.StandardLogging())
        root = logging.getLogger()
        # there should be a null handler as well at DEBUG
        self.assertEqual(2, len(root.handlers), root.handlers)
        log = logging.getLogger(__name__)
        log.info("at info")
        log.debug("at debug")
        self.assertIn("at info", stdlog.logger.output)
        self.assertNotIn("at debug", stdlog.logger.output)
        # broken debug messages should still explode, even though we
        # aren't logging them in the regular handler
        self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
        # and, ensure that one of the terrible log messages isn't
        # output at info
        warn_log = logging.getLogger('migrate.versioning.api')
        warn_log.info("warn_log at info, should be skipped")
        warn_log.error("warn_log at error")
        self.assertIn("warn_log at error", stdlog.logger.output)
        self.assertNotIn("warn_log at info", stdlog.logger.output)

    def test_debug_logging(self):
        """With OS_DEBUG=1 set, debug messages are captured as well."""
        self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
        stdlog = self.useFixture(fixtures.StandardLogging())
        root = logging.getLogger()
        # there should no longer be a null handler
        self.assertEqual(1, len(root.handlers), root.handlers)
        log = logging.getLogger(__name__)
        log.info("at info")
        log.debug("at debug")
        self.assertIn("at info", stdlog.logger.output)
        self.assertIn("at debug", stdlog.logger.output)
class TestOSAPIFixture(testtools.TestCase):
    """Exercise the OSAPIFixture by making real HTTP requests against it."""

    @mock.patch('nova.objects.Service.get_by_host_and_binary')
    @mock.patch('nova.objects.Service.create')
    @mock.patch('nova.utils.raise_if_old_compute', new=mock.Mock())
    def test_responds_to_version(self, mock_service_create, mock_get):
        """Ensure the OSAPI server responds to calls sensibly."""
        self.useFixture(output.CaptureOutput())
        self.useFixture(fixtures.StandardLogging())
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.RPCFixture('nova.test'))
        api = self.useFixture(fixtures.OSAPIFixture()).api
        # request the API root, which provides us the versions of the API
        resp = api.api_request('/', strip_version=True)
        self.assertEqual(200, resp.status_code, resp.content)
        # request a bad root url, should be a 404
        #
        # NOTE(sdague): this currently fails, as it falls into the 300
        # dispatcher instead. This is a bug. The test case is left in
        # here, commented out until we can address it.
        #
        # resp = api.api_request('/foo', strip_version=True)
        # self.assertEqual(resp.status_code, 400, resp.content)
        # request a known bad url, and we should get a 404
        resp = api.api_request('/foo')
        self.assertEqual(404, resp.status_code, resp.content)
class TestDatabaseFixture(testtools.TestCase):
    """Exercise the Database fixture for both the main and api databases."""

    def test_fixture_reset(self):
        """Re-applying the fixture wipes the in-memory main database."""
        # because this sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database())
        engine = session.get_engine()
        conn = engine.connect()
        result = conn.execute("select * from instance_types")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
        # insert a 6th instance type, column 5 below is an int id
        # which has a constraint on it, so if new standard instance
        # types are added you have to bump it.
        conn.execute("insert into instance_types VALUES "
                     "(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'"
                     ", 1.0, 40, 0, 0, 1, 0)")
        result = conn.execute("select * from instance_types")
        rows = result.fetchall()
        self.assertEqual(1, len(rows), "Rows %s" % rows)
        # reset by invoking the fixture again
        #
        # NOTE(sdague): it's important to reestablish the db
        # connection because otherwise we have a reference to the old
        # in mem db.
        self.useFixture(fixtures.Database())
        conn = engine.connect()
        result = conn.execute("select * from instance_types")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)

    def test_api_fixture_reset(self):
        """Re-applying the api-database fixture wipes the api database."""
        # This sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database(database='api'))
        engine = session.get_api_engine()
        conn = engine.connect()
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
        uuid = uuidutils.generate_uuid()
        conn.execute("insert into cell_mappings (uuid, name) VALUES "
                     "('%s', 'fake-cell')" % (uuid,))
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(1, len(rows), "Rows %s" % rows)
        # reset by invoking the fixture again
        #
        # NOTE(sdague): it's important to reestablish the db
        # connection because otherwise we have a reference to the old
        # in mem db.
        self.useFixture(fixtures.Database(database='api'))
        conn = engine.connect()
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)

    def test_fixture_cleanup(self):
        """After cleanup the main database schema is completely empty."""
        # because this sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        fix = fixtures.Database()
        self.useFixture(fix)
        # manually do the cleanup that addCleanup will do
        fix.cleanup()
        # ensure the db contains nothing
        engine = session.get_engine()
        conn = engine.connect()
        schema = "".join(line for line in conn.connection.iterdump())
        self.assertEqual(schema, "BEGIN TRANSACTION;COMMIT;")

    def test_api_fixture_cleanup(self):
        """After cleanup the api database schema is completely empty."""
        # This sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        fix = fixtures.Database(database='api')
        self.useFixture(fix)
        # No data inserted by migrations so we need to add a row
        engine = session.get_api_engine()
        conn = engine.connect()
        uuid = uuidutils.generate_uuid()
        conn.execute("insert into cell_mappings (uuid, name) VALUES "
                     "('%s', 'fake-cell')" % (uuid,))
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(1, len(rows), "Rows %s" % rows)
        # Manually do the cleanup that addCleanup will do
        fix.cleanup()
        # Ensure the db contains nothing
        engine = session.get_api_engine()
        conn = engine.connect()
        schema = "".join(line for line in conn.connection.iterdump())
        self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema)

    def test_fixture_schema_version(self):
        """The version kwarg controls which migrations are applied."""
        self.useFixture(conf_fixture.ConfFixture())
        # In/after 317 aggregates did have uuid
        self.useFixture(fixtures.Database(version=318))
        engine = session.get_engine()
        engine.connect()
        meta = sqlalchemy.MetaData(engine)
        aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
        self.assertTrue(hasattr(aggregate.c, 'uuid'))
        # Before 317, aggregates had no uuid
        self.useFixture(fixtures.Database(version=316))
        engine = session.get_engine()
        engine.connect()
        meta = sqlalchemy.MetaData(engine)
        aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
        self.assertFalse(hasattr(aggregate.c, 'uuid'))
        engine.dispose()

    def test_fixture_after_database_fixture(self):
        """A versioned Database fixture may follow an unversioned one."""
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database())
        self.useFixture(fixtures.Database(version=318))
class TestDefaultFlavorsFixture(testtools.TestCase):
    """Check that DefaultFlavorsFixture populates the api 'flavors' table."""

    @mock.patch("nova.objects.flavor.Flavor._send_notification")
    def test_flavors(self, mock_send_notification):
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database())
        self.useFixture(fixtures.Database(database='api'))
        connection = session.get_api_engine().connect()

        def fetch_flavors():
            # Re-run the same query so each assertion sees fresh rows.
            return connection.execute("select * from flavors").fetchall()

        # The table starts out empty ...
        rows = fetch_flavors()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
        # ... and the fixture installs the six default flavors.
        self.useFixture(fixtures.DefaultFlavorsFixture())
        rows = fetch_flavors()
        self.assertEqual(6, len(rows), "Rows %s" % rows)
class TestIndirectionAPIFixture(testtools.TestCase):
    """Verify IndirectionAPIFixture sets and restores the indirection API."""

    def test_indirection_api(self):
        # The indirection API starts out unset.
        self.assertIsNone(obj_base.NovaObject.indirection_api)
        # While the fixture is active, the configured value is in place.
        fixture = fixtures.IndirectionAPIFixture('foo')
        self.useFixture(fixture)
        self.assertEqual('foo', obj_base.NovaObject.indirection_api)
        # Trigger the cleanup by hand (normally done via addCleanup) and
        # check the original value comes back.
        fixture.cleanup()
        self.assertIsNone(obj_base.NovaObject.indirection_api)
class TestSpawnIsSynchronousFixture(testtools.TestCase):
    """Verify SpawnIsSynchronousFixture replaces the nova.utils spawn APIs."""

    def test_spawn_patch(self):
        original = utils.spawn_n
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        # The fixture must have swapped out the real implementation.
        self.assertNotEqual(original, utils.spawn_n)

    def test_spawn_passes_through(self):
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        target = mock.MagicMock()
        utils.spawn_n(target.function, 'foo', bar='bar')
        # The callable runs synchronously with the given arguments.
        target.function.assert_called_once_with('foo', bar='bar')

    def test_spawn_return_has_wait(self):
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn(lambda x: '%s' % x, 'foo')
        self.assertEqual('foo', thread.wait())

    def test_spawn_n_return_has_wait(self):
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn_n(lambda x: '%s' % x, 'foo')
        self.assertEqual('foo', thread.wait())

    def test_spawn_has_link(self):
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn(mock.MagicMock)
        expected_arg = 'test'
        calls = []

        def _callback(gt, param):
            # link() hands back the thread object plus our extra argument.
            self.assertEqual(thread, gt)
            self.assertEqual(expected_arg, param)
            calls.append(1)

        thread.link(_callback, expected_arg)
        self.assertEqual(1, len(calls))

    def test_spawn_n_has_link(self):
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn_n(mock.MagicMock)
        expected_arg = 'test'
        calls = []

        def _callback(gt, param):
            # link() hands back the thread object plus our extra argument.
            self.assertEqual(thread, gt)
            self.assertEqual(expected_arg, param)
            calls.append(1)

        thread.link(_callback, expected_arg)
        self.assertEqual(1, len(calls))
class TestSynchronousThreadPoolExecutorFixture(testtools.TestCase):
    """Verify the fixture makes GreenThreadPoolExecutor run inline."""

    def test_submit_passes_through(self):
        self.useFixture(fixtures.SynchronousThreadPoolExecutorFixture())
        target = mock.MagicMock()
        pool = futurist.GreenThreadPoolExecutor()
        future = pool.submit(target.function, 'foo', bar='bar')
        # The call happens synchronously and the future carries its result.
        target.function.assert_called_once_with('foo', bar='bar')
        self.assertEqual(target.function.return_value, future.result())
class TestBannedDBSchemaOperations(testtools.TestCase):
    """Verify BannedDBSchemaOperations blocks schema-mutating calls."""

    def _assert_banned(self, schema_obj):
        # Both drop and alter must raise while the ban is in effect.
        self.assertRaises(exception.DBNotAllowed, schema_obj.drop)
        self.assertRaises(exception.DBNotAllowed, schema_obj.alter)

    def test_column(self):
        column = sqlalchemy.Column()
        with fixtures.BannedDBSchemaOperations(['Column']):
            self._assert_banned(column)

    def test_table(self):
        table = sqlalchemy.Table()
        with fixtures.BannedDBSchemaOperations(['Table']):
            self._assert_banned(table)
class TestAllServicesCurrentFixture(testtools.TestCase):
    """Verify AllServicesCurrent short-circuits service version lookups."""

    @mock.patch('nova.objects.Service._db_service_get_minimum_version')
    def test_services_current(self, mock_db):
        mock_db.return_value = {'nova-compute': 123}
        # Without the fixture the database is consulted for the version.
        minimum = service_obj.Service.get_minimum_version(
            None, 'nova-compute')
        self.assertEqual(123, minimum)
        mock_db.assert_called_once_with(None, ['nova-compute'],
                                        use_slave=False)
        mock_db.reset_mock()
        compute_rpcapi.LAST_VERSION = 123
        self.useFixture(fixtures.AllServicesCurrent())
        # The fixture clears the cached RPC version ...
        self.assertIsNone(compute_rpcapi.LAST_VERSION)
        # ... and reports the current SERVICE_VERSION without touching
        # the database again.
        self.assertEqual(service_obj.SERVICE_VERSION,
                         service_obj.Service.get_minimum_version(
                             None, 'nova-compute'))
        self.assertFalse(mock_db.called)
class TestNoopConductorFixture(testtools.TestCase):
    """Verify NoopConductorFixture stubs out the conductor entry points."""

    @mock.patch('nova.conductor.api.ComputeTaskAPI.resize_instance')
    def test_task_api_not_called(self, stub_resize):
        self.useFixture(fixtures.NoopConductorFixture())
        conductor.ComputeTaskAPI().resize_instance()
        # The real task API must never be reached.
        self.assertFalse(stub_resize.called)

    @mock.patch('nova.conductor.api.API.wait_until_ready')
    def test_api_not_called(self, stub_wait):
        self.useFixture(fixtures.NoopConductorFixture())
        conductor.API().wait_until_ready()
        # The real conductor API must never be reached.
        self.assertFalse(stub_wait.called)
class TestSingleCellSimpleFixture(testtools.TestCase):
    """Verify the SingleCellSimple fixture fakes a one-cell deployment."""

    def test_single_cell(self):
        self.useFixture(fixtures.SingleCellSimple())
        mappings = objects.CellMappingList.get_all(None)
        # Exactly one cell mapping is faked.
        self.assertEqual(1, len(mappings))

    def test_target_cell(self):
        self.useFixture(fixtures.SingleCellSimple())
        # Targeting any cell hands back the very same context object.
        with context.target_cell(mock.sentinel.context, None) as cctxt:
            self.assertIs(mock.sentinel.context, cctxt)
class TestWarningsFixture(test.TestCase):
    """Verify invalid UUID warnings are escalated to errors by the fixture."""

    def test_invalid_uuid_errors(self):
        """Creating an oslo.versionedobject with an invalid UUID value for a
        UUIDField should raise an exception.
        """
        valid_kwargs = {
            "created_at": timeutils.utcnow().replace(microsecond=0),
            "updated_at": None,
            "deleted_at": None,
            "deleted": False,
            "id": 123,
            "uuid": uuids.migration,
            "source_compute": "compute-source",
            "dest_compute": "compute-dest",
            "source_node": "node-source",
            "dest_node": "node-dest",
            "dest_host": "host-dest",
            "old_instance_type_id": 42,
            "new_instance_type_id": 84,
            "instance_uuid": "fake-uuid",
            "status": "migrating",
            "migration_type": "resize",
            "hidden": False,
            "memory_total": 123456,
            "memory_processed": 12345,
            "memory_remaining": 111111,
            "disk_total": 234567,
            "disk_processed": 23456,
            "disk_remaining": 211111,
        }
        # A well-formed UUID must not trigger FutureWarning.
        objects.migration.Migration(**valid_kwargs)
        # The same payload with a malformed UUID must raise.
        invalid_kwargs = copy.deepcopy(valid_kwargs)
        invalid_kwargs["uuid"] = "fake_id"
        self.assertRaises(FutureWarning, objects.migration.Migration,
                          **invalid_kwargs)
class TestDownCellFixture(test.TestCase):
    """Exercise DownCellFixture's simulation of unreachable cell databases."""

    def test_fixture(self):
        """With no arguments, every cell raises DBError when queried."""
        # The test setup creates two cell mappings (cell0 and cell1) by
        # default. Let's first list servers across all cells while they are
        # "up" to make sure that works as expected. We'll create a single
        # instance in cell1.
        ctxt = context.get_admin_context()
        cell1 = self.cell_mappings[test.CELL1_NAME]
        with context.target_cell(ctxt, cell1) as cctxt:
            inst = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst:
                delattr(inst, 'id')
            inst.create()
        # Now list all instances from all cells (should get one back).
        results = context.scatter_gather_all_cells(
            ctxt, objects.InstanceList.get_all)
        self.assertEqual(2, len(results))
        self.assertEqual(0, len(results[objects.CellMapping.CELL0_UUID]))
        self.assertEqual(1, len(results[cell1.uuid]))
        # Now do the same but with the DownCellFixture which should result
        # in exception results from both cells.
        with fixtures.DownCellFixture():
            results = context.scatter_gather_all_cells(
                ctxt, objects.InstanceList.get_all)
            self.assertEqual(2, len(results))
            for result in results.values():
                self.assertIsInstance(result, db_exc.DBError)

    def test_fixture_when_explicitly_passing_down_cell_mappings(self):
        """Only the cells passed to the fixture behave as down."""
        # The test setup creates two cell mappings (cell0 and cell1) by
        # default. We'll create one instance per cell and pass cell0 as
        # the down cell. We should thus get db_exc.DBError for cell0 and
        # correct InstanceList object from cell1.
        ctxt = context.get_admin_context()
        cell0 = self.cell_mappings['cell0']
        cell1 = self.cell_mappings['cell1']
        with context.target_cell(ctxt, cell0) as cctxt:
            inst1 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst1:
                delattr(inst1, 'id')
            inst1.create()
        with context.target_cell(ctxt, cell1) as cctxt:
            inst2 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst2:
                delattr(inst2, 'id')
            inst2.create()
        with fixtures.DownCellFixture([cell0]):
            results = context.scatter_gather_all_cells(
                ctxt, objects.InstanceList.get_all)
            self.assertEqual(2, len(results))
            for cell_uuid, result in results.items():
                if cell_uuid == cell0.uuid:
                    self.assertIsInstance(result, db_exc.DBError)
                else:
                    self.assertIsInstance(result, objects.InstanceList)
                    self.assertEqual(1, len(result))
                    self.assertEqual(inst2.uuid, result[0].uuid)

    def test_fixture_for_an_individual_down_cell_targeted_call(self):
        """Directly targeting a cell also honors the down-cell simulation."""
        # We have cell0 and cell1 by default in the setup. We try targeting
        # both the cells. We should get a db error for the down cell and
        # the correct result for the up cell.
        ctxt = context.get_admin_context()
        cell0 = self.cell_mappings['cell0']
        cell1 = self.cell_mappings['cell1']
        with context.target_cell(ctxt, cell0) as cctxt:
            inst1 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst1:
                delattr(inst1, 'id')
            inst1.create()
        with context.target_cell(ctxt, cell1) as cctxt:
            inst2 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst2:
                delattr(inst2, 'id')
            inst2.create()

        def dummy_tester(ctxt, cell_mapping, uuid):
            # Fetch a single instance through a cell-targeted context.
            with context.target_cell(ctxt, cell_mapping) as cctxt:
                return objects.Instance.get_by_uuid(cctxt, uuid)

        # Scenario A: We do not pass any down cells, fixture automatically
        # assumes the targeted cell is down whether its cell0 or cell1.
        with fixtures.DownCellFixture():
            self.assertRaises(
                db_exc.DBError, dummy_tester, ctxt, cell1, inst2.uuid)
        # Scenario B: We pass cell0 as the down cell.
        with fixtures.DownCellFixture([cell0]):
            self.assertRaises(
                db_exc.DBError, dummy_tester, ctxt, cell0, inst1.uuid)
            # Scenario C: We get the correct result from the up cell
            # when targeted.
            result = dummy_tester(ctxt, cell1, inst2.uuid)
            self.assertEqual(inst2.uuid, result.uuid)
class TestNeutronFixture(test.NoDBTestCase):
    """Check NeutronFixture's visibility rules for port resource_request."""

    def setUp(self):
        super(TestNeutronFixture, self).setUp()
        self.neutron = self.useFixture(fixtures.NeutronFixture(self))

    def _get_resource_request_ports(self, ctxt):
        # List ports through a client built from the given context and
        # keep only the fixture's resource-request port.
        client = neutron_api.get_client(ctxt)
        listed = client.list_ports(ctxt)['ports']
        wanted_id = self.neutron.port_with_resource_request['id']
        return [port for port in listed if port['id'] == wanted_id]

    def test_list_ports_with_resource_request_non_admin_client(self):
        # Non-admin clients must not see the resource_request attribute.
        ports = self._get_resource_request_ports(context.get_context())
        self.assertIsNone(ports[0]['resource_request'])

    def test_list_ports_with_resource_request_admin_client(self):
        # Admin clients do see the resource_request attribute.
        ports = self._get_resource_request_ports(context.get_admin_context())
        self.assertIsNotNone(ports[0]['resource_request'])
class TestGlanceFixture(test.NoDBTestCase):
    """Sanity checks for the fake image service provided by GlanceFixture."""

    def setUp(self):
        super().setUp()
        self.image_service = self.useFixture(fixtures.GlanceFixture(self))
        self.context = context.get_admin_context()

    def test_detail(self):
        """Every image listed by detail() carries the full metadata schema."""
        res = self.image_service.detail(self.context)
        for image in res:
            self.assertEqual(
                set(image.keys()),
                {
                    'id', 'name', 'created_at', 'updated_at', 'deleted_at',
                    'deleted', 'status', 'is_public', 'properties',
                    'disk_format', 'container_format', 'size', 'min_disk',
                    'min_ram', 'protected', 'tags', 'visibility',
                },
            )
            self.assertIsInstance(image['created_at'], datetime.datetime)
            self.assertIsInstance(image['updated_at'], datetime.datetime)

            if not (
                isinstance(image['deleted_at'], datetime.datetime) or
                image['deleted_at'] is None
            ):
                self.fail(
                    "image's 'deleted_at' attribute was neither a datetime "
                    "object nor None"
                )

            def check_is_bool(image, key):
                # Bug fix: look up the requested key rather than the
                # hard-coded 'deleted', so 'is_public' is actually checked.
                val = image.get(key)
                if not isinstance(val, bool):
                    self.fail(
                        "image's '%s' attribute wasn't a bool: %r" % (key, val)
                    )

            check_is_bool(image, 'deleted')
            check_is_bool(image, 'is_public')

    def test_show_raises_imagenotfound_for_invalid_id(self):
        self.assertRaises(
            exception.ImageNotFound,
            self.image_service.show,
            self.context, 'this image does not exist')

    def test_create_adds_id(self):
        """create() without an id assigns one and grows the index."""
        index = self.image_service.detail(self.context)
        image_count = len(index)

        self.image_service.create(self.context, {})

        index = self.image_service.detail(self.context)
        self.assertEqual(len(index), image_count + 1)
        self.assertTrue(index[0]['id'])

    def test_create_keeps_id(self):
        """create() with an explicit id keeps it retrievable via show()."""
        self.image_service.create(self.context, {'id': '34'})
        self.image_service.show(self.context, '34')

    def test_create_rejects_duplicate_ids(self):
        self.image_service.create(self.context, {'id': '34'})
        self.assertRaises(
            exception.CouldNotUploadImage,
            self.image_service.create,
            self.context, {'id': '34'})

        # Make sure there's still one left
        self.image_service.show(self.context, '34')

    def test_update(self):
        self.image_service.create(
            self.context, {'id': '34', 'foo': 'bar'})
        self.image_service.update(
            self.context, '34', {'id': '34', 'foo': 'baz'})
        img = self.image_service.show(self.context, '34')
        self.assertEqual(img['foo'], 'baz')

    def test_delete(self):
        """Deleted images disappear from show()."""
        self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
        self.image_service.delete(self.context, '34')
        self.assertRaises(
            exception.NotFound,
            self.image_service.show,
            self.context, '34')

    def test_create_then_get(self):
        """Data uploaded at create() comes back intact from download()."""
        blob = 'some data'
        s1 = io.StringIO(blob)
        self.image_service.create(
            self.context, {'id': '32', 'foo': 'bar'}, data=s1)
        s2 = io.StringIO()
        self.image_service.download(self.context, '32', data=s2)
        self.assertEqual(s2.getvalue(), blob, 'Did not get blob back intact')
|
|
# -*- coding: utf-8 -*-
import io
import ssl
import sys
import copy
import base64
import socket
import httplib
import urlparse
import functools
import contextlib
import collections
import h2.errors
import h2.events
import h2.settings
import h2.connection
import h2.exceptions
from tornado import (
httputil, log, stack_context,
simple_httpclient, netutil
)
from tornado.escape import _unicode, utf8
from tornado.util import GzipDecompressor
from tornado.httpclient import (
HTTPResponse, HTTPError, HTTPRequest, _RequestProxy
)
logger = log.gen_log
__all__ = [
'HTTP2Response', 'HTTP2Error', 'HTTP2ConnectionTimeout',
'HTTP2ConnectionClosed', 'SimpleAsyncHTTP2Client',
]
class HTTP2Response(HTTPResponse):
    """HTTPResponse extended with HTTP/2 extras (server push, redirect)."""

    def __init__(self, *args, **kwargs):
        # Strip the HTTP/2-only keywords before delegating: the parent
        # constructor does not know about them.
        self.pushed_responses = kwargs.pop('pushed_responses', [])
        self.new_request = kwargs.pop('new_request', None)
        # NOTE: 'reason' is deliberately left in kwargs for the parent
        # call and only popped afterwards, matching the original flow.
        super(HTTP2Response, self).__init__(*args, **kwargs)
        explicit_reason = kwargs.pop('reason', None)
        if not explicit_reason:
            explicit_reason = httputil.responses.get(self.code, "Unknown")
        self.reason = explicit_reason
class HTTP2Error(HTTPError):
    """Base class for HTTP/2 client errors."""
    pass


class HTTP2ConnectionTimeout(HTTP2Error):
    """Raised when establishing the HTTP/2 connection times out.

    :param time_cost: seconds spent before giving up, or None.
    """

    def __init__(self, time_cost=None):
        # Bug fix: initialize the HTTPError base with code 599 (client-side
        # connection error), consistent with HTTP2ConnectionClosed; without
        # this, instances lacked the .code attribute callers expect.
        super(HTTP2ConnectionTimeout, self).__init__(599)
        self.time_cost = time_cost


class HTTP2ConnectionClosed(HTTP2Error):
    """Raised into pending streams when the connection closes."""

    def __init__(self, reason=None):
        super(HTTP2ConnectionClosed, self).__init__(599)
        self.reason = reason


class _RequestTimeout(Exception):
    """Internal marker raised when a single request exceeds its timeout."""
    pass
class SimpleAsyncHTTP2Client(simple_httpclient.SimpleAsyncHTTPClient):
    """Async HTTP/2 client bound to a single host.

    All requests share one connection; streams are multiplexed over it up
    to the server-advertised concurrency limit.  When the connection drops
    the client reconnects with a linear back-off and requeues any active
    requests.
    """
    # Reconnect back-off ceiling and step, in seconds.
    MAX_CONNECTION_BACKOFF = 10
    CONNECTION_BACKOFF_STEP = 1
    # host -> shared client instance.
    CLIENT_REGISTRY = {}
    def __new__(cls, *args, **kwargs):
        # One shared client per host unless the caller explicitly asks for
        # a private instance via force_instance=True.
        force_instance = kwargs.pop('force_instance', False)
        host = kwargs['host']
        if force_instance or host not in cls.CLIENT_REGISTRY:
            client = simple_httpclient.SimpleAsyncHTTPClient.__new__(cls, *args, force_instance=True, **kwargs)
            # setdefault: a forced private instance must never displace an
            # already-registered shared one.
            cls.CLIENT_REGISTRY.setdefault(host, client)
        else:
            client = cls.CLIENT_REGISTRY[host]
        return client
    def initialize(self, io_loop, host, port=None, max_streams=200,
                   hostname_mapping=None, max_buffer_size=104857600,
                   resolver=None, defaults=None, secure=True,
                   cert_options=None, enable_push=False, connect_timeout=20,
                   initial_window_size=65535, **conn_kwargs):
        # Start with max_clients=1: stream multiplexing stays disabled until
        # the server's SETTINGS frame arrives (see _adjust_settings).
        super(SimpleAsyncHTTP2Client, self).initialize(
            io_loop=io_loop, max_clients=1,
            hostname_mapping=hostname_mapping, max_buffer_size=max_buffer_size,
            resolver=resolver, defaults=defaults, max_header_size=None,
        )
        self.host = host
        if port is None:
            # Default port follows the scheme.
            port = 443 if secure else 80
        self.port = port
        self.secure = secure
        self.max_streams = max_streams
        self.enable_push = bool(enable_push)
        self.initial_window_size = initial_window_size
        self.connect_timeout = connect_timeout
        self.connection_factory = _HTTP2ConnectionFactory(
            io_loop=self.io_loop, host=host, port=port,
            max_buffer_size=self.max_buffer_size, secure=secure,
            cert_options=cert_options, connect_timeout=self.connect_timeout,
            tcp_client=self.tcp_client,
        )
        # Current connection state (None until the first connect succeeds).
        self.connection = None
        self.io_stream = None
        # Back-off state: earliest permitted reconnect time and current step.
        self.next_connect_time = 0
        self.connection_backoff = self.CONNECTION_BACKOFF_STEP
        self.connection_factory.make_connection(
            self._on_connection_ready, self._on_connection_close)
    def _adjust_settings(self, event):
        # React to the server's SETTINGS frame: raise our concurrency cap
        # to min(server limit, configured max_streams).
        logger.debug('settings updated: %r', event.changed_settings)
        settings = event.changed_settings.get(h2.settings.MAX_CONCURRENT_STREAMS)
        if settings:
            self.max_clients = min(settings.new_value, self.max_streams)
            if settings.new_value > settings.original_value:
                # More room now; drain queued requests.
                self._process_queue()
    def _on_connection_close(self, io_stream, reason):
        # Ignore close notifications from stale streams: a newer connection
        # may already have replaced this one.
        if self.io_stream is not io_stream:
            return
        connection = self.connection
        self.io_stream = None
        self.connection = None
        # NOTE: when io_stream is None (connect failure) connection is also
        # None, so io_stream.error is never dereferenced here.
        if connection is not None:
            connection.on_connection_close(io_stream.error)
        # Schedule the reconnect with linear back-off.
        now_time = self.io_loop.time()
        self.next_connect_time = max(
            self.next_connect_time,
            now_time + self.connection_backoff)
        self.connection_backoff = min(
            self.connection_backoff + self.CONNECTION_BACKOFF_STEP,
            self.MAX_CONNECTION_BACKOFF)
        if io_stream is None:
            logger.info(
                'Connection to %s:%u failed due: %r. Reconnect in %.2f seconds',
                self.host, self.port, reason, self.next_connect_time - now_time)
        else:
            logger.info(
                'Connection to %s:%u closed due: %r. Reconnect in %.2f seconds',
                self.host, self.port, reason, self.next_connect_time - now_time)
        self.io_loop.add_timeout(
            self.next_connect_time, functools.partial(
                self.connection_factory.make_connection,
                self._on_connection_ready, self._on_connection_close
            ))
        # Requeue in-flight requests at the front so they retry first.
        for key, (request, callback) in self.active.items():
            self.queue.appendleft((key, request, callback))
        self.active.clear()
    def _connection_terminated(self, event):
        # Server sent GOAWAY; treat it as a connection close.
        self._on_connection_close(
            self.io_stream, 'Server requested, code: 0x%x' % event.error_code)
    def _on_connection_ready(self, io_stream):
        # Reset back-off, but keep next_connect_time in the future to avoid
        # an immediate reconnect storm if this connection dies right away.
        self.next_connect_time += self.connection_backoff
        self.connection_backoff = 0
        self.io_stream = io_stream
        self.connection = _HTTP2ConnectionContext(
            io_stream=io_stream, secure=self.secure,
            enable_push=self.enable_push,
            max_buffer_size=self.max_buffer_size,
            initial_window_size=self.initial_window_size,
        )
        # Connection-level h2 events we care about.
        self.connection.add_event_handler(
            h2.events.RemoteSettingsChanged, self._adjust_settings
        )
        self.connection.add_event_handler(
            h2.events.ConnectionTerminated, self._connection_terminated
        )
        self._process_queue()
    def fetch_impl(self, request, callback):
        # Normalize the request for HTTP/2 before queuing it.
        request = _HTTP2Stream.prepare_request(request, self.host)
        super(SimpleAsyncHTTP2Client, self).fetch_impl(request, callback)
    def _process_queue(self):
        # Hold queued requests while disconnected; they drain on reconnect.
        if not self.connection:
            return
        super(SimpleAsyncHTTP2Client, self)._process_queue()
    def _handle_request(self, request, release_callback, final_callback):
        # Open a new h2 stream for the request; the _HTTP2Stream instance
        # registers itself as the stream's delegate.
        with self.connection.handle_exception():
            stream_id = self.connection.send_request(request)
            _HTTP2Stream(
                io_loop=self.io_loop, context=self.connection,
                request=request, stream_id=stream_id,
                release_callback=release_callback,
                final_callback=final_callback,
            )
class _HTTP2ConnectionFactory(object):
    """Creates TLS (ALPN 'h2') or plain TCP connections for the client.

    Handles the optional connect timeout and routes connect-time errors to
    the caller-supplied close callback.
    """
    def __init__(self, io_loop, host, port, max_buffer_size, tcp_client,
                 secure=True, cert_options=None, connect_timeout=None):
        self.io_loop = io_loop
        self.max_buffer_size = max_buffer_size
        self.tcp_client = tcp_client
        # defaultdict(None-factory) so missing cert options read as None.
        self.cert_options = collections.defaultdict(lambda: None, **cert_options or {})
        self.host = host
        self.port = port
        self.connect_timeout = connect_timeout
        self.ssl_options = self._get_ssl_options(self.cert_options) if secure else None
    def make_connection(self, ready_callback, close_callback):
        """Start an async connect; report the result via the callbacks.

        On success ready_callback(io_stream) fires; on failure or timeout
        close_callback(io_stream=None, reason=...) fires instead.
        """
        if self.connect_timeout:
            # Mutable cell so the nested closures share the timeout flag.
            timed_out = [False]
            start_time = self.io_loop.time()
            def _on_timeout():
                timed_out[0] = True
                close_callback(
                    io_stream=None,
                    reason=HTTP2ConnectionTimeout(self.io_loop.time() - start_time)
                )
            def _on_connect(io_stream):
                if timed_out[0]:
                    # Too late: the timeout already reported failure.
                    io_stream.close()
                    return
                self.io_loop.remove_timeout(timeout_handle)
                self._on_connect(io_stream, ready_callback, close_callback)
            timeout_handle = self.io_loop.add_timeout(
                start_time + self.connect_timeout, _on_timeout)
        else:
            _on_connect = functools.partial(
                self._on_connect,
                ready_callback=ready_callback,
                close_callback=close_callback,
            )
        logger.info('Establishing HTTP/2 connection to %s:%s...', self.host, self.port)
        # Any exception during connect is funneled into close_callback.
        with stack_context.ExceptionStackContext(
                functools.partial(self._handle_exception, close_callback)):
            self.tcp_client.connect(
                self.host, self.port, af=socket.AF_UNSPEC,
                ssl_options=self.ssl_options,
                max_buffer_size=self.max_buffer_size,
                callback=_on_connect)
    @classmethod
    def _handle_exception(cls, close_callback, typ, value, tb):
        # Report connect-time exceptions through the close callback and
        # mark them handled (return True).
        close_callback(io_stream=None, reason=value)
        return True
    @classmethod
    def _get_ssl_options(cls, cert_options):
        """Build an SSLContext suitable for HTTP/2 from cert options."""
        ssl_options = {}
        if cert_options['validate_cert']:
            ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
        if cert_options['ca_certs'] is not None:
            ssl_options["ca_certs"] = cert_options['ca_certs']
        else:
            ssl_options["ca_certs"] = simple_httpclient._default_ca_certs()
        if cert_options['client_key'] is not None:
            ssl_options["keyfile"] = cert_options['client_key']
        if cert_options['client_cert'] is not None:
            ssl_options["certfile"] = cert_options['client_cert']
        # according to RFC 7540:
        # deployments of HTTP/2 that use TLS 1.2 MUST
        # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
        ssl_options["ciphers"] = "ECDH+AESGCM"
        ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1_2
        ssl_options = netutil.ssl_options_to_context(ssl_options)
        # Advertise h2 via ALPN so the server selects HTTP/2.
        ssl_options.set_alpn_protocols(['h2'])
        return ssl_options
    def _on_connect(self, io_stream, ready_callback, close_callback):
        # Wire the close callback first so a drop is never missed, then
        # hand the stream to the client on the next loop iteration.
        io_stream.set_close_callback(lambda: close_callback(io_stream, io_stream.error))
        self.io_loop.add_callback(functools.partial(ready_callback, io_stream))
        io_stream.set_nodelay(True)
class _HTTP2ConnectionContext(object):
    """Maintains HTTP/2 protocol state for one specific IOStream.

    Owns the h2 state machine, dispatches stream-level events to their
    registered delegates and connection-level events to registered
    handlers, and manages flow-control window updates.
    """
    def __init__(self, io_stream, secure, enable_push,
                 max_buffer_size, initial_window_size):
        self.io_stream = io_stream
        self.schema = 'https' if secure else 'http'
        self.enable_push = enable_push
        self.initial_window_size = initial_window_size
        self.max_buffer_size = max_buffer_size
        self.is_closed = False
        # h2 contexts
        self.stream_delegates = {}  # stream_id -> _HTTP2Stream delegate
        self.event_handlers = {}  # connection level event, event -> handler
        # Recently reset stream ids; events for them are expected noise.
        self.reset_stream_ids = collections.deque(maxlen=50)
        self.h2_conn = h2.connection.H2Connection(client_side=True)
        self.h2_conn.initiate_connection()
        self.h2_conn.update_settings({
            h2.settings.ENABLE_PUSH: int(self.enable_push),
            h2.settings.INITIAL_WINDOW_SIZE: self.initial_window_size,
        })
        self._setup_reading()
        self._flush_to_stream()
    def on_connection_close(self, reason):
        # Notify every open stream exactly once that the connection died.
        if self.is_closed:
            return
        self.is_closed = True
        for delegate in self.stream_delegates.values():
            delegate.on_connection_close(reason)
    @contextlib.contextmanager
    def handle_exception(self):
        """Close the connection on any unexpected exception in the body."""
        try:
            yield
        except Exception as err:
            exc_info = sys.exc_info()
            logger.error('Unexpected exception: %r', err, exc_info=exc_info)
            try:
                self.io_stream.close(exc_info)
            finally:
                self.on_connection_close(err)
    # h2 related
    def _on_connection_streaming(self, data):
        """handles streaming data"""
        if self.is_closed:
            return
        with self.handle_exception():
            # Feed raw bytes to h2, dispatch resulting events, then flush
            # whatever h2 wants to send back (acks, window updates, ...).
            events = self.h2_conn.receive_data(data)
            if events:
                self._process_events(events)
                self._flush_to_stream()
    def _flush_to_stream(self):
        """flush h2 connection data to IOStream"""
        data_to_send = self.h2_conn.data_to_send()
        if data_to_send:
            self.io_stream.write(data_to_send)
    def send_request(self, request):
        """Send headers (and body, if any) on a new stream; return its id."""
        http2_headers = [
            (':authority', request.headers.pop('Host')),
            (':path', request.url),
            (':scheme', self.schema),
            (':method', request.method),
        ] + request.headers.items()
        stream_id = self.h2_conn.get_next_available_stream_id()
        # END_STREAM on the headers when there is no body to follow.
        self.h2_conn.send_headers(stream_id, http2_headers, end_stream=not request.body)
        if request.body:
            self.h2_conn.send_data(stream_id, request.body, end_stream=True)
        self._flush_to_stream()
        return stream_id
    def set_stream_delegate(self, stream_id, stream_delegate):
        self.stream_delegates[stream_id] = stream_delegate
    def remove_stream_delegate(self, stream_id):
        del self.stream_delegates[stream_id]
    def add_event_handler(self, event_type, event_handler):
        self.event_handlers[event_type] = event_handler
    def remove_event_handler(self, event_type):
        del self.event_handlers[event_type]
    def reset_stream(self, stream_id, reason=h2.errors.REFUSED_STREAM, flush=False):
        # Best-effort RST_STREAM; a stream already closed by the peer is
        # silently ignored.
        if self.is_closed:
            return
        try:
            self.h2_conn.reset_stream(stream_id, reason)
        except h2.exceptions.StreamClosedError:
            return
        else:
            if flush:
                self._flush_to_stream()
    def _process_events(self, events):
        """Dispatch h2 events and account received flow-controlled bytes."""
        # stream_id -> flow-controlled bytes received in this batch.
        stream_inbounds = collections.defaultdict(int)
        for event in events:
            if isinstance(event, h2.events.DataReceived):
                stream_inbounds[event.stream_id] += event.flow_controlled_length
            if isinstance(event, h2.events.PushedStreamReceived):
                # Pushed streams are delivered to the parent stream's delegate.
                stream_id = event.parent_stream_id
            else:
                stream_id = getattr(event, 'stream_id', None)
            if stream_id is not None and stream_id != 0:
                if stream_id in self.stream_delegates:
                    stream_delegate = self.stream_delegates[stream_id]
                    with stack_context.ExceptionStackContext(stream_delegate.handle_exception):
                        stream_delegate.handle_event(event)
                else:
                    # FIXME: our nginx server will simply reset stream,
                    # without increase the window size which consumed by
                    # queued data frame which was belongs to the stream we're resetting
                    # self.reset_stream(stream_id)
                    if stream_id in self.reset_stream_ids:
                        if isinstance(event, h2.events.StreamEnded):
                            # Stream fully drained; stop tracking its id.
                            self.reset_stream_ids.remove(stream_id)
                    else:
                        logger.warning('Unexpected stream: %s, event: %r', stream_id, event)
                continue
            # Connection-level event (stream 0 or no stream id).
            event_type = type(event)
            if event_type in self.event_handlers:
                try:
                    self.event_handlers[event_type](event)
                except Exception as err:
                    logger.exception('Exception while handling event: %r', err)
                continue
            logger.debug('ignored event: %r, %r', event, event.__dict__)
        # collects all inbound lengths, reducing the count of WindowUpdate frames.
        connection_inbound = 0
        for stream_id, stream_inbound in stream_inbounds.items():
            if not stream_inbound:
                continue
            connection_inbound += stream_inbound
            try:
                self.h2_conn.increment_flow_control_window(stream_inbound, stream_id)
            except (h2.exceptions.StreamClosedError, KeyError):
                # we can simply ignore StreamClosedError because closed streams
                # doesn't requires WindowUpdate
                pass
        if connection_inbound:
            # One connection-level WINDOW_UPDATE for the whole batch.
            self.h2_conn.increment_flow_control_window(connection_inbound)
    def _setup_reading(self, *_):
        # Continuously read from the stream; streaming_callback feeds h2 as
        # chunks arrive, and the final callback re-arms the read.
        if self.is_closed:
            return
        with stack_context.NullContext():
            self.io_stream.read_bytes(
                num_bytes=65535, callback=self._setup_reading,
                streaming_callback=self._on_connection_streaming)
class _HTTP2Stream(httputil.HTTPMessageDelegate):
    """Delegate for one HTTP/2 stream: collects the response, handles
    server push, gzip decompression, timeouts and redirects, and finally
    invokes the request's callback with an HTTP2Response.
    """
    _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])
    def __init__(
            self, io_loop, context, request, stream_id,
            release_callback=None, final_callback=None):
        self.io_loop = io_loop
        self.start_time = self.io_loop.time()
        self.context = context
        self.release_callback = release_callback
        self.final_callback = final_callback
        # Response accumulation state.
        self.chunks = []
        self.headers = None
        self.code = None
        self.reason = None
        self._timeout = None
        # Server-push bookkeeping: pushed stream id -> stream / response.
        self._pushed_streams = {}
        self._pushed_responses = {}
        self._stream_ended = False
        self._finalized = False
        self._decompressor = None
        self.stream_id = stream_id
        self.request = request
        # Register as the delegate for this stream's events.
        self.context.set_stream_delegate(self.stream_id, self)
        if request.request_timeout:
            with stack_context.ExceptionStackContext(self.handle_exception):
                self._timeout = self.io_loop.add_timeout(
                    self.start_time + request.request_timeout, self._on_timeout)
    @classmethod
    def build_http_headers(cls, headers):
        # Convert an h2 header list into a tornado HTTPHeaders object.
        http_headers = httputil.HTTPHeaders()
        for name, value in headers:
            http_headers.add(name, value)
        return http_headers
    def from_push_stream(self, event):
        """Create a child _HTTP2Stream for a server-pushed stream."""
        headers = self.build_http_headers(event.headers)
        method = headers.pop(':method')
        scheme = headers.pop(':scheme')
        authority = headers.pop(':authority')
        path = headers.pop(':path')
        full_url = '%s://%s%s' % (scheme, authority, path)
        request = HTTPRequest(url=full_url, method=method, headers=headers)
        return _HTTP2Stream(
            io_loop=self.io_loop, context=self.context,
            request=request, stream_id=event.pushed_stream_id,
            final_callback=functools.partial(
                self.finish_push_stream, event.pushed_stream_id)
        )
    def finish_push_stream(self, stream_id, response):
        # Record a finished pushed response; finish the parent only when
        # the main stream ended AND all pushed streams completed.
        if self._finalized:
            return
        self._pushed_responses[stream_id] = response
        if not self._stream_ended:
            return
        if len(self._pushed_streams) == len(self._pushed_responses):
            self.finish()
    @classmethod
    def prepare_request(cls, request, default_host):
        """Normalize a tornado HTTPRequest for sending over HTTP/2.

        Fills in Host/auth/UA/content headers, rejects options this client
        does not implement, and rewrites request.url to path + query.
        """
        parsed = urlparse.urlsplit(_unicode(request.url))
        if (request.method not in cls._SUPPORTED_METHODS and
                not request.allow_nonstandard_methods):
            raise KeyError("unknown method %s" % request.method)
        # Redirects are handled by this class itself (see finish()).
        request.follow_redirects = False
        for key in ('network_interface',
                    'proxy_host', 'proxy_port',
                    'proxy_username', 'proxy_password',
                    'expect_100_continue', 'body_producer',
                    ):
            if getattr(request, key, None):
                raise NotImplementedError('%s not supported' % key)
        # Connection is a forbidden hop-by-hop header in HTTP/2.
        request.headers.pop('Connection', None)
        if "Host" not in request.headers:
            if not parsed.netloc:
                request.headers['Host'] = default_host
            elif '@' in parsed.netloc:
                # Strip userinfo from the authority.
                request.headers["Host"] = parsed.netloc.rpartition('@')[-1]
            else:
                request.headers["Host"] = parsed.netloc
        username, password = None, None
        if parsed.username is not None:
            username, password = parsed.username, parsed.password
        elif request.auth_username is not None:
            username = request.auth_username
            password = request.auth_password or ''
        if username is not None:
            if request.auth_mode not in (None, "basic"):
                # NOTE(review): comma instead of %-formatting, inherited
                # from tornado; the message renders as a tuple.
                raise ValueError("unsupported auth_mode %s",
                                 request.auth_mode)
            auth = utf8(username) + b":" + utf8(password)
            request.headers["Authorization"] = (
                b"Basic " + base64.b64encode(auth))
        if request.user_agent:
            request.headers["User-Agent"] = request.user_agent
        if not request.allow_nonstandard_methods:
            # Some HTTP methods nearly always have bodies while others
            # almost never do. Fail in this case unless the user has
            # opted out of sanity checks with allow_nonstandard_methods.
            body_expected = request.method in ("POST", "PATCH", "PUT")
            body_present = (request.body is not None or
                            request.body_producer is not None)
            if ((body_expected and not body_present) or
                    (body_present and not body_expected)):
                raise ValueError(
                    'Body must %sbe None for method %s (unless '
                    'allow_nonstandard_methods is true)' %
                    ('not ' if body_expected else '', request.method))
        if request.body is not None:
            # When body_producer is used the caller is responsible for
            # setting Content-Length (or else chunked encoding will be used).
            request.headers["Content-Length"] = str(len(
                request.body))
        if (request.method == "POST" and
                "Content-Type" not in request.headers):
            request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        if request.decompress_response:
            request.headers["Accept-Encoding"] = "gzip"
        # :path pseudo-header is origin-form: path plus optional query.
        request.url = (
            (parsed.path or '/') +
            (('?' + parsed.query) if parsed.query else '')
        )
        return request
    def headers_received(self, first_line, headers):
        """Record status/headers; arm gzip decompression if negotiated."""
        if self.request.decompress_response \
                and headers.get("Content-Encoding") == "gzip":
            self._decompressor = GzipDecompressor()
            # Downstream delegates will only see uncompressed data,
            # so rename the content-encoding header.
            headers.add("X-Consumed-Content-Encoding",
                        headers["Content-Encoding"])
            del headers["Content-Encoding"]
        self.headers = headers
        self.code = first_line.code
        self.reason = first_line.reason
        if self.request.header_callback is not None:
            # Reassemble the start line.
            self.request.header_callback('%s %s %s\r\n' % first_line)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback('\r\n')
    def _run_callback(self, response):
        # Deliver the response exactly once, on the next loop iteration
        # and outside any active stack context.
        if self._finalized:
            return
        if self.release_callback is not None:
            self.release_callback()
        with stack_context.NullContext():
            self.io_loop.add_callback(functools.partial(self.final_callback, response))
        self._finalized = True
    def handle_event(self, event):
        """Dispatch one h2 event for this stream."""
        if isinstance(event, h2.events.ResponseReceived):
            headers = self.build_http_headers(event.headers)
            status_code = int(headers.pop(':status'))
            start_line = httputil.ResponseStartLine(
                'HTTP/2.0', status_code, httplib.responses[status_code]
            )
            self.headers_received(start_line, headers)
        elif isinstance(event, h2.events.DataReceived):
            self.data_received(event.data)
        elif isinstance(event, h2.events.StreamEnded):
            self._stream_ended = True
            self.context.remove_stream_delegate(self.stream_id)
            # Finish only once all pushed streams have completed too.
            if len(self._pushed_responses) == len(self._pushed_streams):
                self.finish()
        elif isinstance(event, h2.events.PushedStreamReceived):
            stream = self.from_push_stream(event)
            self._pushed_streams[event.pushed_stream_id] = stream
        else:
            logger.warning('ignored event: %r, %r', event, event.__dict__)
    def finish(self):
        """Assemble the HTTP2Response (handling redirects) and deliver it."""
        self._remove_timeout()
        self._unregister_unfinished_streams()
        if self._decompressor:
            # Flush any buffered tail of the gzip stream.
            self._data_received(self._decompressor.flush())
        data = b''.join(self.chunks)
        original_request = getattr(self.request, "original_request",
                                   self.request)
        new_request = None
        if (self.request.follow_redirects and
                self.request.max_redirects > 0 and
                self.code in (301, 302, 303, 307)):
            assert isinstance(self.request, _RequestProxy)
            new_request = copy.copy(self.request.request)
            new_request.url = urlparse.urljoin(self.request.url,
                                               self.headers["Location"])
            new_request.max_redirects = self.request.max_redirects - 1
            del new_request.headers["Host"]
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
            # Client SHOULD make a GET request after a 303.
            # According to the spec, 302 should be followed by the same
            # method as the original request, but in practice browsers
            # treat 302 the same as 303, and many servers use 302 for
            # compatibility with pre-HTTP/1.1 user agents which don't
            # understand the 303 status.
            if self.code in (302, 303):
                new_request.method = "GET"
                new_request.body = None
                for h in ["Content-Length", "Content-Type",
                          "Content-Encoding", "Transfer-Encoding"]:
                    try:
                        del self.request.headers[h]
                    except KeyError:
                        pass
            new_request.original_request = original_request
        if self.request.streaming_callback:
            # Chunks already went to the streaming callback; buffer is empty.
            buff = io.BytesIO()
        else:
            buff = io.BytesIO(data)  # TODO: don't require one big string?
        response = HTTP2Response(
            original_request, self.code, reason=self.reason,
            headers=self.headers, request_time=self.io_loop.time() - self.start_time,
            buffer=buff, effective_url=self.request.url,
            pushed_responses=self._pushed_responses.values(),
            new_request=new_request,
        )
        self._run_callback(response)
    def _data_received(self, chunk):
        # Route decompressed data either to the streaming callback or to
        # the in-memory chunk list.
        if self.request.streaming_callback is not None:
            self.request.streaming_callback(chunk)
        else:
            self.chunks.append(chunk)
    def data_received(self, chunk):
        if self._decompressor:
            compressed_data = chunk
            decompressed = self._decompressor.decompress(compressed_data, 0)
            if decompressed:
                self._data_received(decompressed)
        else:
            self._data_received(chunk)
    def handle_exception(self, typ, error, tb):
        """Convert exceptions (incl. timeouts) into a 599 response."""
        if isinstance(error, _RequestTimeout):
            if self._stream_ended:
                # Response actually completed; deliver it despite the timeout.
                self.finish()
                return True
            else:
                error = HTTPError(599, "Timeout")
        self._remove_timeout()
        self._unregister_unfinished_streams()
        if hasattr(self, 'stream_id'):
            self.context.remove_stream_delegate(self.stream_id)
            # FIXME: our nginx server will simply reset stream,
            # without increase the window size which consumed by
            # queued data frame which was belongs to the stream we're resetting
            # self.context.reset_stream(self.stream_id, flush=True)
            self.context.reset_stream_ids.append(self.stream_id)
        # NOTE(review): __traceback__ is a py3 attribute; on py2 this just
        # stashes the traceback object on the exception for callers.
        error.__traceback__ = tb
        response = HTTP2Response(
            self.request, 599, error=error,
            request_time=self.io_loop.time() - self.start_time,
        )
        self._run_callback(response)
        return True
    def _unregister_unfinished_streams(self):
        # Drop delegates for pushed streams that never completed.
        for stream_id in self._pushed_streams:
            if stream_id not in self._pushed_responses:
                self.context.remove_stream_delegate(stream_id)
    def _remove_timeout(self):
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None
    def _on_timeout(self):
        # Raise into the stack context; handle_exception turns it into a
        # 599 response (or delivers the finished response, if complete).
        self._timeout = None
        self.connection_timeout = True
        raise _RequestTimeout()
    def on_connection_close(self, reason=None):
        # Raise-and-catch so handle_exception receives real exc_info.
        try:
            raise HTTP2ConnectionClosed(reason)
        except Exception:
            self.handle_exception(*sys.exc_info())
|
|
from operator import itemgetter
from corehq.apps.groups.models import Group
from corehq.apps.reports import util
from corehq.apps.reports.standard import CommCareUserMemoizer
from corehq.apps.reports.util import format_datatables_data, make_ctable_table_name
from .filters import ALL_CVSU_GROUP
from dimagi.utils.decorators.memoized import memoized
from sqlagg import AliasColumn
from sqlagg.columns import SimpleColumn, YearColumn, MonthColumn, YearQuarterColumn, SumColumn
from corehq.apps.reports.datatables import DataTablesColumnGroup
from corehq.apps.reports.sqlreport import DatabaseColumn, AggregateColumn, SqlData
def combine_month_year(year, month):
    """Return a (display label, sortable key) pair for a month and year.

    e.g. (2013, 5) -> ('May 2013', 201305).
    """
    month_abbrevs = (
        'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
        'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',
    )
    label = "%s %s" % (month_abbrevs[int(month) - 1], int(year))
    sort_key = int(year * 100 + month)
    return label, sort_key
def combine_quarter_year(year, quarter):
    """Return a (display label, sortable key) pair for a quarter and year.

    e.g. (2013, 2) -> ('2013 Q2', 20132).
    """
    label = "%s Q%s" % (int(year), int(quarter))
    sort_key = int(year * 10 + quarter)
    return label, sort_key
def format_year(year):
    # Wrap a bare year as datatables cell data: (display value, sort value).
    return format_datatables_data(int(year), int(year))
def format_date(value):
    # value is a (label, sort_key) pair as produced by combine_month_year
    # or combine_quarter_year.
    return format_datatables_data(value[0], value[1])
def make_trend(reportdata):
    """Wrap a report-data class to group by an adaptive date period.

    Returns a subclass of ``reportdata`` whose first column and group-by
    keys switch between month / quarter / year granularity depending on
    the length of the report's date span (see ``grouping``).
    """
    class TR(reportdata):
        chart_x_label = 'Date'
        @property
        @memoized
        def years(self):
            # Whole years in the span; relies on Python 2 integer division.
            delta = self.datespan.enddate - self.datespan.startdate
            return delta.days / 365
        @property
        @memoized
        def keys(self):
            # Key tuples matching the active group_by columns.
            if self.grouping == 'month':
                return self.month_keys
            elif self.grouping == 'quarter':
                return self.quarter_keys
            else:
                return self.year_keys
        @property
        @memoized
        def grouping(self):
            # <2 years -> monthly, <6 years -> quarterly, else yearly.
            if self.years < 2:
                return 'month'
            elif self.years < 6:
                return 'quarter'
            else:
                return 'year'
        @property
        @memoized
        def month_keys(self):
            # Every (year, month) pair in the span, as floats to match the
            # values the SQL year/month columns return.
            dt1 = self.datespan.startdate
            dt2 = self.datespan.enddate
            start_month = dt1.month
            end_months = (dt2.year - dt1.year) * 12 + dt2.month + 1
            # (m - 1) / 12 uses Python 2 integer division for the year carry.
            dates = [[float(yr), float(mn)] for (yr, mn) in (
                ((m - 1) / 12 + dt1.year, (m - 1) % 12 + 1) for m in range(start_month, end_months)
            )]
            return dates
        @property
        @memoized
        def quarter_keys(self):
            # Distinct (year, quarter) pairs derived from the month keys,
            # sorted chronologically.
            months = self.month_keys
            quarter_set = set([(t[0], (t[1] - 1) // 3 + 1) for t in months])
            return sorted(list(quarter_set), key=lambda x: int(x[0] * 10 + x[1]))
        @property
        @memoized
        def year_keys(self):
            # One [year] key per calendar year in the span, inclusive.
            dt1 = self.datespan.startdate
            dt2 = self.datespan.enddate
            return [[year] for year in range(dt1.year, dt2.year + 1)]
        @property
        def group_by(self):
            # SQL group-by columns matching the active granularity.
            if self.grouping == 'month':
                return ['year', 'month']
            elif self.grouping == 'quarter':
                return ['year', 'quarter']
            else:
                return ['year']
        @property
        def columns(self):
            # Replace the wrapped report's first column with a date column
            # at the active granularity.
            cols = super(TR, self).columns
            if self.grouping == 'month':
                cols[0] = AggregateColumn(
                    "Month", combine_month_year,
                    [YearColumn('date', alias='year'), MonthColumn('date', alias='month')],
                    format_fn=format_date)
            elif self.grouping == 'quarter':
                cols[0] = AggregateColumn(
                    "Quarter", combine_quarter_year,
                    [YearColumn('date', alias='year'), YearQuarterColumn('date', alias='quarter')],
                    format_fn=format_date)
            else:
                cols[0] = DatabaseColumn("Year", YearColumn('date', alias='year'), format_fn=format_year)
            return cols
    return TR
class BaseSqlData(SqlData):
    """Base CVSU report data source.

    Groups either by district (group_id) or by individual user, depending
    on whether the selected group is the special ALL_CVSU_GROUP.  Config
    entries are copied onto the instance as attributes.
    """
    has_total_column = True

    def __init__(self, config=None):
        # Bug fix: guard against config=None — the default previously
        # crashed on None.items().
        self.config = config or {}
        # Expose config entries as attributes (without clobbering
        # existing class attributes/properties).
        for slug, value in self.config.items():
            if not hasattr(self, slug):
                setattr(self, slug, value)

    @property
    def location_column(self):
        # First report column: district name or username.
        if self.group_by_district:
            return DatabaseColumn("Location", SimpleColumn('group_id'), format_fn=self.groupname, sort_type=None)
        else:
            return DatabaseColumn("Location", SimpleColumn('user_id'), format_fn=self.username, sort_type=None)

    @property
    def group_by_district(self):
        # The ALL_CVSU_GROUP selection means "aggregate by district".
        return self.group_id == ALL_CVSU_GROUP

    @property
    def filters(self):
        filters = ['date between :startdate and :enddate']
        if not self.group_by_district:
            filters.append('"user_id" in :users')
        return filters

    @property
    def filter_values(self):
        users = tuple([user.user_id for user in self.users])
        return dict(startdate=self.datespan.startdate,
                    enddate=self.datespan.enddate,
                    users=users)

    @property
    def data(self):
        if not self.users:
            # don't bother querying if there are no users
            return {}
        return super(BaseSqlData, self).data

    @property
    def group_by(self):
        if self.group_by_district:
            return ['group_id']
        else:
            return ['user_id']

    @property
    @memoized
    def group(self):
        if self.group_id:
            return Group.get(self.group_id)

    @property
    @memoized
    def users(self):
        # When a specific user is selected, ignore the group filter.
        group = self.group if not self.user_id else None
        # NOTE(review): user_ids may be (None,) when no user is selected;
        # get_all_users_by_domain appears to tolerate that — confirm.
        user_ids = (self.user_id,)
        users = list(util.get_all_users_by_domain(
            domain=self.domain,
            user_ids=user_ids,
            group=group,
            simplified=True,
            CommCareUser=CommCareUserMemoizer()
        ))
        return sorted(users, key=itemgetter('raw_username'))

    @property
    @memoized
    def keys(self):
        # Row keys: one per district group, or one per user.
        if self.group_by_district:
            return [[g.get_id] for g in Group.by_domain(self.domain) if g.get_id != ALL_CVSU_GROUP]
        else:
            return [[user.user_id] for user in self.users]

    @property
    @memoized
    def usernames(self):
        return {user.user_id: user.raw_username for user in self.users}

    def username(self, user_id):
        # Fall back to the raw id when the user is unknown.
        try:
            username = self.usernames[user_id]
        except KeyError:
            username = user_id
        return format_datatables_data(username, username)

    def groupname(self, group_id):
        # NOTE(review): Group.get on a missing doc likely raises
        # ResourceNotFound rather than KeyError — confirm this fallback
        # actually triggers.
        try:
            groupname = Group.get(group_id).name
        except KeyError:
            groupname = group_id
        return format_datatables_data(groupname, groupname)
class AgeGenderFilteredReport(BaseSqlData):
    """Report base that adds optional sex and age-bracket filtering."""

    @property
    def filters(self):
        clauses = super(AgeGenderFilteredReport, self).filters
        if self.gender:
            clauses.append('sex = :sex')
        if self.age:
            # Open-ended lower brackets use a plain upper bound; the two
            # closed brackets use a BETWEEN clause.
            if self.age in ('lt5', 'lt18'):
                clauses.append('age < :ageupper')
            elif self.age in ('5-18', 'gte18'):
                clauses.append('age between :agelower and :ageupper')
        return clauses

    @property
    def filter_values(self):
        values = super(AgeGenderFilteredReport, self).filter_values
        if self.gender:
            values['sex'] = self.gender
        if self.age:
            # slug -> (lower bound or None, upper bound). 500 is an
            # effectively-unbounded ceiling for the adult bracket.
            bounds = {'lt5': (None, 5),
                      '5-18': (5, 18),
                      'lt18': (None, 18),
                      'gte18': (18, 500)}
            lower, upper = bounds.get(self.age, (None, None))
            if lower is not None:
                values['agelower'] = lower
            if upper is not None:
                values['ageupper'] = upper
        return values
class ChildProtectionData(AgeGenderFilteredReport):
    """Counts of reported abuse incidents, broken down by abuse category."""

    title = 'Number and Type of Incidents of Abuse Reported at CVSU'
    chart_x_label = 'CVSU Location'
    chart_y_label = 'Number of incidents'
    table_name = make_ctable_table_name('cvsulive_UnicefMalawiFluff')

    @property
    def columns(self):
        """Location column followed by one sum column per abuse category."""
        category_group = DataTablesColumnGroup("Category of abuse")
        categories = [
            ("Physical", 'abuse_category_physical_total'),
            ("Sexual", 'abuse_category_sexual_total'),
            ("Emotional", 'abuse_category_psychological_total'),
            ("Neglect", 'abuse_category_neglect_total'),
            ("Exploitation", 'abuse_category_exploitation_total'),
            ("Other", 'abuse_category_other_total'),
            ("Total incidents reported", 'abuse_category_total_total'),
        ]
        cols = [self.location_column]
        cols.extend(
            DatabaseColumn(header, SumColumn(slug), header_group=category_group)
            for header, slug in categories)
        return cols
class ChildrenInHouseholdData(AgeGenderFilteredReport):
    """Children living in households where abuse was reported."""

    title = 'Number of Children in Survivor Household'
    chart_x_label = 'CVSU Location'
    chart_y_label = 'Number of children'
    table_name = make_ctable_table_name('cvsulive_UnicefMalawiFluff')
    has_total_column = False

    @property
    def columns(self):
        """Location column plus the two household child counts."""
        abused = DatabaseColumn("Children per household experiencing abuse",
                                SumColumn('abuse_children_abused_total'))
        in_household = DatabaseColumn("Total number of children in household",
                                      SumColumn('abuse_children_in_household_total'))
        return [self.location_column, abused, in_household]
class CVSUActivityData(BaseSqlData):
    """Counts of the activity reports submitted per location."""

    title = 'Activities Performed'
    chart_x_label = 'CVSU Location'
    chart_y_label = 'Number of reports'
    table_name = make_ctable_table_name('cvsulive_UnicefMalawiFluff')

    @property
    def columns(self):
        """Per-activity sums plus a computed incidents+outreach total."""
        total_column = AggregateColumn(
            "Total", self.sum,
            [AliasColumn('incidents_total'), AliasColumn('outreach_total')])
        return [
            self.location_column,
            DatabaseColumn("Incidents of Abuse", SumColumn('incidents_total')),
            DatabaseColumn("Outreach activities", SumColumn('outreach_total')),
            DatabaseColumn("IGA Reports", SumColumn('iga_total')),
            total_column,
        ]

    def sum(self, no_incidents, outreach):
        """Add the two counts, treating None (no rows) as zero."""
        total = 0
        for count in (no_incidents, outreach):
            total += count or 0
        return total
class CVSUServicesData(BaseSqlData):
    """Counts of services provided to survivors, per location."""

    title = 'Services Provided'
    chart_x_label = 'CVSU Location'
    chart_y_label = 'Number of incidents'
    table_name = make_ctable_table_name('cvsulive_UnicefMalawiFluff')

    @property
    def columns(self):
        """Location column followed by one sum column per service type."""
        services = [
            ("Counselling", 'service_counselling_total'),
            ("Psychosocial Support", 'service_psychosocial_support_total'),
            ("First Aid", 'service_first_aid_total'),
            ("Shelter", 'service_shelter_total'),
            ("Referral", 'service_referral_total'),
            ("Mediation", 'service_mediation_total'),
            ("Other", 'service_other_total'),
            ("Total", 'service_total_total'),
        ]
        cols = [self.location_column]
        cols.extend(DatabaseColumn(header, SumColumn(slug))
                    for header, slug in services)
        return cols
class CVSUIncidentResolutionData(BaseSqlData):
    """Counts of how reported incidents were resolved, per location."""

    title = 'Incident Resolution'
    chart_x_label = 'CVSU Location'
    chart_y_label = 'Number of incidents'
    table_name = make_ctable_table_name('cvsulive_UnicefMalawiFluff')

    @property
    def columns(self):
        """Location column followed by one sum column per resolution type."""
        resolutions = [
            ("Resolved at CVSU", 'resolution_resolved_at_cvsu_total'),
            ("Referred to TA", 'resolution_referred_ta_total'),
            ("Referred to TA Court", 'resolution_referral_ta_court_total'),
            ("Referred to Police", 'resolution_referral_police_total'),
            ("Referred to Social Welfare", 'resolution_referral_social_welfare_total'),
            ("Referred to NGO", 'resolution_referral_ngo_total'),
            ("Referred to Other", 'resolution_referral_other_total'),
            ("Unresolved", 'resolution_unresolved_total'),
            ("Case Withdrawn", 'resolution_case_withdrawn_total'),
            ("Other", 'resolution_other_total'),
            ("Total", 'resolution_total_total'),
        ]
        cols = [self.location_column]
        cols.extend(DatabaseColumn(header, SumColumn(slug))
                    for header, slug in resolutions)
        return cols
# Time-trend variants of the reports above: make_trend wraps each report
# class so that rows are keyed by year/quarter/month (see the TR.group_by
# property above) instead of by location.
ChildProtectionDataTrend = make_trend(ChildProtectionData)
ChildrenInHouseholdDataTrend = make_trend(ChildrenInHouseholdData)
CVSUServicesDataTrend = make_trend(CVSUServicesData)
CVSUActivityDataTrend = make_trend(CVSUActivityData)
CVSUIncidentResolutionDataTrend = make_trend(CVSUIncidentResolutionData)
|
|
#!/usr/bin/env python
"""
insta_raider.py
usage: insta_raider.py [-h] [-n NUM_TO_DOWNLOAD] [-l LOG_LEVEL] [-m] [-v] [-p PROCESS] username directory
@amirkurtovic
"""
import argparse
import logging
import os
import os.path as op
import re
import email.utils as eut
import requests
import calendar
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
from urllib3.exceptions import InsecurePlatformWarning
import time
import warnings
import selenium.webdriver as webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import NoSuchElementException
import json
from datetime import datetime
from multiprocessing import Process
try:
from gi.repository import GExiv2
except ImportError:
GExiv2 = None
warnings.filterwarnings("ignore", category=InsecurePlatformWarning)
class PrivateUserError(Exception):
    """Signals that the requested Instagram profile is private."""
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
class MultiDownloader(Process):
    """Worker process that downloads a single file to disk."""

    def __init__(self, link, headers, name):
        """Remember the URL, the request headers and the target file name."""
        super(MultiDownloader, self).__init__()
        self.link = link
        self.headers = headers
        self.photo_name = name

    def run(self):
        """Fetch the URL and write it to disk (runs in the child process)."""
        response = requests.get(self.link, headers=self.headers)
        with open(self.photo_name, 'wb') as out_file:
            out_file.write(response.content)
        self.headers = response.headers
        last_modified = self.headers["last-modified"] if "last-modified" in self.headers else None
        if last_modified:
            # Mirror the server-side modification time onto the local file.
            modtime = calendar.timegm(eut.parsedate(last_modified))
            os.utime(self.photo_name, (modtime, modtime))
class InstaRaider(object):
    """Download photos (and optionally videos) from an Instagram profile.

    The profile page is rendered in a Selenium-driven Firefox instance so
    that lazily loaded posts appear in the page source; the media files
    themselves are fetched concurrently by ``MultiDownloader`` worker
    processes.
    """

    def __init__(self, username, directory, num_to_download=None,
                 log_level='info', use_metadata=False, get_videos=False,
                 process_number=100):
        """Set up state, logging and the Firefox webdriver.

        :param username: Instagram user whose media is downloaded
        :param directory: destination directory for downloaded files
        :param num_to_download: cap on the number of posts (None = all)
        :param log_level: name of a ``logging`` level, e.g. ``'info'``
        :param use_metadata: write caption/date into EXIF (needs GExiv2)
        :param get_videos: also download videos
        :param process_number: max concurrent download processes
        """
        self.username = username
        self.profile_url = self.get_url(username)
        self.directory = directory
        self.PAUSE = 1
        self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        self.headers = {'User-Agent': self.user_agent}
        self.html_source = None
        self.log_level = getattr(logging, log_level.upper())
        self.setup_logging(self.log_level)
        self.use_metadata = use_metadata
        self.get_videos = get_videos
        self.set_num_posts(num_to_download)
        self.setup_webdriver()
        self.process_number = process_number

    def __del__(self):
        # Robustness fix: __init__ may fail before `webdriver` is assigned;
        # __del__ must not raise in that case.
        if getattr(self, 'webdriver', None):
            self.webdriver.close()

    def get_url(self, path):
        """Return *path* joined onto the Instagram base URL."""
        return urlparse.urljoin('https://instagram.com', path)

    def set_num_posts(self, num_to_download=None):
        """Look up the profile's post count and remember the download cap."""
        self.num_posts = int(self.get_posts_count(self.profile_url) or 0)
        self.num_to_download = num_to_download

    def setup_logging(self, level=logging.INFO):
        """Attach a stream handler and apply the requested log level.

        Bug fix: `level` used to be ignored (the level was hard-coded to
        ``logging.INFO``), which silently broke the ``-l`` CLI option.
        """
        self.logger = logging.getLogger('instaraider')
        self.logger.addHandler(logging.StreamHandler())
        self.logger.setLevel(level)

    def log(self, *strings, **kwargs):
        """Join *strings* with spaces and emit them at ``level``."""
        level = kwargs.pop('level', logging.INFO)
        self.logger.log(level, u' '.join(str(s) for s in strings))

    def setup_webdriver(self):
        """Start Firefox with a spoofed user agent in a small side window."""
        self.profile = webdriver.FirefoxProfile()
        self.profile.set_preference("general.useragent.override", self.user_agent)
        self.webdriver = webdriver.Firefox(self.profile)
        self.webdriver.set_window_size(480, 320)
        self.webdriver.set_window_position(800, 0)

    def get_posts_count(self, url):
        """
        Given a url to Instagram profile, return number of photos posted
        (as a string of digits), or None if the count cannot be found.
        """
        response = requests.get(url)
        counts_code = re.search(r'\"media":\s*{"count":\s*\d+', response.text)
        if not counts_code:
            return None
        return re.findall(r'\d+', counts_code.group())[0]

    def log_in_user(self):
        """Send the user to the login page and wait until login succeeds."""
        driver = self.webdriver
        self.log('You need to login to access this profile.',
                 'Redirecting you to the login page in the browser.',
                 level=logging.WARN)
        driver.get(self.get_url('accounts/login/'))
        # Wait until user has been successfully logged in and redirected
        # to his/her feed.
        WebDriverWait(driver, 60).until(
            expected_conditions.presence_of_element_located(
                (By.CSS_SELECTOR, '.-cx-PRIVATE-FeedPage__feed'),
            )
        )
        self.log('User successfully logged in.', level=logging.INFO)
        self.set_num_posts()  # Have to set this again
        driver.get(self.profile_url)

    def load_instagram(self):
        """
        Using Selenium WebDriver, load Instagram page and return its source.
        """
        self.log(self.username, 'has', self.num_posts, 'posts on Instagram.')
        if self.num_to_download is not None:
            self.log("The first", self.num_to_download, "of them will be downloaded.")
        num_to_download = self.num_to_download or self.num_posts
        driver = self.webdriver
        # load Instagram profile and wait for PAUSE
        self.log("Loading Instagram profile...")
        driver.get(self.profile_url)
        driver.implicitly_wait(self.PAUSE)
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        try:
            # The advisory header is only shown when the profile is not
            # visible anonymously, i.e. a login is required.
            driver.find_element_by_css_selector(
                '.-cx-PRIVATE-ProfilePage__advisoryMessageHeader'
            )
        except NoSuchElementException:
            pass
        else:
            self.log_in_user()
        if (num_to_download > 12):
            # Only the first 12 posts load eagerly; click "load more" and
            # keep scrolling until enough posts are present.
            scroll_to_bottom = self.get_scroll_count(num_to_download)
            element = driver.find_element_by_css_selector('a._oidfu')
            time.sleep(0.5)
            element.click()
            for y in range(int(scroll_to_bottom)):
                self.scroll_page(driver)
        # After all profile photos have loaded, return the page source
        # for download_photos().
        time.sleep(1)
        source = driver.page_source
        return source

    def scroll_page(self, driver):
        """Scroll to the bottom and back up to trigger lazy loading."""
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(0.05)
        driver.execute_script("window.scrollTo(0, 0);")
        time.sleep(0.05)

    def get_scroll_count(self, count):
        """Number of scroll round-trips needed to load *count* posts."""
        return (int(count) - 12) / 12 + 1

    def validate(self):
        """
        returns True if Instagram username is valid and has posts
        """
        req = requests.get(self.profile_url)
        try:
            req.raise_for_status()
        except requests.exceptions.RequestException:
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            self.log('User', self.username, 'is not valid.',
                     level=logging.ERROR)
            return False
        if not self.num_posts:
            self.log('User', self.username, 'has no photos to download.',
                     level=logging.ERROR)
            return False
        return True

    def download_photos(self):
        """
        Given source code for loaded Instagram page,
        extract all hrefs and download full-resolution photos.
        """
        num_to_download = self.num_to_download or self.num_posts
        if self.html_source is None:
            self.html_source = self.load_instagram()
        # check if directory exists, if not, make it
        if not op.exists(self.directory):
            os.makedirs(self.directory)
        # index for progress bar
        photos_saved = 0
        self.log("Saving photos to", self.directory)
        links = self.find_links()
        downloaders = []
        for link in links:
            photo_url = link['display_src']
            photo_url = photo_url.replace('\\', '')
            # Strip the resize/crop path segments to get full resolution.
            photo_url = re.sub(r'/s\d+x\d+/', '/', photo_url)
            photo_url = re.sub(r'/\w+\.\d+\.\d+\.\d+/', '/', photo_url)
            caption = link.get('caption')
            date_time = link.get('date_time')
            photo_basename = op.basename(photo_url)
            photo_name = op.join(self.directory, photo_basename)
            # save full-resolution photo if its new
            if not op.isfile(photo_name):
                # Bound the number of live worker processes.
                if len(downloaders) > self.process_number:
                    downloaders.pop(0).join()
                downloader = MultiDownloader(photo_url, self.headers, photo_name)
                downloaders.append(downloader)
                downloader.start()
                photos_saved += 1
                self.log('Downloaded file {}/{} ({}).'.format(
                    photos_saved, num_to_download, photo_basename))
                # put info from Instagram post into image metadata
                # NOTE(review): the worker may still be writing the file at
                # this point; tagging could race the download -- confirm.
                if self.use_metadata:
                    self.add_metadata(photo_name, caption, date_time)
            else:
                self.log('Skipping file', photo_name, 'as it already exists.')
            if photos_saved >= num_to_download:
                break
        for downloader in downloaders:
            # Process.join() returns None; previously its result was bound
            # to unused variables, wrongly suggesting the worker's headers
            # could be collected here (workers run in separate processes).
            downloader.join()
        self.log('Saved', photos_saved, 'files to', self.directory)

    def find_links(self):
        """
        Find all image urls/metadata in html_source.

        Returns a list of dicts.
        e.g., [{'display_src': 'http://image.url',
                'caption': 'some text from Instagram post',
                'date_time': '1448420058.0'},
               {...},]
        'display_src' must be present in each dict;
        the other keys are optional.
        """
        photos = []
        if self.use_metadata:
            if not GExiv2:
                self.use_metadata = False
                self.log('GExiv2 python module not found.',
                         'Images will not be tagged.')
        try:
            json_data = re.search(r'(?s)<script [^>]*>window\._sharedData'
                                  r'.*?"nodes".+?</script>',
                                  self.html_source)
            json_data = re.search(r'{.+}', json_data.group(0))
            json_data = json.loads(json_data.group(0))
            photos = list(gen_dict_extract('nodes', json_data))[0]
            # find profile_pic
            profile_pic = list(gen_dict_extract('profile_pic_url', json_data))
            if profile_pic:
                # todo (possible): mark this dict as the profile pic so it
                # can be named "profile.jpg" and excluded from the -n count.
                profile_pic = [{'display_src': p} for p in profile_pic[:1]]
                photos = profile_pic + photos
        except Exception:
            # Bug fix: this regex fallback used to sit in an `else:` clause,
            # so a *successful* JSON extraction was immediately overwritten
            # by the regex-scraped URLs (discarding captions/dates). It now
            # runs only when the JSON route fails.
            if self.use_metadata:
                self.use_metadata = False
                self.log('Could not find any image metadata.',
                         'Photos will not be tagged.')
            links = re.finditer(r'src="([https]+:...[\/\w \.-]*..[\/\w \.-]*'
                                r'..[\/\w \.-]*..[\/\w \.-].jpg)',
                                self.html_source)
            photos = [{'display_src': m.group(1)} for m in links]
        for photo in photos:
            date = photo.get('date')
            if date:
                try:
                    photo['date_time'] = datetime.fromtimestamp(date)
                except Exception:
                    photo['date_time'] = None
        return photos

    def download_videos(self):
        """
        Given source code for loaded Instagram page:
        - discover all video wrapper links
        - activate all links to load video url
        - extract and download video url
        """
        if not self.get_videos:
            return
        # We need to use the driver to query the video wrappers
        driver = self.webdriver
        if self.html_source is None:
            self.html_source = self.load_instagram()
        if not op.exists(self.directory):
            os.makedirs(self.directory)
        videos_saved = 0
        self.log("Saving videos to", self.directory)
        # Find all of the video wrappers
        video_wrapper_elements = driver.find_elements_by_xpath('.//*[@id="react-root"]/section/main/article/div/div[1]/div/a[.//*[@Class="_1lp5e"]]')
        video_wrapper_urls = [link.get_attribute('href') for link in video_wrapper_elements]
        num_to_download = len(video_wrapper_urls)
        downloaders = []
        for video_wrapper in video_wrapper_urls:
            # Fetch the link of the video wrapper
            driver.get(video_wrapper)
            response = requests.get(video_wrapper)
            video_url = re.search(r'[https]+:...[\/\w \.-]*..[\/\w \.-]*'
                                  r'..[\/\w \.-]*..[\/\w \.-].mp4',
                                  response.text).group()
            # Name the local file after the last URL path component.
            video_name = op.join(self.directory, video_url.split('/')[-1])
            if not op.isfile(video_name):
                if len(downloaders) > self.process_number:
                    downloaders.pop(0).join()
                downloader = MultiDownloader(video_url, self.headers, video_name)
                downloaders.append(downloader)
                downloader.start()
                videos_saved += 1
                self.log('Downloaded file {}/{} ({}).'.format(
                    videos_saved, num_to_download, op.basename(video_name)))
            else:
                self.log('Skipping file', video_name, 'as it already exists.')
            if videos_saved >= num_to_download:
                break
        for downloader in downloaders:
            # Wait for all remaining workers (join() returns None).
            downloader.join()
        self.log('Saved', videos_saved, 'videos to', self.directory)

    def add_metadata(self, photo_name, caption, date_time):
        """
        Tag downloaded photos with metadata from associated Instagram post.

        If GExiv2 is not installed, do nothing.
        """
        if GExiv2:
            if caption or date_time:
                # todo: improve error handling
                try:
                    exif = GExiv2.Metadata(photo_name)
                    if caption:
                        try:
                            exif.set_comment(caption)
                        except Exception:
                            self.log('Error setting image caption metadata.')
                    if date_time:
                        try:
                            exif.set_date_time(date_time)
                        except Exception:
                            self.log('Error setting image date metadata.')
                    exif.save_file()
                except Exception:
                    # Best-effort: unreadable files are silently skipped.
                    pass
def gen_dict_extract(key, var):
    """
    Recursively search for given dict key in nested dicts/lists and yield
    every value found under that key.

    from http://stackoverflow.com/a/29652561
    author: hexerei software

    Bug fix: the original tested for the Python-2-only ``iteritems``
    method, so under Python 3 (which this script supports via the
    urllib.parse import fallback) it silently yielded nothing.
    """
    if hasattr(var, 'items'):
        for k, v in var.items():
            if k == key:
                yield v
            if isinstance(v, dict):
                for result in gen_dict_extract(key, v):
                    yield result
            elif isinstance(v, list):
                for d in v:
                    for result in gen_dict_extract(key, d):
                        yield result
def main():
    """Parse command-line arguments, then run the raider."""
    cli = argparse.ArgumentParser(description='InstaRaider')
    cli.add_argument('username', help='Instagram username')
    cli.add_argument('directory', help='Where to save the images')
    cli.add_argument('-n', '--num-to-download',
                     help='Number of posts to download', type=int)
    cli.add_argument('-l', '--log-level', help="Log level", default='info')
    cli.add_argument('-m', '--add_metadata',
                     help=("Add metadata (caption/date) from Instagram "
                           "post into downloaded images' exif tags "
                           "(requires GExiv2 python module)"),
                     action='store_true', dest='use_metadata')
    cli.add_argument('-v', '--get_videos',
                     help=("Download videos"),
                     action='store_true', dest='get_videos')
    cli.add_argument('-p', '--process',
                     help=("Number of concurrent processes to use"),
                     action='store', dest='process_number',
                     type=int, default=100)
    parsed = cli.parse_args()

    raider = InstaRaider(parsed.username,
                         op.expanduser(parsed.directory),
                         num_to_download=parsed.num_to_download,
                         log_level=parsed.log_level,
                         use_metadata=parsed.use_metadata,
                         get_videos=parsed.get_videos,
                         process_number=parsed.process_number)
    if not raider.validate():
        return
    raider.download_photos()
    raider.download_videos()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This bot applies to wikisource sites to upload text.
Text is uploaded to pages in Page ns, for a specified Index.
Text to be stored, if the page is not-existing, is preloaded from the file used
to create the Index page, making the upload feature independent from the format
of the file, as long as it is supported by the MW ProofreadPage extension.
As alternative, if '-ocr' option is selected,
https://tools.wmflabs.org/phetools OCR tool will be used to get text.
In this case, also already existing pages with quality value 'Not Proofread'
can be treated. '-force' will override existing page in this case.
The following parameters are supported:
# TODO: update params + handle quality level
-index:... name of the index page
-pages:<start>-<end>,...<start>-<end>,<start>-<end>
Page range to upload;
optional, start=1, end=djvu file number of images.
Page ranges can be specified as:
A-B -> pages A until B
A- -> pages A until number of images
A -> just page A
-B -> pages 1 until B
-showdiff: show difference between current text and new text when
saving the page
-ocr: use https://tools.wmflabs.org/phetools OCR tool to get text;
default is False, i.e. only not-(yet)-existing pages in Page
ns will be treated and text will be fetched via preload.
-force: overwrite existing pages;
default is False; valid only if '-ocr' is selected.
-summary: custom edit summary.
Use quotes if edit summary contains spaces.
-always don't bother asking to confirm any of the changes.
"""
#
# (C) Pywikibot team, 2016-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import itertools
import pywikibot
from pywikibot import i18n
from pywikibot.bot import SingleSiteBot
from pywikibot.proofreadpage import IndexPage, ProofreadPage
class UploadTextBot(SingleSiteBot):

    """
    A bot that uploads text-layer to Page:namespace.

    Text is fetched via preload as on Wikisource wikis, text can be preloaded
    only if a page does not exist, if an Index page is present.

    Works only on sites with Proofread Page extension installed.
    """

    def __init__(self, generator, **kwargs):
        """
        Constructor.

        @param generator: page generator
        @type generator: generator
        """
        option_defaults = {
            'showdiff': False,
            'force': False,
            'ocr': False,
            'summary': 'Bot: uploading text'
        }
        self.availableOptions.update(option_defaults)
        super(UploadTextBot, self).__init__(**kwargs)
        self.generator = generator

        # TODO: create i18 files
        # Fall back to the translated default edit summary when none given.
        if not self.getOption('summary'):
            self.options['summary'] = i18n.twtranslate(
                self.site, 'djvutext-creating')

    def treat(self, page):
        """Upload text to one ProofreadPage page.

        @param page: page to be treated.
        @type page: ProofreadPage
        @raises: pywikibot.Error
        """
        if not isinstance(page, ProofreadPage):
            raise pywikibot.Error('Page %s must be a ProofreadPage object.'
                                  % page)

        old_text = page.text if page.exists() else ''
        use_ocr = self.getOption('ocr')
        if use_ocr:
            page.body = page.ocr()

        # Existing pages are only overwritten when both -ocr and -force
        # were requested.
        may_overwrite = use_ocr and self.getOption('force')
        if page.exists() and not may_overwrite:
            pywikibot.output('Page %s already exists, not adding!' % page)
        else:
            self.userPut(page, old_text, page.text,
                         summary=self.getOption('summary'),
                         show_diff=self.getOption('showdiff'))
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    index = None
    pages = '1-'
    options = {}

    # Parse command line arguments.
    local_args = pywikibot.handle_args(args)
    for arg in local_args:
        arg, sep, value = arg.partition(':')
        if arg == '-index':
            index = value
        elif arg == '-pages':
            pages = value
        elif arg == '-showdiff':
            options['showdiff'] = True
        elif arg == '-summary':
            options['summary'] = value
        elif arg == '-ocr':
            options['ocr'] = True
        elif arg == '-force':
            options['force'] = True
        elif arg == '-always':
            options['always'] = True
        else:
            pywikibot.output('Unknown argument %s' % arg)

    # index is mandatory.
    if not index:
        pywikibot.bot.suggest_help(missing_parameters=['-index'])
        return False

    # '-force' can be used with '-ocr' only.
    if 'force' in options and 'ocr' not in options:
        pywikibot.error("'-force' can be used with '-ocr' option only.")
        return False

    site = pywikibot.Site()
    if not site.has_extension('ProofreadPage'):
        pywikibot.error('Site %s must have ProofreadPage extension.' % site)
        return False

    index = IndexPage(site, index)
    if not index.exists():
        pywikibot.error("Page %s doesn't exist." % index)
        return False

    # Parse the -pages parameter into a list of (start, end) tuples.
    # (Replaces the old `for i in range(len(pages))` index-mutation loop.)
    intervals = []
    for interval in pages.split(','):
        start, sep, end = interval.partition('-')
        start = int(start) if start else 1            # "-B" means from page 1
        if not sep:
            end = start                               # "A" -> single page
        else:
            end = int(end) if end else index.num_pages  # "A-" -> to the end
        intervals.append((start, end))

    # gen yields ProofreadPage objects.
    gen_list = []
    for start, end in sorted(intervals):
        gen = index.page_gen(start=start, end=end,
                             filter_ql=[1], content=False)
        gen_list.append(gen)
    gen = itertools.chain(*gen_list)

    pywikibot.output('\nUploading text to %s\n' % index.title(asLink=True))

    bot = UploadTextBot(gen, site=index.site, **options)
    bot.run()
if __name__ == '__main__':
try:
main()
except Exception:
pywikibot.error('Fatal error:', exc_info=True)
|
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.legacy.andor.andor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Low level driver wrapping atcore andor library.
Sources::
- Andor Manual
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import ctypes as ct
from lantz import Driver, Feat, Action
from lantz.errors import InstrumentError
from lantz.foreign import LibraryDriver
# Andor SDK3 (atcore) return codes mapped to their symbolic names, used to
# build InstrumentError messages in Andor._return_handler below.
_ERRORS = {
    0: 'SUCCESS',
    1: 'AT_ERR_NOTINITIALISED',
    # 1: 'AT_HANDLE_SYSTEM',  # TODO: same numeric code twice in the SDK
    # headers -- confirm against atcore.h; only the error name is kept here.
    2: 'AT_ERR_NOTIMPLEMENTED',
    3: 'AT_ERR_READONLY',
    4: 'AT_ERR_NOTREADABLE',
    5: 'AT_ERR_NOTWRITABLE',
    6: 'AT_ERR_OUTOFRANGE',
    7: 'AT_ERR_INDEXNOTAVAILABLE',
    8: 'AT_ERR_INDEXNOTIMPLEMENTED',
    9: 'AT_ERR_EXCEEDEDMAXSTRINGLENGTH',
    10: 'AT_ERR_CONNECTION',
    11: 'AT_ERR_NODATA',
    12: 'AT_ERR_INVALIDHANDLE',
    13: 'AT_ERR_TIMEDOUT',
    14: 'AT_ERR_BUFFERFULL',
    15: 'AT_ERR_INVALIDSIZE',
    16: 'AT_ERR_INVALIDALIGNMENT',
    17: 'AT_ERR_COMM',
    18: 'AT_ERR_STRINGNOTAVAILABLE',
    19: 'AT_ERR_STRINGNOTIMPLEMENTED',
    20: 'AT_ERR_NULL_FEATURE',
    21: 'AT_ERR_NULL_HANDLE',
    22: 'AT_ERR_NULL_IMPLEMENTED_VAR',
    23: 'AT_ERR_NULL_READABLE_VAR',
    24: 'AT_ERR_NULL_READONLY_VAR',
    25: 'AT_ERR_NULL_WRITABLE_VAR',
    26: 'AT_ERR_NULL_MINVALUE',
    27: 'AT_ERR_NULL_MAXVALUE',
    28: 'AT_ERR_NULL_VALUE',
    29: 'AT_ERR_NULL_STRING',
    30: 'AT_ERR_NULL_COUNT_VAR',
    31: 'AT_ERR_NULL_ISAVAILABLE_VAR',
    32: 'AT_ERR_NULL_MAXSTRINGLENGTH',
    33: 'AT_ERR_NULL_EVCALLBACK',
    34: 'AT_ERR_NULL_QUEUE_PTR',
    35: 'AT_ERR_NULL_WAIT_PTR',
    36: 'AT_ERR_NULL_PTRSIZE',
    37: 'AT_ERR_NOMEMORY',
    100: 'AT_ERR_HARDWARE_OVERFLOW',
    -1: 'AT_HANDLE_UNINITIALISED'
}
class Andor(LibraryDriver):
    """Driver for Andor cameras through the SDK3 ``atcore`` library.

    Thin pass-through wrappers around the AT_* entry points; every call is
    checked by ``_return_handler``, which raises InstrumentError on a
    non-zero SDK return code.
    """

    LIBRARY_NAME = 'atcore.dll'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.AT_H = ct.c_int()        # camera handle, filled in by open()
        self.AT_U8 = ct.c_ubyte()
        self.cameraIndex = ct.c_int(0)

    def _patch_functions(self):
        """Declare ctypes argument types for the SDK entry points.

        Out-parameters are declared ``c_void_p`` because the callers in
        this driver pass raw addresses obtained with ``ct.addressof``.
        Bug fix: the previous code used ``ct.addressof(ct.c_longlong)``
        etc. -- ``addressof`` on a *type* (not an instance) raises
        TypeError as soon as this method runs.
        """
        internal = self.lib.internal
        internal.AT_Command.argtypes = [ct.c_int, ct.c_wchar_p]
        internal.AT_GetInt.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_void_p]
        internal.AT_SetInt.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_longlong]
        internal.AT_GetFloat.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_void_p]
        internal.AT_SetFloat.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_double]
        internal.AT_GetBool.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_void_p]
        internal.AT_SetBool.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_bool]
        internal.AT_GetEnumerated.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_void_p]
        internal.AT_SetEnumerated.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_int]
        internal.AT_SetEnumString.argtypes = [ct.c_int, ct.c_wchar_p, ct.c_wchar_p]

    def _return_handler(self, func_name, ret_value):
        """Raise InstrumentError for any non-zero SDK return code."""
        if ret_value != 0:
            # .get avoids a secondary KeyError for codes missing from the
            # _ERRORS table (previously _ERRORS[ret_value] could raise).
            raise InstrumentError('{} ({})'.format(
                ret_value, _ERRORS.get(ret_value, 'UNKNOWN_ERROR')))
        return ret_value

    def initialize(self):
        """Initialize Library and open the first camera.
        """
        self.lib.AT_InitialiseLibrary()
        self.open()

    def finalize(self):
        """Finalize Library. Concluding function.
        """
        self.close()
        self.lib.AT_FinaliseLibrary()

    @Action()
    def open(self):
        """Open camera self.AT_H.
        """
        camidx = ct.c_int(0)
        self.lib.AT_Open(camidx, ct.addressof(self.AT_H))
        return self.AT_H

    @Action()
    def close(self):
        """Close camera self.AT_H.
        """
        self.lib.AT_Close(self.AT_H)

    def is_implemented(self, strcommand):
        """Checks if command is implemented.
        """
        result = ct.c_bool()
        command = ct.c_wchar_p(strcommand)
        self.lib.AT_IsImplemented(self.AT_H, command, ct.addressof(result))
        return result.value

    def is_writable(self, strcommand):
        """Checks if command is writable.
        """
        result = ct.c_bool()
        command = ct.c_wchar_p(strcommand)
        self.lib.AT_IsWritable(self.AT_H, command, ct.addressof(result))
        return result.value

    def queuebuffer(self, bufptr, value):
        """Put buffer in queue.
        """
        value = ct.c_int(value)
        self.lib.AT_QueueBuffer(self.AT_H, ct.byref(bufptr), value)

    def waitbuffer(self, ptr, bufsize):
        """Wait for next buffer ready (20 s timeout).
        """
        timeout = ct.c_int(20000)
        self.lib.AT_WaitBuffer(self.AT_H, ct.byref(ptr), ct.byref(bufsize), timeout)

    def command(self, strcommand):
        """Run command.
        """
        command = ct.c_wchar_p(strcommand)
        self.lib.AT_Command(self.AT_H, command)

    def getint(self, strcommand):
        """Run command and get Int return value.
        """
        result = ct.c_longlong()
        command = ct.c_wchar_p(strcommand)
        self.lib.AT_GetInt(self.AT_H, command, ct.addressof(result))
        return result.value

    def setint(self, strcommand, value):
        """SetInt function.
        """
        command = ct.c_wchar_p(strcommand)
        value = ct.c_longlong(value)
        self.lib.AT_SetInt(self.AT_H, command, value)

    def getfloat(self, strcommand):
        """Run command and get Float return value.
        """
        result = ct.c_double()
        command = ct.c_wchar_p(strcommand)
        self.lib.AT_GetFloat(self.AT_H, command, ct.addressof(result))
        return result.value

    def setfloat(self, strcommand, value):
        """Set command with Float value parameter.
        """
        command = ct.c_wchar_p(strcommand)
        value = ct.c_double(value)
        self.lib.AT_SetFloat(self.AT_H, command, value)

    def getbool(self, strcommand):
        """Run command and get Bool return value.
        """
        result = ct.c_bool()
        command = ct.c_wchar_p(strcommand)
        self.lib.AT_GetBool(self.AT_H, command, ct.addressof(result))
        return result.value

    def setbool(self, strcommand, value):
        """Set command with Bool value parameter.
        """
        command = ct.c_wchar_p(strcommand)
        value = ct.c_bool(value)
        self.lib.AT_SetBool(self.AT_H, command, value)

    def getenumerated(self, strcommand):
        """Run command and get the Enumerated (index) return value.

        Bug fix: the fetched value was previously discarded (no return).
        """
        result = ct.c_int()
        command = ct.c_wchar_p(strcommand)
        self.lib.AT_GetEnumerated(self.AT_H, command, ct.addressof(result))
        return result.value

    def setenumerated(self, strcommand, value):
        """Set command with Enumerated (index) value parameter.

        Bug fix (resolves the old TODO): the index was coerced with
        ``ct.c_bool``, collapsing every nonzero index to 1;
        AT_SetEnumerated takes an int index.
        """
        command = ct.c_wchar_p(strcommand)
        value = ct.c_int(value)
        self.lib.AT_SetEnumerated(self.AT_H, command, value)

    def setenumstring(self, strcommand, item):
        """Set command with EnumeratedString value parameter.
        """
        command = ct.c_wchar_p(strcommand)
        item = ct.c_wchar_p(item)
        self.lib.AT_SetEnumString(self.AT_H, command, item)

    def flush(self):
        """Flush the SDK's buffer queue."""
        self.lib.AT_Flush(self.AT_H)
if __name__ == '__main__':
    # Demo: acquire a single full-frame image and display it.
    import numpy as np
    import ctypes as ct
    from andor import Andor
    from matplotlib import pyplot as plt

    with Andor() as andor:
        andor.flush()
        width = andor.getint("SensorWidth")
        height = andor.getint("SensorHeight")
        length = width * height
        #andor.setenumerated("FanSpeed", 2)
        andor.getfloat("SensorTemperature")
        andor.setfloat("ExposureTime", 0.001)
        andor.setenumstring("PixelReadoutRate", "100 MHz")
        andor.setenumstring("PixelEncoding", "Mono32")
        #andor.setenumstring("PixelEncoding", "Mono16")
        imagesizebytes = andor.getint("ImageSizeBytes")
        # Bug fix: create_string_buffer requires bytes on Python 3
        # (this module is Python 3: see super().__init__() above).
        userbuffer = ct.create_string_buffer(b' ' * imagesizebytes)
        andor.queuebuffer(userbuffer, imagesizebytes)
        imsize = ct.c_int(1)
        ubuffer = ct.create_string_buffer(b" " * 1)
        andor.command("AcquisitionStart")
        andor.waitbuffer(ubuffer, imsize)
        andor.command("AcquisitionStop")
        andor.flush()
        # np.frombuffer replaces np.fromstring, which was deprecated and
        # then removed from numpy. reshape instead of assigning .shape
        # because frombuffer returns a read-only view.
        image = np.frombuffer(userbuffer, dtype=np.uint32, count=length)
        #image = np.frombuffer(userbuffer, dtype=np.uint16, count=length)
        image = image.reshape(height, width)
        im = plt.imshow(image, cmap='gray')
        plt.show()
        print(image.min(), image.max(), image.mean())
|
|
""" Data objects in group "Parametrics"
"""
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class ParametricSetValueForRun(DataObject):
    """ Corresponds to IDD object `Parametric:SetValueForRun`
        Parametric objects allow a set of multiple simulations to be defined in a single idf
        file. The parametric preprocessor scans the idf for Parametric:* objects then creates
        and runs multiple idf files, one for each defined simulation.
        The core parametric object is Parametric:SetValueForRun which defines the name
        of a parameters and sets the parameter to different values depending on which
        run is being simulated.
    """
    # IDD metadata consumed by the DataObject base class: one required
    # `Name` field plus one extensible "Value for Run N" column per run.
    _schema = {'extensible-fields': OrderedDict([(u'value for run 1',
                                                  {'name': u'Value for Run 1',
                                                   'pyname': u'value_for_run_1',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'alpha'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Parametrics',
               'min-fields': 2,
               'name': u'Parametric:SetValueForRun',
               'pyname': u'ParametricSetValueForRun',
               'required-object': False,
               'unique-object': False}
    @property
    def name(self):
        """field `Name`
        |  Parameter Name
        |  Must begin with the dollar sign character. The second character must be a letter.
        |  Remaining characters may only be letters or numbers. No spaces allowed.
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]
    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value
    def add_extensible(self,
                       value_for_run_1=None,
                       ):
        """Add values for extensible fields.
        Args:
            value_for_run_1 (str): value for IDD Field `Value for Run 1`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        # check_value validates against the _schema entry before storing.
        value_for_run_1 = self.check_value("Value for Run 1", value_for_run_1)
        vals.append(value_for_run_1)
        self._extdata.append(vals)
    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata
    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`
        Args:
            extensibles (list): nested list of extensible values
        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class ParametricLogic(DataObject):
    """ Corresponds to IDD object `Parametric:Logic`
        This object allows some types of objects to be included for some parametric cases and
        not for others. For example, you might want an overhang on a window in some
        parametric runs and not others. A single Parametric:Logic object is allowed per file.
        Consult the Input Output Reference for available commands and syntax.
    """
    # IDD metadata: required `Name` plus one extensible logic line per row.
    # Note `unique-object` is True -- only one Parametric:Logic per idf.
    _schema = {'extensible-fields': OrderedDict([(u'parametric logic line 1',
                                                  {'name': u'Parametric Logic Line 1',
                                                   'pyname': u'parametric_logic_line_1',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'alpha'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Parametrics',
               'min-fields': 2,
               'name': u'Parametric:Logic',
               'pyname': u'ParametricLogic',
               'required-object': False,
               'unique-object': True}
    @property
    def name(self):
        """field `Name`
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]
    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value
    def add_extensible(self,
                       parametric_logic_line_1=None,
                       ):
        """Add values for extensible fields.
        Args:
            parametric_logic_line_1 (str): value for IDD Field `Parametric Logic Line 1`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        parametric_logic_line_1 = self.check_value(
            "Parametric Logic Line 1",
            parametric_logic_line_1)
        vals.append(parametric_logic_line_1)
        self._extdata.append(vals)
    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata
    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`
        Args:
            extensibles (list): nested list of extensible values
        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class ParametricRunControl(DataObject):
    """ Corresponds to IDD object `Parametric:RunControl`
        Controls which parametric runs are simulated. This object is optional. If it is not
        included, then all parametric runs are performed.
    """
    # IDD metadata: each extensible row is a Yes/No switch for run N
    # (default Yes). Unique object -- at most one per idf file.
    _schema = {'extensible-fields': OrderedDict([(u'perform run 1',
                                                  {'name': u'Perform Run 1',
                                                   'pyname': u'perform_run_1',
                                                   'default': u'Yes',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'accepted-values': [u'Yes',
                                                                       u'No'],
                                                   'autocalculatable': False,
                                                   'type': 'alpha'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Parametrics',
               'min-fields': 2,
               'name': u'Parametric:RunControl',
               'pyname': u'ParametricRunControl',
               'required-object': False,
               'unique-object': True}
    @property
    def name(self):
        """field `Name`
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]
    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value
    def add_extensible(self,
                       perform_run_1="Yes",
                       ):
        """Add values for extensible fields.
        Args:
            perform_run_1 (str): value for IDD Field `Perform Run 1`
                Default value: Yes
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        perform_run_1 = self.check_value("Perform Run 1", perform_run_1)
        vals.append(perform_run_1)
        self._extdata.append(vals)
    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata
    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`
        Args:
            extensibles (list): nested list of extensible values
        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class ParametricFileNameSuffix(DataObject):
    """ Corresponds to IDD object `Parametric:FileNameSuffix`
        Defines the suffixes to be appended to the idf and output file names for each
        parametric run. If this object is omitted, the suffix will default to the run number.
    """
    # IDD metadata: one extensible suffix string per parametric run.
    # Unique object -- at most one per idf file.
    _schema = {'extensible-fields': OrderedDict([(u'suffix for file name in run 1',
                                                  {'name': u'Suffix for File Name in Run 1',
                                                   'pyname': u'suffix_for_file_name_in_run_1',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'alpha'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Parametrics',
               'min-fields': 2,
               'name': u'Parametric:FileNameSuffix',
               'pyname': u'ParametricFileNameSuffix',
               'required-object': False,
               'unique-object': True}
    @property
    def name(self):
        """field `Name`
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]
    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value
    def add_extensible(self,
                       suffix_for_file_name_in_run_1=None,
                       ):
        """Add values for extensible fields.
        Args:
            suffix_for_file_name_in_run_1 (str): value for IDD Field `Suffix for File Name in Run 1`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        suffix_for_file_name_in_run_1 = self.check_value(
            "Suffix for File Name in Run 1",
            suffix_for_file_name_in_run_1)
        vals.append(suffix_for_file_name_in_run_1)
        self._extdata.append(vals)
    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata
    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`
        Args:
            extensibles (list): nested list of extensible values
        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
|
|
# -*- coding: utf-8 -*-
"""
celery.app
~~~~~~~~~~
Celery Application.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import threading
from .. import registry
from ..utils import cached_property, instantiate
from . import annotations
from . import base
class _TLS(threading.local):
    """Per-thread storage for the current app and currently executing task."""
    #: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute
    #: sets this, so it will always contain the last instantiated app,
    #: and is the default app returned by :func:`app_or_default`.
    current_app = None
    #: The currently executing task.
    current_task = None
# Single module-level thread-local instance holding the per-thread state.
_tls = _TLS()
class AppPickler(object):
    """Default application pickler/unpickler.

    Invoked during unpickling with the app class and the positional state
    captured by ``App.__reduce_args__``; returns a freshly built app.
    """
    def __call__(self, cls, *args):
        kwargs = self.build_kwargs(*args)
        instance = self.construct(cls, **kwargs)
        self.prepare(instance, **kwargs)
        return instance
    def prepare(self, app, **kwargs):
        # Re-apply only the recorded configuration changes; everything
        # else is rebuilt from defaults by the constructor.
        app.conf.update(kwargs["changes"])
    def build_kwargs(self, *args):
        return self.build_standard_kwargs(*args)
    def build_standard_kwargs(self, main, changes, loader, backend, amqp,
            events, log, control, accept_magic_kwargs):
        # An unpickled app must never steal the "current app" slot.
        return {"main": main, "changes": changes, "loader": loader,
                "backend": backend, "amqp": amqp, "events": events,
                "log": log, "control": control, "set_as_current": False,
                "accept_magic_kwargs": accept_magic_kwargs}
    def construct(self, cls, **kwargs):
        return cls(**kwargs)
def _unpickle_app(cls, pickler, *args):
return pickler()(cls, *args)
class App(base.BaseApp):
    """Celery Application.
    :param main: Name of the main module if running as `__main__`.
    :keyword loader: The loader class, or the name of the loader class to use.
                     Default is :class:`celery.loaders.app.AppLoader`.
    :keyword backend: The result store backend class, or the name of the
                      backend class to use. Default is the value of the
                      :setting:`CELERY_RESULT_BACKEND` setting.
    :keyword amqp: AMQP object or class name.
    :keyword events: Events object or class name.
    :keyword log: Log object or class name.
    :keyword control: Control object or class name.
    :keyword set_as_current:  Make this the global current app.
    """
    #: Pickler class used by :meth:`__reduce__` to rebuild the app.
    Pickler = AppPickler
    def set_current(self):
        """Make this the current app for this thread."""
        _tls.current_app = self
    def on_init(self):
        # Hook invoked by BaseApp.__init__: optionally claim the
        # thread-local "current app" slot.
        if self.set_as_current:
            self.set_current()
    def create_task_cls(self):
        """Creates a base task class using default configuration
        taken from this app."""
        conf = self.conf
        from .task import BaseTask
        class Task(BaseTask):
            # Configuration values are captured once, at the time this
            # app's Task base class is created (not re-read per task).
            abstract = True
            app = self
            backend = self.backend
            exchange_type = conf.CELERY_DEFAULT_EXCHANGE_TYPE
            delivery_mode = conf.CELERY_DEFAULT_DELIVERY_MODE
            send_error_emails = conf.CELERY_SEND_TASK_ERROR_EMAILS
            error_whitelist = conf.CELERY_TASK_ERROR_WHITELIST
            serializer = conf.CELERY_TASK_SERIALIZER
            rate_limit = conf.CELERY_DEFAULT_RATE_LIMIT
            track_started = conf.CELERY_TRACK_STARTED
            acks_late = conf.CELERY_ACKS_LATE
            ignore_result = conf.CELERY_IGNORE_RESULT
            store_errors_even_if_ignored = \
                conf.CELERY_STORE_ERRORS_EVEN_IF_IGNORED
            accept_magic_kwargs = self.accept_magic_kwargs
        Task.__doc__ = BaseTask.__doc__
        return Task
    def Worker(self, **kwargs):
        """Create new :class:`~celery.apps.worker.Worker` instance."""
        return instantiate("celery.apps.worker:Worker", app=self, **kwargs)
    def WorkController(self, **kwargs):
        # Low-level worker controller (no CLI/apps wrapper).
        return instantiate("celery.worker:WorkController", app=self, **kwargs)
    def Beat(self, **kwargs):
        """Create new :class:`~celery.apps.beat.Beat` instance."""
        return instantiate("celery.apps.beat:Beat", app=self, **kwargs)
    def TaskSet(self, *args, **kwargs):
        """Create new :class:`~celery.task.sets.TaskSet`."""
        return instantiate("celery.task.sets:TaskSet",
                           app=self, *args, **kwargs)
    def worker_main(self, argv=None):
        """Run :program:`celeryd` using `argv`.  Uses :data:`sys.argv`
        if `argv` is not specified."""
        return instantiate("celery.bin.celeryd:WorkerCommand", app=self) \
                    .execute_from_commandline(argv)
    def task(self, *args, **options):
        """Decorator to create a task class out of any callable.
        **Examples:**
        .. code-block:: python
            @task()
            def refresh_feed(url):
                return Feed.objects.get(url=url).refresh()
        with setting extra options and using retry.
        .. code-block:: python
            from celery.task import current
            @task(exchange="feeds")
            def refresh_feed(url):
                try:
                    return Feed.objects.get(url=url).refresh()
                except socket.error, exc:
                    current.retry(exc=exc)
        Calling the resulting task::
            >>> refresh_feed("http://example.com/rss") # Regular
            <Feed: http://example.com/rss>
            >>> refresh_feed.delay("http://example.com/rss") # Async
            <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
        """
        def inner_create_task_cls(**options):
            def _create_task_cls(fun):
                base = options.pop("base", None) or self.Task
                # Instantiate the generated class immediately; the task
                # registry keeps the single global instance per task name.
                T = type(fun.__name__, (base, ), dict({
                        "app": self,
                        "accept_magic_kwargs": False,
                        "run": staticmethod(fun),
                        "__doc__": fun.__doc__,
                        "__module__": fun.__module__}, **options))()
                return registry.tasks[T.name]             # global instance.
            return _create_task_cls
        # Support both bare `@task` and parameterised `@task(...)` usage.
        if len(args) == 1 and callable(args[0]):
            return inner_create_task_cls(**options)(*args)
        return inner_create_task_cls(**options)
    def annotate_task(self, task):
        # NOTE(review): dict.iteritems() is Python-2 only -- this module
        # predates the py3 port.
        if self.annotations:
            match = annotations._first_match(self.annotations, task)
            for attr, value in (match or {}).iteritems():
                setattr(task, attr, value)
            match_any = annotations._first_match_any(self.annotations)
            for attr, value in (match_any or {}).iteritems():
                setattr(task, attr, value)
    @cached_property
    def Task(self):
        """Default Task base class for this application."""
        return self.create_task_cls()
    @cached_property
    def annotations(self):
        # Prepared (compiled) form of the CELERY_ANNOTATIONS setting.
        return annotations.prepare(self.conf.CELERY_ANNOTATIONS)
    def __repr__(self):
        return "<Celery: %s:0x%x>" % (self.main or "__main__", id(self), )
    def __reduce__(self):
        # Reduce only pickles the configuration changes,
        # so the default configuration doesn't have to be passed
        # between processes.
        return (_unpickle_app, (self.__class__, self.Pickler)
                              + self.__reduce_args__())
    def __reduce_args__(self):
        # Positional state consumed by AppPickler.build_standard_kwargs.
        return (self.main,
                self.conf.changes,
                self.loader_cls,
                self.backend_cls,
                self.amqp_cls,
                self.events_cls,
                self.log_cls,
                self.control_cls,
                self.accept_magic_kwargs)
#: The "default" loader is the default loader used by old applications.
default_loader = os.environ.get("CELERY_LOADER") or "default"
#: Global fallback app instance.
#: Deliberately not set as current so user apps are never shadowed by it.
default_app = App("default", loader=default_loader,
                  set_as_current=False,
                  accept_magic_kwargs=True)
def current_app():
    """Return the app bound to this thread, falling back to the default app."""
    app = getattr(_tls, "current_app", None)
    return app or default_app
def current_task():
    """Return the task currently executing in this thread, or None."""
    task = getattr(_tls, "current_task", None)
    return task
def _app_or_default(app=None):
    """Returns the app provided or the default app if none.
    The environment variable :envvar:`CELERY_TRACE_APP` is used to
    trace app leaks.  When enabled an exception is raised if there
    is no active app.
    """
    if app is not None:
        return app
    return getattr(_tls, "current_app", None) or default_app
def _app_or_default_trace(app=None):  # pragma: no cover
    """Tracing variant of :func:`_app_or_default` (see CELERY_TRACE_APP).

    Prints a stack trace whenever the fallback path is taken, and raises
    if the default app would be used from the main process.
    """
    from traceback import print_stack
    from multiprocessing import current_process
    if app is None:
        if getattr(_tls, "current_app", None):
            print("-- RETURNING TO CURRENT APP --")  # noqa+
            print_stack()
            return _tls.current_app
        # NOTE(review): relies on multiprocessing's private _name attribute.
        if current_process()._name == "MainProcess":
            raise Exception("DEFAULT APP")
        print("-- RETURNING TO DEFAULT APP --")  # noqa+
        print_stack()
        return default_app
    return app
def enable_trace():
    # Swap the public alias so all callers get the tracing lookup.
    global app_or_default
    app_or_default = _app_or_default_trace
def disable_trace():
    # Restore the plain (non-tracing) lookup.
    global app_or_default
    app_or_default = _app_or_default
# Default to the plain lookup; the CELERY_TRACE_APP env var opts into tracing.
app_or_default = _app_or_default
if os.environ.get("CELERY_TRACE_APP"): # pragma: no cover
    enable_trace()
|
|
#!/usr/bin/env python
"""Utilities working with Flask UIs"""
__author__ = 'Michael Meisinger, Stephen Henrie'
import traceback
import flask
from flask import request, jsonify
import sys
import json
import simplejson
from pyon.public import BadRequest, OT, get_ion_ts_millis
from pyon.util.containers import get_datetime
from interface.objects import ActorIdentity, SecurityToken, TokenTypeEnum
CONT_TYPE_JSON = "application/json"
CONT_TYPE_HTML = "text/html"
class UIExtension(object):
    """Base class for UI server extensions; all hooks are optional no-ops."""
    def on_init(self, ui_server, flask_app):
        # Called once when the UI server wires up the Flask app.
        pass
    def on_start(self):
        pass
    def on_stop(self):
        pass
    def extend_user_session_attributes(self, session, actor_obj):
        # May add extension-specific attributes to the user session dict.
        pass
# -------------------------------------------------------------------------
# Content encoding helpers
# Set standard json functions
json_dumps = json.dumps
json_loads = simplejson.loads   # Faster loading than regular json
def encode_ion_object(obj):
    """JSON ``default`` hook: serialize an ION object via its attribute dict."""
    return vars(obj)
# -------------------------------------------------------------------------
# UI helpers
def build_json_response(result_obj):
    """Wrap ``result_obj`` in the standard ``{status, result}`` JSON envelope."""
    return jsonify(dict(status=200, result=result_obj))
def build_json_error():
    """Build a JSON error response from the exception currently being handled.

    Must be called from inside an ``except`` block; uses the exception's
    ``status_code`` when present, else HTTP 500.
    """
    exc_type, exc_value, _tb = sys.exc_info()
    status = getattr(exc_value, "status_code", 500)
    error = dict(message=exc_value.message, exception=exc_type.__name__,
                 trace=traceback.format_exc())
    json_resp = jsonify(dict(error=error, status=status))
    json_resp.status_code = status
    return json_resp, status
def get_arg(arg_name, default="", is_mult=False):
    """Return a request argument; a list of values when ``is_mult`` is set."""
    if is_mult:
        return request.form.getlist(arg_name)
    aval = request.values.get(arg_name, None)
    return str(aval) if aval else default
def get_auth():
    """ Returns a dict with user session values from server session. """
    sess = flask.session
    actor_id = sess.get("actor_id", "")
    return dict(user_id=actor_id,
                actor_id=actor_id,
                username=sess.get("username", ""),
                full_name=sess.get("full_name", ""),
                attributes=sess.get("attributes", {}),
                roles=sess.get("roles", {}),
                is_logged_in=bool(actor_id),
                is_registered=bool(actor_id),
                valid_until=sess.get("valid_until", 0))
def set_auth(actor_id, username, full_name, valid_until, **kwargs):
    """ Sets server session based on user attributes. """
    sess = flask.session
    sess["actor_id"] = actor_id if actor_id else ""
    sess["username"] = username if username else ""
    sess["full_name"] = full_name if full_name else ""
    sess["valid_until"] = valid_until if valid_until else 0
    # Copy so later mutation of the caller's kwargs can't leak into session.
    sess["attributes"] = dict(kwargs)
    sess["roles"] = {}
    sess.modified = True
def clear_auth():
    """ Clears server session and empties user attributes. """
    blank_values = (("actor_id", ""), ("username", ""), ("full_name", ""),
                    ("valid_until", 0), ("attributes", {}), ("roles", {}))
    for key, empty in blank_values:
        flask.session[key] = empty
    flask.session.modified = True
def get_req_bearer_token():
    """Extract the bearer token from the Authorization header, if present."""
    auth_hdr = request.headers.get("authorization", None)
    if not auth_hdr or not auth_hdr.startswith("Bearer "):
        return None
    return auth_hdr[len("Bearer "):]
class OAuthClientObj(object):
    """
    Object holding information about an OAuth2 client for flask-oauthlib.

    Class attributes are placeholder defaults; real values are filled in by
    :meth:`from_actor_identity`.
    """
    client_id = None
    client_secret = "foo"
    is_confidential = False
    _redirect_uris = "https://foo"
    _default_scopes = "scioncc"
    @classmethod
    def from_actor_identity(cls, actor_obj):
        """ Factory method from a suitable ActorIdentity object """
        usable = (actor_obj and isinstance(actor_obj, ActorIdentity)
                  and actor_obj.details
                  and actor_obj.details.type_ == OT.OAuthClientIdentityDetails)
        if not usable:
            raise BadRequest("Bad actor identity object")
        client = OAuthClientObj()
        client.actor = actor_obj
        client.client_id = actor_obj._id
        client.is_confidential = actor_obj.details.is_confidential
        client._redirect_uris = actor_obj.details.redirect_uris
        client._default_scopes = actor_obj.details.default_scopes
        return client
    @property
    def client_type(self):
        # flask-oauthlib distinguishes confidential vs public clients.
        return 'confidential' if self.is_confidential else 'public'
    @property
    def redirect_uris(self):
        return self._redirect_uris.split() if self._redirect_uris else []
    @property
    def default_redirect_uri(self):
        uris = self.redirect_uris
        return uris[0] if uris else ""
    @property
    def default_scopes(self):
        return self._default_scopes.split() if self._default_scopes else []
class OAuthTokenObj(object):
    """
    Object holding information for an OAuth2 token for flask-oauthlib.

    Instances are populated via :meth:`from_security_token`; the class
    attributes document the expected fields.
    """
    access_token = None
    refresh_token = None
    token_type = None
    client_id = None
    expires = None
    user = None
    _scopes = None
    _token_obj = None
    @classmethod
    def from_security_token(cls, token_obj):
        """ Factory method from a SecurityToken object """
        if not token_obj or not isinstance(token_obj, SecurityToken) \
                or not token_obj.token_type in (TokenTypeEnum.OAUTH_ACCESS, TokenTypeEnum.OAUTH_REFRESH):
            raise BadRequest("Bad token object")
        oauth_token = OAuthTokenObj()
        oauth_token.access_token = token_obj.attributes.get("access_token", "")
        oauth_token.refresh_token = token_obj.attributes.get("refresh_token", "")
        oauth_token.token_type = "Bearer"
        oauth_token._scopes = token_obj.attributes.get("scopes", "")
        oauth_token.client_id = token_obj.attributes.get("client_id", "")
        oauth_token.expires = get_datetime(token_obj.expires, local_time=False)
        oauth_token.user = {"actor_id": token_obj.actor_id}
        oauth_token._token_obj = token_obj
        return oauth_token
    def is_valid(self, check_expiry=False):
        """Return True if the backing token exists, is OPEN and not expired."""
        if not self._token_obj:
            return False
        if self._token_obj.status != "OPEN":
            return False
        if check_expiry and int(self._token_obj.expires) < get_ion_ts_millis():
            return False
        return True
    def delete(self):
        # BUG FIX: was a Python-2-only `print` statement; this %-formatted
        # call prints the identical line on both Python 2 and 3.
        print("### DELETE TOKEN %s" % self.access_token)
        return self
    @property
    def scopes(self):
        if self._scopes:
            return self._scopes.split()
        return []
|
|
"""
FlexGet build and development utilities - unfortunately this file is somewhat messy
"""
from __future__ import print_function
import glob
import os
import shutil
import sys
from paver.easy import environment, task, cmdopts, Bunch, path, call_task, might_call, consume_args
# These 2 packages do magic on import, even though they aren't used explicitly
import paver.virtual
import paver.setuputils
from paver.shell import sh
from paver.setuputils import setup, find_package_data, find_packages
# Sphinx doc building is optional; tasks check this flag before using it.
sphinxcontrib = False
try:
    from sphinxcontrib import paverutils
    sphinxcontrib = True
except ImportError:
    pass
sys.path.insert(0, '')
options = environment.options
# Runtime dependency pins; comments explain each exclusion.
install_requires = [
    'FeedParser>=5.2.1',
    # There is a bug in sqlalchemy 0.9.0, see gh#127
    'SQLAlchemy >=0.7.5, !=0.9.0, <1.999',
    'PyYAML',
    # There is a bug in beautifulsoup 4.2.0 that breaks imdb parsing, see http://flexget.com/ticket/2091
    'beautifulsoup4>=4.1, !=4.2.0, <4.5',
    'html5lib>=0.11',
    'PyRSS2Gen',
    'pynzb',
    'progressbar',
    'rpyc',
    'jinja2',
    # There is a bug in requests 2.4.0 where it leaks urllib3 exceptions
    'requests>=1.0, !=2.4.0, <2.99',
    'python-dateutil!=2.0, !=2.2',
    'jsonschema>=2.0',
    'tmdb3',
    'path.py',
    'guessit>=2.0.3',
    'apscheduler',
    'pytvmaze>=1.4.4',
    'ordereddict>=1.1',
    # WebUI Requirements
    'cherrypy>=3.7.0',
    'flask>=0.7',
    'flask-restful>=0.3.3',
    'flask-restplus==0.8.6',
    'flask-compress>=1.2.1',
    'flask-login>=0.3.2',
    'flask-cors>=2.1.2',
    'pyparsing>=2.0.3',
    'Safe'
]
if sys.version_info < (2, 7):
    # argparse is part of the standard library in python 2.7+
    install_requires.append('argparse')
entry_points = {'console_scripts': ['flexget = flexget:main']}
# Provide an alternate exe on windows which does not cause a pop-up when scheduled
if sys.platform.startswith('win'):
    entry_points.setdefault('gui_scripts', []).append('flexget-headless = flexget:main')
with open("README.rst") as readme:
    long_description = readme.read()
# Populates __version__ without importing the package
__version__ = None
# NOTE(review): execfile is Python-2 only; exec(open(...).read()) on Python 3.
execfile('flexget/_version.py')
if not __version__:
    print('Could not find __version__ from flexget/_version.py')
    sys.exit(1)
setup(
    name='FlexGet',
    version=__version__,  # release task may edit this
    description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) '
                'from different sources like RSS-feeds, html-pages, various sites and more.',
    long_description=long_description,
    author='Marko Koivusalo',
    author_email='marko.koivusalo@gmail.com',
    license='MIT',
    url='http://flexget.com',
    download_url='http://download.flexget.com',
    install_requires=install_requires,
    packages=find_packages(exclude=['tests']),
    package_data=find_package_data('flexget', package='flexget',
                                   exclude=['FlexGet.egg-info', '*.pyc'],
                                   exclude_directories=['node_modules', 'bower_components', '.tmp'],
                                   only_in_packages=False),  # NOTE: the exclude does not seem to work
    zip_safe=False,
    test_suite='nose.collector',
    extras_require={
        'memusage': ['guppy'],
        'NZB': ['pynzb'],
        'TaskTray': ['pywin32'],
    },
    entry_points=entry_points,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ]
)
# Paver task option defaults (minilib packaging, virtualenv bootstrap, sphinx).
options(
    minilib=Bunch(
        # 'version' is included as workaround to https://github.com/paver/paver/issues/112, TODO: remove
        extra_files=['virtual', 'svn', 'version']
    ),
    virtualenv=Bunch(
        paver_command_line='develop'
    ),
    # sphinxcontrib.paverutils
    sphinx=Bunch(
        docroot='docs',
        builddir='build',
        builder='html',
        confdir='docs'
    ),
)
def set_init_version(ver):
    """Replaces the version with ``ver`` in _version.py"""
    version_file = 'flexget/_version.py'
    with open(version_file) as handle:
        original_lines = handle.readlines()
    with open(version_file, 'w') as handle:
        for line in original_lines:
            if line.startswith('__version__ = '):
                line = "__version__ = '%s'\n" % ver
            handle.write(line)
@task
def version():
    """Prints the version number of the source"""
    # __version__ is populated at module import by execfile above.
    print(__version__)
@task
@cmdopts([('dev', None, 'Bumps to new development version instead of release version.')])
def increment_version(options):
    """Increments either release or dev version by 1

    With --dev: 1.2.dev3 -> 1.2.dev4, 1.2.3 -> 1.2.4.dev
    Without:    1.2.dev3 -> 1.2,      1.2.3 -> 1.2.4
    The result is written back to flexget/_version.py.
    """
    print('current version: %s' % __version__)
    ver_split = __version__.split('.')
    dev = options.increment_version.get('dev')
    if 'dev' in ver_split[-1]:
        if dev:
            # If this is already a development version, increment the dev count by 1
            ver_split[-1] = 'dev%d' % (int(ver_split[-1].strip('dev') or 0) + 1)
        else:
            # Just strip off dev tag for next release version
            ver_split = ver_split[:-1]
    else:
        # Increment the revision number by one
        if len(ver_split) == 2:
            # We don't have a revision number, assume 0
            ver_split.append('1')
        else:
            ver_split[-1] = str(int(ver_split[-1]) + 1)
        if dev:
            ver_split.append('dev')
    new_version = '.'.join(ver_split)
    print('new version: %s' % new_version)
    set_init_version(new_version)
@task
@cmdopts([
    ('online', None, 'Run online tests')
])
def test(options):
    """Run FlexGet unit tests

    Returns nose's success flag so callers (e.g. `release`) can abort.
    Online tests are excluded unless --online is passed.
    """
    options.setdefault('test', Bunch())
    import nose
    from nose.plugins.manager import DefaultPluginManager
    cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
    args = []
    # Adding the -v flag makes the tests fail in python 2.7
    #args.append('-v')
    args.append('--processes=4')
    args.append('-x')
    if not options.test.get('online'):
        args.append('--attr=!online')
    args.append('--where=tests')
    # Store current path since --where changes it, restore when leaving
    cwd = os.getcwd()
    try:
        return nose.run(argv=args, config=cfg)
    finally:
        os.chdir(cwd)
@task
def clean():
    """Cleans up the virtualenv"""
    # Remove generated directories/files at the repo root...
    for p in ('bin', 'Scripts', 'build', 'dist', 'include', 'lib', 'man',
              'share', 'FlexGet.egg-info', 'paver-minilib.zip', 'setup.py'):
        pth = path(p)
        if pth.isdir():
            pth.rmtree()
        elif pth.isfile():
            pth.remove()
    # ...then compiled bytecode inside every package plus the tests dir.
    for pkg in set(options.setup.packages) | set(('tests',)):
        for filename in glob.glob(pkg.replace('.', os.sep) + "/*.py[oc~]"):
            path(filename).remove()
@task
@cmdopts([
    ('dist-dir=', 'd', 'directory to put final built distributions in'),
    ('revision=', 'r', 'minor revision number of this build')
])
def sdist(options):
    """Build tar.gz distribution package"""
    print('sdist version: %s' % __version__)
    # clean previous build
    print('Cleaning build...')
    for p in ['build']:
        pth = path(p)
        if pth.isdir():
            pth.rmtree()
        elif pth.isfile():
            pth.remove()
        else:
            print('Unable to remove %s' % pth)
    # remove pre-compiled pycs from tests, I don't know why paver even tries to include them ...
    # seems to happen only with sdist though
    for pyc in path('tests/').files('*.pyc'):
        pyc.remove()
    # Chain the paver tasks that produce the actual archive.
    for t in ['minilib', 'generate_setup', 'setuptools.command.sdist']:
        call_task(t)
@task
def coverage():
    """Make coverage.flexget.com"""
    # --with-coverage --cover-package=flexget --cover-html --cover-html-dir /var/www/flexget_coverage/
    import nose
    from nose.plugins.manager import DefaultPluginManager
    cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
    # argv[0] is the program name placeholder expected by nose.run.
    argv = ['bin/paver']
    argv.extend(['--attr=!online'])
    argv.append('--with-coverage')
    argv.append('--cover-html')
    argv.extend(['--cover-package', 'flexget'])
    argv.extend(['--cover-html-dir', '/var/www/flexget_coverage/'])
    nose.run(argv=argv, config=cfg)
    print('Coverage generated')
@task
@cmdopts([
    ('docs-dir=', 'd', 'directory to put the documetation in')
])
def docs():
    """Build the Sphinx documentation (requires sphinxcontrib-paverutils)."""
    if not sphinxcontrib:
        print('ERROR: requires sphinxcontrib-paverutils')
        sys.exit(1)
    from paver import tasks
    if not os.path.exists('build'):
        os.mkdir('build')
    if not os.path.exists(os.path.join('build', 'sphinx')):
        os.mkdir(os.path.join('build', 'sphinx'))
    setup_section = tasks.environment.options.setdefault("sphinx", Bunch())
    # Paver converts the --docs-dir option name to the docs_dir key.
    setup_section.update(outdir=options.docs.get('docs_dir', 'build/sphinx'))
    call_task('sphinxcontrib.paverutils.html')
@task
@might_call('test', 'sdist')
@cmdopts([('no-tests', None, 'skips unit tests')])
def release(options):
    """Run tests then make an sdist if successful."""
    if not options.release.get('no_tests'):
        # `test` returns nose's success flag; abort the release on failure.
        if not test():
            print('Unit tests did not pass')
            sys.exit(1)
    print('Making src release')
    sdist()
@task
def install_tools():
    """Install development / jenkins tools and dependencies"""
    try:
        import pip
    except ImportError:
        print('FATAL: Unable to import pip, please install it and run this again!')
        sys.exit(1)
    try:
        import sphinxcontrib
        print('sphinxcontrib INSTALLED')
    except ImportError:
        pip.main(['install', 'sphinxcontrib-paverutils'])
    # NOTE(review): pip.main is an internal API removed in pip >= 10.
    pip.main(['install', '-r', 'jenkins-requirements.txt'])
@task
def clean_compiled():
    """Delete compiled artifacts (*.pyc, *.pyo, coverage files) under flexget/."""
    compiled_suffixes = ('pyc', 'pyo', 'cover')
    for root, _dirs, files in os.walk('flexget'):
        for name in files:
            fqn = os.path.join(root, name)
            if fqn.endswith(compiled_suffixes):
                print('Deleting %s' % fqn)
                os.remove(fqn)
@task
@consume_args
def pep8(args):
    """Run the pep8 style checker over the flexget package.

    Extra command-line args are handed to pep8's own argument parser.
    Exits with an error if the pep8 tool is not installed.
    """
    try:
        import pep8
    except ImportError:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only a missing module should trigger the hint.
        print('Run bin/paver install_tools')
        sys.exit(1)
    # Ignoring certain errors
    ignore = [
        'E711', 'E712',  # These are comparisons to singletons i.e. == False, and == None. We need these for sqlalchemy.
        'W291', 'W293', 'E261',
        'E128'  # E128 continuation line under-indented for visual indent
    ]
    styleguide = pep8.StyleGuide(show_source=True, ignore=ignore, repeat=1, max_line_length=120,
                                 parse_argv=args)
    styleguide.input_dir('flexget')
@task
@cmdopts([
    ('file=', 'f', 'name of the requirements file to create')
])
def requirements(options):
    """Write the pinned install_requires list to a requirements file."""
    filename = options.requirements.get('file', 'requirements.txt')
    with open(filename, mode='w') as req_file:
        # NOTE(review): `options.install_requires` presumably resolves through
        # paver's Namespace section search into setup()'s keywords -- verify.
        req_file.write('\n'.join(options.install_requires))
@task
def upgrade_deps():
    """Upgrade every pinned runtime dependency in-place with pip."""
    try:
        import pip
        cmd = ['install', '--upgrade']
        cmd.extend(install_requires)
        # NOTE(review): pip.main is an internal API removed in pip >= 10.
        pip.main(cmd)
    except ImportError:
        print('FATAL: Unable to import pip, please install it and run this again!')
        sys.exit(1)
@task
def build_webui():
    """(Re)build the web UI: clean package dirs, then npm install, bower
    install, and a gulp build, all inside flexget/ui."""
    cwd = os.path.join('flexget', 'ui')
    # Cleanup previous builds.
    # BUG FIX: the list was ['bower_components' 'node_modules'] -- adjacent
    # string literals concatenate, so neither folder was ever removed.
    for folder in ['bower_components', 'node_modules']:
        folder = os.path.join(cwd, folder)
        if os.path.exists(folder):
            shutil.rmtree(folder)
    # Install npm packages
    sh(['npm', 'install'], cwd=cwd)
    # Install bower components
    sh(['bower', 'install'], cwd=cwd)
    # Build the ui
    sh('gulp buildapp', cwd=cwd)
|
|
import sublime
import sys
import os
from unittesting import DeferrableTestCase
####################
# TESTS FOR FUNCTIONS
####################
# The Requester plugin is already loaded by Sublime Text; grab its core
# module straight from sys.modules instead of importing it by path.
core = sys.modules['Requester.core']
class TestFunctions(DeferrableTestCase):
    """Unit tests for core.responses.prepare_request.

    Requests are only prepared, never sent, so no network access occurs.
    """
    def test_prepare_request(self):
        # A bare call gets the implicit `requests.` prefix.
        s = "get('http://httpbin.org/get')"
        req = core.responses.prepare_request(s, {}, 0)
        self.assertEqual(req.request, "requests.get('http://httpbin.org/get')")
    def test_prepare_request_with_prefix(self):
        # Session-style calls (e.g. `_s.get`) keep their own prefix.
        s = "_s.get('http://httpbin.org/get')"
        req = core.responses.prepare_request(s, {}, 0)
        self.assertEqual(req.request, "_s.get('http://httpbin.org/get')")
    def test_prepare_request_with_no_scheme(self):
        # A URL without a scheme defaults to http://.
        s = "get('httpbin.org/get')"
        req = core.responses.prepare_request(s, {}, 0)
        self.assertEqual(req.url, 'http://httpbin.org/get')
        self.assertEqual(req.args[0], 'http://httpbin.org/get')
####################
# HELPER FUNCTIONS
####################
def select_line_beginnings(view, lines, clear=True):
    """Place a cursor at column 0 of each given 1-based line number."""
    if not hasattr(lines, '__iter__'):
        lines = [lines]
    if clear:
        view.sel().clear()
    for line_number in lines:
        point = view.text_point(line_number - 1, 0)
        view.sel().add(sublime.Region(point))
def get_line(view, line):
    """Return the full text of the given 1-based line number in the view."""
    point = view.text_point(line - 1, 0)
    return view.substr(view.line(point))
####################
# TESTS FOR COMMANDS
####################
class TestRequesterMixin:
    """Shared setup/teardown and assertion helpers for Requester tests.

    Subclasses set either REQUESTER_FILE (a path relative to the first
    project folder) or REQUESTER_RESOURCE (a Packages/ resource path) to
    choose where the requester view's contents come from.
    """
    # Wait in ms for responses to return
    WAIT_MS = 750
    def setUp(self):
        """Load plugin settings and open the requester view for this class."""
        self.config = sublime.load_settings('Requester.sublime-settings')
        self.window = sublime.active_window()
        if hasattr(self, 'REQUESTER_FILE'):
            path = self.window.project_data()['folders'][0]['path']
            self.view = self.get_scratch_view_from_file(os.path.join(path, self.REQUESTER_FILE))
        if hasattr(self, 'REQUESTER_RESOURCE'):
            self.view = self.get_scratch_view_from_resource(self.REQUESTER_RESOURCE)
    def tearDown(self):
        """Close the requester view and any response tabs it opened."""
        if self.view:
            self.close_view(self.view)
        self.window.run_command('requester_close_response_tabs')
    def close_view(self, view):
        """Focus the given view, then close it."""
        self.window.focus_view(view)
        self.window.run_command('close_file')
    def get_scratch_view_from_file(self, file):
        """Open a file on disk as a scratch view (no save prompt on close)."""
        view = self.window.open_file(file)
        view.set_scratch(True)
        return view
    def get_scratch_view_from_resource(self, resource):
        """Load a packaged resource into a new, named scratch view."""
        content = sublime.load_resource(resource)
        view = self.window.new_file()
        view.set_name('Requester Tests')
        view.run_command('requester_replace_view_text', {'text': content})
        view.set_scratch(True)
        return view
    def _test_url_in_view(self, view, url):
        # Compares line 5 of the response view against the expected URL.
        self.assertEqual(
            get_line(view, 5),
            url
        )
    def _test_name_in_view(self, view, name):
        # Asserts the response tab's title.
        self.assertEqual(view.name(), name)
    def _test_string_in_view(self, view, string):
        # Asserts that the view's entire contents contain the given string.
        content = view.substr(sublime.Region(
            0, view.size()
        ))
        self.assertTrue(string in content)
class TestRequesterEnvFile(TestRequesterMixin, DeferrableTestCase):
    """Tests for a requester file that references an external env file."""
    REQUESTER_FILE = 'tests/requester_env_file.py'
    def test_single_request_with_env_file(self):
        """From env file.
        """
        # NOTE(review): the extra up-front delay presumably gives the
        # file-backed view time to finish loading — confirm.
        yield 1000
        select_line_beginnings(self.view, 8)
        self.view.run_command('requester')
        yield self.WAIT_MS
        self._test_url_in_view(self.window.active_view(), 'http://127.0.0.1:8000/get')
        self._test_name_in_view(self.window.active_view(), 'GET: /get')
class TestRequester(TestRequesterMixin, DeferrableTestCase):
    """Core `requester` command tests, run against a resource-backed view."""
    REQUESTER_RESOURCE = 'Packages/Requester/tests/requester.py'
    def test_single_request(self):
        """Generic.
        """
        select_line_beginnings(self.view, 6)
        self.view.run_command('requester')
        yield self.WAIT_MS  # this use of yield CAN'T be moved into a helper, it needs to be part of a test method
        self._test_url_in_view(self.window.active_view(), 'http://127.0.0.1:8000/post')
        self._test_name_in_view(self.window.active_view(), 'POST: /post')
    def test_single_request_no_prefix(self):
        """Without `requests.` prefix.
        """
        select_line_beginnings(self.view, 7)
        self.view.run_command('requester')
        yield self.WAIT_MS
        self._test_url_in_view(self.window.active_view(), 'http://127.0.0.1:8000/get')
        self._test_name_in_view(self.window.active_view(), 'GET: /get')
    def test_single_request_on_multiple_lines(self):
        """Request on multiple lines.
        """
        select_line_beginnings(self.view, 13)
        self.view.run_command('requester')
        yield self.WAIT_MS
        self._test_url_in_view(self.window.active_view(), 'http://127.0.0.1:8000/get')
        self._test_name_in_view(self.window.active_view(), 'GET: /get')
    def test_single_request_commented_out(self):
        """Commented out request on one line.
        """
        select_line_beginnings(self.view, 16)
        self.view.run_command('requester')
        yield self.WAIT_MS
        self._test_url_in_view(self.window.active_view(), 'http://127.0.0.1:8000/get')
        self._test_name_in_view(self.window.active_view(), 'GET: /get')
    def test_single_request_with_env_block(self):
        """From env block.
        """
        select_line_beginnings(self.view, 9)
        self.view.run_command('requester')
        yield self.WAIT_MS
        self._test_url_in_view(self.window.active_view(), 'http://127.0.0.1:8000/get?key1=value1')
        self._test_name_in_view(self.window.active_view(), 'GET: /get')
    def test_single_request_focus_change(self):
        """Test that re-executing request in requester file doesn't open response
        tab, but rather reuses already open response tab.
        """
        select_line_beginnings(self.view, 6)
        self.view.run_command('requester')
        yield self.WAIT_MS
        # Remember where the response tab landed the first time around.
        group, index = self.window.get_view_index(self.window.active_view())
        self.window.focus_view(self.view)
        yield 1000
        select_line_beginnings(self.view, 6)
        self.view.run_command('requester')
        yield self.WAIT_MS
        # Re-execution must reuse the same tab position, not open a new one.
        new_group, new_index = self.window.get_view_index(self.window.active_view())
        self.assertEqual(group, new_group)
        self.assertEqual(index, new_index)
class TestRequesterMultiple(TestRequesterMixin, DeferrableTestCase):
    """Tests executing several requests in a single `requester` invocation."""
    REQUESTER_RESOURCE = 'Packages/Requester/tests/requester.py'
    # Wait in ms for responses to return
    WAIT_MS = 750
    def test_multiple_requests(self):
        """Tests the following:

        - Blank lines are skipped in requester file
        - 3 response tabs are opened when 3 requests are executed
        - Focus doesn't change to any response tab after it appears
        - Reordering response tabs works correctly
        """
        select_line_beginnings(self.view, [6, 9, 10])
        self.view.run_command('requester')
        yield self.WAIT_MS
        self.assertEqual(self.window.active_view(), self.view)
        self.view.run_command('requester_reorder_response_tabs')
        yield self.WAIT_MS
        self.assertEqual(self.window.active_view(), self.view)
        group, index = self.window.get_view_index(self.view)
        # Walk the tabs to the right of the requester view and check order.
        for i, name in enumerate(['POST: /post', 'GET: /get', 'POST: /anything']):
            self.window.run_command('select_by_index', {'index': index + i + 1})
            yield self.WAIT_MS
            self._test_name_in_view(self.window.active_view(), name)
class TestRequesterSession(TestRequesterMixin, DeferrableTestCase):
    """Tests for requests executed through a session or a prepared request."""
    REQUESTER_RESOURCE = 'Packages/Requester/tests/requester_session.py'
    # Longer wait than the mixin default of 750 ms.
    WAIT_MS = 1000
    def test_session(self):
        """Using session.
        """
        select_line_beginnings(self.view, 12)
        self.view.run_command('requester')
        yield self.WAIT_MS
        self._test_url_in_view(self.window.active_view(), 'http://127.0.0.1:8000/cookies/set?k1=v1')
        self._test_name_in_view(self.window.active_view(), 'GET: /cookies/set')
        self._test_string_in_view(self.window.active_view(), "'X-Test': 'true'")  # header added to session
        self._test_string_in_view(self.window.active_view(), "'Cookie': 'k0=v0'")  # cookies added to session
        self._test_string_in_view(self.window.active_view(), "Response Cookies: {'k1': 'v1'}")
    def test_prepared_request(self):
        """Using prepared request.
        """
        select_line_beginnings(self.view, 14)
        self.view.run_command('requester')
        yield self.WAIT_MS
        self._test_name_in_view(self.window.active_view(), 'POST: /post')
        self._test_string_in_view(self.window.active_view(), "'Cookie': 'k0=v0'")  # cookies added to session
|
|
import os
import glob
import shutil
import sys
import math
import datetime as dt
import subprocess as sp
import numpy as np
import warnings
import xml.etree.ElementTree as ET
from astropy import __version__ as astropy_version
from astropy import wcs
from astropy.io import fits, votable
from astropy.table import Table, Column, MaskedColumn, vstack, join
from astropy.coordinates import Angle, EarthLocation, SkyCoord, ICRS, AltAz
from astropy.coordinates import Galactic, GeocentricMeanEcliptic
from astropy.coordinates import match_coordinates_sky
from astropy import units as u
from astropy.time import Time
from astropy.stats import sigma_clip
from scipy.interpolate import InterpolatedUnivariateSpline, SmoothBivariateSpline
from scipy.ndimage.filters import generic_filter
from scipy.linalg import lstsq
from collections import OrderedDict
from ..database.database import PlateDB
from ..conf import read_conf
from .._version import __version__
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from scipy.spatial import cKDTree as KDT
except ImportError:
from scipy.spatial import KDTree as KDT
try:
from sklearn.cluster import DBSCAN
have_sklearn = True
except ImportError:
have_sklearn = False
try:
import MySQLdb
except ImportError:
pass
try:
import healpy
have_healpy = True
except ImportError:
have_healpy = False
try:
import statsmodels.api as sm
have_statsmodels = True
except ImportError:
have_statsmodels = False
# Schema of the source table.  Maps each column name to a 3-tuple of
# (numpy dtype string,
#  printf-style format used for CSV export in output_csv,
#  SExtractor catalog column name, or '' when the value is computed by
#  this module instead of being copied from SExtractor output).
_source_meta = OrderedDict([
    ('source_num', ('i4', '%7d', 'NUMBER')),
    ('x_source', ('f8', '%11.4f', 'X_IMAGE')),
    ('y_source', ('f8', '%11.4f', 'Y_IMAGE')),
    ('erra_source', ('f4', '%9.5f', 'ERRA_IMAGE')),
    ('errb_source', ('f4', '%9.5f', 'ERRB_IMAGE')),
    ('errtheta_source', ('f4', '%6.2f', 'ERRTHETA_IMAGE')),
    ('a_source', ('f4', '%9.3f', 'A_IMAGE')),
    ('b_source', ('f4', '%9.3f', 'B_IMAGE')),
    ('theta_source', ('f4', '%6.2f', 'THETA_IMAGE')),
    ('elongation', ('f4', '%8.3f', 'ELONGATION')),
    ('x_peak', ('i4', '%6d', 'XPEAK_IMAGE')),
    ('y_peak', ('i4', '%6d', 'YPEAK_IMAGE')),
    ('flag_usepsf', ('i1', '%1d', '')),
    ('x_image', ('f8', '%11.4f', 'X_IMAGE')),
    ('y_image', ('f8', '%11.4f', 'Y_IMAGE')),
    ('erra_image', ('f4', '%9.5f', 'ERRA_IMAGE')),
    ('errb_image', ('f4', '%9.5f', 'ERRB_IMAGE')),
    ('errtheta_image', ('f4', '%6.2f', 'ERRTHETA_IMAGE')),
    ('x_psf', ('f8', '%11.4f', '')),
    ('y_psf', ('f8', '%11.4f', '')),
    ('erra_psf', ('f4', '%9.5f', '')),
    ('errb_psf', ('f4', '%9.5f', '')),
    ('errtheta_psf', ('f4', '%6.2f', '')),
    ('mag_auto', ('f4', '%7.4f', 'MAG_AUTO')),
    ('magerr_auto', ('f4', '%7.4f', 'MAGERR_AUTO')),
    ('flux_auto', ('f4', '%12.5e', 'FLUX_AUTO')),
    ('fluxerr_auto', ('f4', '%12.5e', 'FLUXERR_AUTO')),
    ('mag_iso', ('f4', '%7.4f', 'MAG_ISO')),
    ('magerr_iso', ('f4', '%7.4f', 'MAGERR_ISO')),
    ('flux_iso', ('f4', '%12.5e', 'FLUX_ISO')),
    ('fluxerr_iso', ('f4', '%12.5e', 'FLUXERR_ISO')),
    ('flux_max', ('f4', '%12.5e', 'FLUX_MAX')),
    ('flux_radius', ('f4', '%12.5e', 'FLUX_RADIUS')),
    ('fwhm_image', ('f4', '%12.5e', 'FWHM_IMAGE')),
    ('isoarea', ('i4', '%6d', 'ISOAREA_IMAGE')),
    ('sqrt_isoarea', ('f4', '%12.5e', '')),
    ('background', ('f4', '%12.5e', 'BACKGROUND')),
    ('sextractor_flags', ('i2', '%3d', 'FLAGS')),
    ('dist_center', ('f4', '%9.3f', '')),
    ('dist_edge', ('f4', '%9.3f', '')),
    ('annular_bin', ('i2', '%1d', '')),
    ('flag_negradius', ('i1', '%1d', '')),
    ('flag_rim', ('i1', '%1d', '')),
    ('flag_clean', ('i1', '%1d', '')),
    ('model_prediction', ('f4', '%7.5f', '')),
    ('solution_num', ('i2', '%1d', '')),
    ('ra_icrs', ('f8', '%11.7f', '')),
    ('dec_icrs', ('f8', '%11.7f', '')),
    ('ra_error', ('f4', '%7.4f', '')),
    ('dec_error', ('f4', '%7.4f', '')),
    ('gal_lat', ('f8', '%11.7f', '')),
    ('gal_lon', ('f8', '%11.7f', '')),
    ('ecl_lat', ('f8', '%11.7f', '')),
    ('ecl_lon', ('f8', '%11.7f', '')),
    ('x_sphere', ('f8', '%10.7f', '')),
    ('y_sphere', ('f8', '%10.7f', '')),
    ('z_sphere', ('f8', '%10.7f', '')),
    ('healpix256', ('i4', '%6d', '')),
    ('healpix1024', ('i4', '%8d', '')),
    ('nn_dist', ('f4', '%6.3f', '')),
    ('zenith_angle', ('f4', '%7.4f', '')),
    ('airmass', ('f4', '%7.4f', '')),
    ('natmag', ('f4', '%7.4f', '')),
    ('natmag_error', ('f4', '%7.4f', '')),
    ('bpmag', ('f4', '%7.4f', '')),
    ('bpmag_error', ('f4', '%7.4f', '')),
    ('rpmag', ('f4', '%7.4f', '')),
    ('rpmag_error', ('f4', '%7.4f', '')),
    ('natmag_plate', ('f4', '%7.4f', '')),
    ('natmag_correction', ('f4', '%7.4f', '')),
    ('natmag_residual', ('f4', '%7.4f', '')),
    ('phot_range_flags', ('i2', '%1d', '')),
    ('phot_calib_flags', ('i2', '%1d', '')),
    ('color_term', ('f4', '%7.4f', '')),
    ('cat_natmag', ('f4', '%7.4f', '')),
    ('match_radius', ('f4', '%7.3f', '')),
    ('gaiaedr3_id', ('i8', '%d', '')),
    ('gaiaedr3_gmag', ('f4', '%7.4f', '')),
    ('gaiaedr3_bpmag', ('f4', '%7.4f', '')),
    ('gaiaedr3_rpmag', ('f4', '%7.4f', '')),
    ('gaiaedr3_bp_rp', ('f4', '%7.4f', '')),
    ('gaiaedr3_dist', ('f4', '%6.3f', '')),
    ('gaiaedr3_neighbors', ('i4', '%3d', ''))
])
def crossmatch_cartesian(coords_image, coords_ref, tolerance=None):
    """
    Crossmatch source coordinates with reference-star coordinates.

    Parameters
    ----------
    coords_image : array-like
        The coordinates of points to match
    coords_ref : array-like
        The coordinates of reference points to match
    tolerance : float
        Crossmatch distance in pixels (default: 5)

    Returns
    -------
    Three arrays: indices into coords_image, matching indices into
    coords_ref, and the corresponding match distances in pixels.
    """
    tol = 5. if tolerance is None else tolerance
    # Older scipy KDTree implementations do not accept the tree-building
    # keywords, so fall back to the plain constructor.
    try:
        tree = KDT(coords_ref, balanced_tree=False, compact_nodes=False)
    except TypeError:
        tree = KDT(coords_ref)
    dist, nearest = tree.query(coords_image, k=1, distance_upper_bound=tol)
    # Unmatched points come back with infinite distance and are dropped here.
    matched = dist < tol
    all_indices = np.arange(len(coords_image))
    return all_indices[matched], nearest[matched], dist[matched]
class SourceTable(Table):
"""
Source table class
"""
    def __init__(self, *args, **kwargs):
        """
        Initialise the source table.

        Parameters
        ----------
        num_sources : int, optional
            Number of sources (rows); popped from kwargs before the
            remaining arguments are forwarded to the Table constructor.
        """
        num_sources = kwargs.pop('num_sources', None)
        super().__init__(*args, **kwargs)
        #self.filename = os.path.basename(filename)
        #self.archive_id = archive_id
        # Identifiers, logging and file locations
        self.basefn = ''
        self.fn_fits = ''
        self.process_id = None
        self.scan_id = None
        self.plate_id = None
        self.log = None
        self.work_dir = ''
        self.scratch_dir = None
        self.write_source_dir = ''
        self.write_db_source_dir = ''
        self.write_db_source_calib_dir = ''
        # Extraction/solve parameters (defaults; overridden by assign_conf)
        self.plate_epoch = 1950
        self.plate_year = int(self.plate_epoch)
        self.threshold_sigma = 4.
        self.use_filter = False
        self.filter_path = None
        self.use_psf = False
        self.psf_threshold_sigma = 20.
        self.psf_model_sigma = 20.
        self.min_model_sources = 100
        self.max_model_sources = 10000
        self.sip = 3
        self.skip_bright = 10
        self.distort = 3
        self.subfield_distort = 1
        self.max_recursion_depth = 5
        self.force_recursion_depth = 0
        self.circular_film = False
        # Crossmatch parameters
        self.crossmatch_radius = None
        self.crossmatch_nsigma = 10.
        self.crossmatch_nlogarea = 2.
        self.crossmatch_maxradius = 20.
        # Plate/scan metadata and results filled in during processing
        self.plate_header = None
        self.platemeta = None
        self.imwidth = None
        self.imheight = None
        self.plate_solved = False
        self.mean_pixscale = None
        self.num_sources = num_sources
        self.num_sources_sixbins = None
        self.rel_area_sixbins = None
        self.min_ra = None
        self.max_ra = None
        self.min_dec = None
        self.max_dec = None
        self.ncp_close = None
        self.scp_close = None
        self.ncp_on_plate = None
        self.scp_on_plate = None
        self.pattern_x = None
        self.pattern_y = None
        self.pattern_ratio = None
        self.num_crossmatch_gaia = None
        self.neighbors_gaia = None
def populate(self, num_sources=0):
"""
Populate table with columns.
"""
for k in _source_meta:
zerodata = np.zeros(num_sources, dtype=_source_meta[k][0])
self.add_column(Column(name=k, dtype=_source_meta[k][0],
data=zerodata))
self['flag_usepsf'] = 0
self['x_psf'] = np.nan
self['y_psf'] = np.nan
self['erra_psf'] = np.nan
self['errb_psf'] = np.nan
self['errtheta_psf'] = np.nan
self['ra_icrs'] = np.nan
self['dec_icrs'] = np.nan
self['ra_error'] = np.nan
self['dec_error'] = np.nan
self['gal_lat'] = np.nan
self['gal_lon'] = np.nan
self['ecl_lat'] = np.nan
self['ecl_lon'] = np.nan
self['x_sphere'] = np.nan
self['y_sphere'] = np.nan
self['z_sphere'] = np.nan
self['healpix256'] = -1
self['healpix1024'] = -1
self['nn_dist'] = np.nan
self['zenith_angle'] = np.nan
self['airmass'] = np.nan
self['gaiaedr3_gmag'] = np.nan
self['gaiaedr3_bpmag'] = np.nan
self['gaiaedr3_rpmag'] = np.nan
self['gaiaedr3_bp_rp'] = np.nan
self['gaiaedr3_dist'] = np.nan
self['gaiaedr3_neighbors'] = 0
self['natmag'] = np.nan
self['natmag_error'] = np.nan
self['bpmag'] = np.nan
self['bpmag_error'] = np.nan
self['rpmag'] = np.nan
self['rpmag_error'] = np.nan
self['natmag_residual'] = np.nan
self['natmag_correction'] = np.nan
self['color_term'] = np.nan
self['cat_natmag'] = np.nan
self['phot_range_flags'] = 0
self['phot_calib_flags'] = 0
def assign_conf(self, conf):
"""
Parse configuration and set class attributes.
"""
if isinstance(conf, str):
conf = read_conf(conf)
self.conf = conf
try:
self.archive_id = conf.getint('Archive', 'archive_id')
except ValueError:
print('Error in configuration file '
'([{}], {})'.format('Archive', attr))
except configparser.Error:
pass
for attr in ['sextractor_path', 'scamp_path', 'psfex_path',
'solve_field_path', 'wcs_to_tan_path']:
try:
setattr(self, attr, conf.get('Programs', attr))
except configparser.Error:
pass
for attr in ['fits_dir', 'index_dir', 'gaia_dir', 'tycho2_dir',
'work_dir', 'write_log_dir', 'write_phot_dir',
'write_source_dir', 'write_wcs_dir',
'write_db_source_dir', 'write_db_source_calib_dir']:
try:
setattr(self, attr, conf.get('Files', attr))
except configparser.Error:
pass
if self.write_log_dir:
self.enable_log = True
for attr in ['use_gaia_fits', 'use_tycho2_fits',
'use_ucac4_db', 'use_apass_db',
'enable_db_log', 'write_sources_csv']:
try:
setattr(self, attr, conf.getboolean('Database', attr))
except ValueError:
print('Error in configuration file '
'([{}], {})'.format('Database', attr))
except configparser.Error:
pass
for attr in ['ucac4_db_host', 'ucac4_db_user', 'ucac4_db_name',
'ucac4_db_passwd', 'ucac4_db_table',
'apass_db_host', 'apass_db_user', 'apass_db_name',
'apass_db_passwd', 'apass_db_table',
'output_db_host', 'output_db_user',
'output_db_name', 'output_db_passwd']:
try:
setattr(self, attr, conf.get('Database', attr))
except configparser.Error:
pass
for attr in ['use_filter', 'use_psf', 'circular_film']:
try:
setattr(self, attr, conf.getboolean('Solve', attr))
except ValueError:
print('Error in configuration file '
'([{}], {})'.format('Solve', attr))
except configparser.Error:
pass
for attr in ['plate_epoch', 'threshold_sigma',
'psf_threshold_sigma', 'psf_model_sigma',
'crossmatch_radius', 'crossmatch_nsigma',
'crossmatch_nlogarea', 'crossmatch_maxradius']:
try:
setattr(self, attr, conf.getfloat('Solve', attr))
except ValueError:
print('Error in configuration file '
'([{}], {})'.format('Solve', attr))
except configparser.Error:
pass
for attr in ['sip', 'skip_bright', 'distort', 'subfield_distort',
'max_recursion_depth', 'force_recursion_depth',
'min_model_sources', 'max_model_sources']:
try:
setattr(self, attr, conf.getint('Solve', attr))
except ValueError:
print('Error in configuration file '
'([{}], {})'.format('Solve', attr))
except configparser.Error:
pass
for attr in ['filter_path', 'astref_catalog', 'photref_catalog']:
try:
setattr(self, attr, conf.get('Solve', attr))
except configparser.Error:
pass
# Read UCAC4 and APASS table column names from the dedicated sections,
# named after the tables
if conf.has_section(self.ucac4_db_table):
for attr in self.ucac4_columns.keys():
try:
colstr = conf.get(self.ucac4_db_table, attr)
_,typ = self.ucac4_columns[attr]
self.ucac4_columns[attr] = (colstr, typ)
except configparser.Error:
pass
if conf.has_section(self.apass_db_table):
for attr in self.apass_columns.keys():
try:
colstr = conf.get(self.apass_db_table, attr)
_,typ = self.apass_columns[attr]
self.apass_columns[attr] = (colstr, typ)
except configparser.Error:
pass
def copy_from_sextractor(self, xycat):
"""
Copy source data from SExtractor output file.
Parameters
----------
xycat: astropy.io.fits.HDUList object
"""
for k,v in [(n,_source_meta[n][2]) for n in _source_meta
if _source_meta[n][2]]:
self[k] = xycat[1].data.field(v)
def apply_scanner_pattern(self, plate_solution=None):
"""
Correct source coordinates for scanner pattern.
Parameters
----------
plate_solution: PlateSolution instance
"""
from .solve import PlateSolution
assert isinstance(plate_solution, PlateSolution)
assert plate_solution.pattern_ratio is not None
if plate_solution.pattern_ratio > 1.5:
y_source = self['y_source']
self['y_source'] = (y_source - plate_solution.pattern_y(y_source))
elif plate_solution.pattern_ratio < 2./3.:
x_source = self['x_source']
self['x_source'] = (x_source - plate_solution.pattern_x(x_source))
    def crossmatch_gaia(self, plate_solution=None, star_catalog=None):
        """
        Crossmatch sources with Gaia objects, considering multiple solutions.

        Propagates catalog positions to the plate epoch, projects them into
        image coordinates per astrometric solution, matches them to the
        extracted sources, fills the gaiaedr3_* columns, and builds a
        neighbors table.  Finally re-runs process_coordinates().

        Parameters:
        -----------
        plate_solution : :class:`solve.PlateSolution`
            Plate solution with one or more astrometric solutions
        star_catalog : :class:`catalog.StarCatalog`
            External star catalog with Gaia data
        """
        from .solve import PlateSolution
        from .catalog import StarCatalog
        self.log.write('Crossmatching sources with Gaia objects',
                       level=3, event=44)
        if plate_solution is None or plate_solution.num_solutions == 0:
            self.log.write('Cannot crossmatch sources with Gaia objects '
                           'due to missing astrometric solutions!',
                           level=2, event=44)
            return
        if star_catalog is None:
            self.log.write('Cannot crossmatch sources with Gaia objects '
                           'due to missing Gaia catalog data!',
                           level=2, event=44)
            return
        assert isinstance(plate_solution, PlateSolution)
        assert isinstance(star_catalog, StarCatalog)
        # Take parameters from plate_solution
        num_solutions = plate_solution.num_solutions
        solutions = plate_solution.solutions
        mean_pixscale = plate_solution.mean_pixel_scale
        # Number of Gaia stars
        num_gaia = len(star_catalog)
        self.log.write('Number of Gaia stars: {:d}'.format(num_gaia),
                       level=4, event=44, double_newline=False)
        # Calculate RA and Dec for the plate epoch by applying proper
        # motion from the catalog reference epoch (pm in mas/yr -> deg).
        ra_ref = (star_catalog['ra']
                  + (self.plate_epoch - star_catalog['ref_epoch'])
                  * star_catalog['pmra']
                  / np.cos(star_catalog['dec'] * np.pi / 180.) / 3600000.)
        dec_ref = (star_catalog['dec']
                   + (self.plate_epoch - star_catalog['ref_epoch'])
                   * star_catalog['pmdec'] / 3600000.)
        #catalog = SkyCoord(ra_ref, dec_ref, frame='icrs')
        xy_ref = np.empty((0, 2))
        sol_ref = np.empty((0,), dtype=np.int8)
        index_ref = np.empty((0,), dtype=np.int32)
        # Build a list of Gaia stars in image coordinates
        for i in np.arange(plate_solution.num_solutions):
            solution = solutions[i]
            # If there is a column named 'solution_num', then take only
            # reference stars with the current solution number
            if 'solution_num' in star_catalog.columns:
                mask_sol = star_catalog['solution_num'] == i + 1
            else:
                mask_sol = np.full(num_gaia, True)
            w = wcs.WCS(solution['header_wcs'])
            try:
                xr,yr = w.all_world2pix(ra_ref[mask_sol], dec_ref[mask_sol], 1)
            except wcs.NoConvergence as e:
                self.log.write('Failed to convert sky coordinates to '
                               'pixel coordinates for solution {:d}: {}'
                               .format(i + 1, e))
                continue
            # Keep only stars that project onto the image area.
            mask_inside = ((xr > 0.5) & (xr < plate_solution.imwidth) &
                           (yr > 0.5) & (yr < plate_solution.imheight))
            num_inside = mask_inside.sum()
            xyr = np.vstack((xr[mask_inside], yr[mask_inside])).T
            xy_ref = np.vstack((xy_ref, xyr))
            sol_ref = np.hstack((sol_ref, np.full(num_inside, i + 1)))
            index_ref = np.hstack((index_ref,
                                   np.arange(num_gaia)[mask_sol][mask_inside]))
        # Calculate mean astrometric error from the per-solution scamp
        # sigmas; fall back to 2 arcsec if none are available.
        sigma1 = u.Quantity([sol['scamp_sigma_1'] for sol in solutions
                             if sol['scamp_sigma_1'] is not None])
        sigma2 = u.Quantity([sol['scamp_sigma_2'] for sol in solutions
                             if sol['scamp_sigma_2'] is not None])
        if len(sigma1) > 0 and len(sigma2) > 0:
            mean_scamp_sigma = np.sqrt(sigma1.mean()**2 + sigma2.mean()**2)
        else:
            mean_scamp_sigma = 2. * u.arcsec
        # Crossmatch sources and Gaia stars within 5 sigma of the mean
        # astrometric error (converted to pixels).
        coords_plate = np.vstack((self['x_source'], self['y_source'])).T
        tolerance = ((5. * mean_scamp_sigma / mean_pixscale)
                     .to(u.pixel).value)
        #if (5. * mean_scamp_sigma) < 2 * u.arcsec:
        #    tolerance = ((2 * u.arcsec / mean_pixscale)
        #                 .to(u.pixel).value)
        tolerance_arcsec = (5. * mean_scamp_sigma).to(u.arcsec).value
        self.log.write('Crossmatch tolerance: {:.2f} arcsec ({:.2f} pixels)'
                       .format(tolerance_arcsec, tolerance), level=4, event=44,
                       double_newline=False)
        ind_plate, ind_ref, ds = crossmatch_cartesian(coords_plate, xy_ref,
                                                      tolerance=tolerance)
        dist_arcsec = (ds * u.pixel * mean_pixscale).to(u.arcsec).value
        ind_gaia = index_ref[ind_ref]
        # Fill Gaia columns for the matched sources.
        self['solution_num'][ind_plate] = sol_ref[ind_ref]
        self['match_radius'][ind_plate] = tolerance_arcsec
        self['gaiaedr3_id'][ind_plate] = star_catalog['source_id'][ind_gaia]
        self['gaiaedr3_gmag'][ind_plate] = star_catalog['mag'][ind_gaia]
        self['gaiaedr3_bpmag'][ind_plate] = star_catalog['mag1'][ind_gaia]
        self['gaiaedr3_rpmag'][ind_plate] = star_catalog['mag2'][ind_gaia]
        self['gaiaedr3_bp_rp'][ind_plate] = star_catalog['color_index'][ind_gaia]
        self['gaiaedr3_dist'][ind_plate] = dist_arcsec
        self.num_crossmatch_gaia = len(ind_plate)
        # Mask nan values in listed columns
        for col in ['gaiaedr3_gmag', 'gaiaedr3_bpmag', 'gaiaedr3_rpmag',
                    'gaiaedr3_bp_rp', 'gaiaedr3_dist']:
            self[col] = MaskedColumn(self[col], mask=np.isnan(self[col]))
        # Mask zeros in the ID column
        col = 'gaiaedr3_id'
        self[col] = MaskedColumn(self[col], mask=(self[col] == 0))
        # Store number of crossmatched sources to each solution
        grp = self.group_by('solution_num').groups
        tab_grp = Table(grp.aggregate(len)['solution_num', 'source_num'])
        tab_grp.rename_column('source_num', 'num_gaia_edr3')
        for i in np.arange(plate_solution.num_solutions):
            solution = solutions[i]
            m = tab_grp['solution_num'] == i + 1
            if m.sum() > 0:
                num_gaia_edr3 = tab_grp['num_gaia_edr3'][m].data[0]
                solution['num_gaia_edr3'] = num_gaia_edr3
            else:
                solution['num_gaia_edr3'] = 0
        # Crossmatch: find all neighbours for sources within 20 sigma
        # (at least 5 arcsec).
        kdt_ref = KDT(xy_ref)
        kdt_plate = KDT(coords_plate)
        max_distance = ((20. * mean_scamp_sigma / mean_pixscale)
                        .to(u.pixel).value)
        if (20. * mean_scamp_sigma) < 5 * u.arcsec:
            max_distance = (5 * u.arcsec / mean_pixscale).to(u.pixel).value
        max_dist_arcsec = (max_distance * u.pixel * mean_pixscale).to(u.arcsec).value
        self.log.write('Finding all reference stars around sources within '
                       'the radius of {:.2f} arcsec ({:.2f} pixels)'
                       .format(max_dist_arcsec, max_distance),
                       level=4, event=44)
        mtrx = kdt_plate.sparse_distance_matrix(kdt_ref, max_distance)
        mtrx_keys = np.array([a for a in mtrx.keys()])
        # Check if there are neighbors at all
        if len(mtrx_keys) > 0:
            k_plate = mtrx_keys[:,0]
            k_ref = mtrx_keys[:,1]
            dist = np.fromiter(mtrx.values(), dtype=float) * u.pixel
            # Construct neighbors table
            nbs = Table()
            nbs['source_num'] = self['source_num'][k_plate]
            nbs['gaiaedr3_id'] = star_catalog['source_id'][index_ref[k_ref]]
            nbs['dist'] = dist
            nbs['solution_num'] = sol_ref[k_ref]
            nbs['x_gaia'] = xy_ref[k_ref,0]
            nbs['y_gaia'] = xy_ref[k_ref,1]
            # Create the flag_xmatch column by joining the neighbors table
            # with the source table
            tab = Table()
            tab['source_num'] = self['source_num']
            tab['gaiaedr3_id'] = MaskedColumn(self['gaiaedr3_id']).filled(0)
            tab['flag_xmatch'] = np.int8(1)
            jtab = join(nbs, tab, keys=('source_num', 'gaiaedr3_id'),
                        join_type='left')
            jtab['flag_xmatch'] = MaskedColumn(jtab['flag_xmatch']).filled(0)
            self.neighbors_gaia = jtab
            # Calculate neighbor counts
            source_num, cnt = np.unique(nbs['source_num'].data, return_counts=True)
            mask = np.isin(self['source_num'], source_num)
            ind_mask = np.where(mask)[0]
            self['gaiaedr3_neighbors'][ind_mask] = cnt
        else:
            # Create empty neighbors table
            nbs = Table(names=('source_num', 'gaiaedr3_id', 'dist',
                               'solution_num', 'x_gaia', 'y_gaia',
                               'flag_xmatch'),
                        dtype=('i4', 'i8', 'f4', 'i2', 'f8', 'f8', 'i1'))
            self.neighbors_gaia = nbs
        # Process coordinates again, because solution_num assignments may have changed
        self.process_coordinates(plate_solution=plate_solution)
    def process_coordinates(self, plate_solution=None):
        """
        Calculate HEALPix numbers, (X, Y, Z) on the unit sphere, nearest
        neighbor distance, zenith angle, air mass.

        Fills ra_icrs/dec_icrs from the per-solution WCS, then all derived
        coordinate columns for sources with finite sky coordinates.

        Parameters:
        -----------
        plate_solution : :class:`solve.PlateSolution`
            Plate solution with one or more astrometric solutions
        """
        self.log.write('Processing source coordinates', level=3, event=60)
        if plate_solution is None or plate_solution.num_solutions == 0:
            self.log.write('Cannot process source coordinates '
                           'due to missing astrometric solutions!',
                           level=2, event=60)
            return
        # Loop over solutions and transform image coordinates to RA and Dec
        for i,solution in enumerate(plate_solution.solutions):
            w = wcs.WCS(solution['header_wcs'])
            # If there is only one solution, then transform coordinates of
            # all sources
            if plate_solution.num_solutions == 1:
                m = np.isfinite(self['x_source'])
            else:
                m = self['solution_num'] == i + 1
            if m.sum() > 0:
                ra, dec = w.all_pix2world(self['x_source'][m],
                                          self['y_source'][m], 1)
                self['ra_icrs'][m] = ra
                self['dec_icrs'][m] = dec
                # Assign astrometric errors
                if (solution['scamp_sigma_1'] is not None
                        and solution['scamp_sigma_2'] is not None):
                    self['ra_error'][m] = solution['scamp_sigma_1']
                    self['dec_error'][m] = solution['scamp_sigma_2']
        # Check if we have any usable coordinates
        bool_finite = (np.isfinite(self['ra_icrs']) &
                       np.isfinite(self['dec_icrs']))
        num_finite = bool_finite.sum()
        if num_finite == 0:
            self.log.write('No sources with usable coordinates!',
                           level=2, event=60)
            return
        ind_finite = np.where(bool_finite)[0]
        ra_finite = self['ra_icrs'][ind_finite]
        dec_finite = self['dec_icrs'][ind_finite]
        # Calculate X, Y, and Z on the unit sphere
        # http://www.sdss3.org/svn/repo/idlutils/tags/v5_5_5/pro/coord/angles_to_xyz.pro
        phi_rad = np.radians(self['ra_icrs'])
        theta_rad = np.radians(90. - self['dec_icrs'])
        self['x_sphere'] = np.cos(phi_rad) * np.sin(theta_rad)
        self['y_sphere'] = np.sin(phi_rad) * np.sin(theta_rad)
        self['z_sphere'] = np.cos(theta_rad)
        if have_healpy:
            # HEALPix indices (nested scheme) at nside 256 and 1024.
            phi_rad = np.radians(self['ra_icrs'][ind_finite])
            theta_rad = np.radians(90. - self['dec_icrs'][ind_finite])
            hp256 = healpy.ang2pix(256, theta_rad, phi_rad, nest=True)
            self['healpix256'][ind_finite] = hp256.astype(np.int32)
            hp1024 = healpy.ang2pix(1024, theta_rad, phi_rad, nest=True)
            self['healpix1024'][ind_finite] = hp1024.astype(np.int32)
            # Loop over solutions and calculate healpix statistics for each
            # solution
            for i,solution in enumerate(plate_solution.solutions):
                # Find all healpixes inside solution corners
                lat = solution['skycoord_corners'].dec.deg
                lon = solution['skycoord_corners'].ra.deg
                vertices = healpy.pixelfunc.ang2vec(lon, lat, lonlat=True)
                pix_inside = healpy.query_polygon(1024, vertices,
                                                  inclusive=False, nest=True)
                tab_inside = Table()
                tab_inside['healpix1024'] = pix_inside
                tab_inside['num_sources'] = 0
                # Select sources that belong to the solution
                m = self['solution_num'] == i + 1
                # Find all healpixes that have sources
                if m.sum() > 0:
                    grp = self[m].group_by('healpix1024').groups
                    tab_grp = Table(grp.aggregate(len)['healpix1024',
                                                       'source_num'])
                    tab_grp.rename_column('source_num', 'num_sources')
                    # Stacking with the zero-count table keeps healpixes
                    # that contain no sources in the result.
                    tab_stack = vstack([tab_grp, tab_inside])
                    grp_stack = tab_stack.group_by('healpix1024').groups
                    tab_sum = grp_stack.aggregate(np.sum)
                else:
                    tab_sum = tab_inside
                solution['healpix_table'] = tab_sum
        # Create SkyCoord from RA and Dec
        coords = SkyCoord(ra_finite, dec_finite, unit=(u.deg, u.deg))
        # Find nearest neighbours
        if num_finite > 1:
            _, ds2d, _ = match_coordinates_sky(coords, coords, nthneighbor=2)
            matchdist = ds2d.to(u.arcsec).value
            self['nn_dist'][ind_finite] = matchdist.astype(np.float32)
        # Calculate Galactic and ecliptic coordinates
        coords_gal = coords.transform_to(Galactic)
        self['gal_lon'][ind_finite] = coords_gal.l.deg
        self['gal_lat'][ind_finite] = coords_gal.b.deg
        coords_ecl = coords.transform_to(GeocentricMeanEcliptic)
        self['ecl_lon'][ind_finite] = coords_ecl.lon.deg
        self['ecl_lat'][ind_finite] = coords_ecl.lat.deg
        # Suppress ERFA warnings
        warnings.filterwarnings('ignore', message='Tried to get polar motions')
        warnings.filterwarnings('ignore', message='ERFA function')
        # Calculate zenith angle and air mass for each source
        # Check for location and single exposure
        if (self.platemeta and
                self.platemeta['site_latitude'] and
                self.platemeta['site_longitude'] and
                (self.platemeta['numexp'] == 1) and
                self.platemeta['date_avg'] and
                self.platemeta['date_avg'][0]):
            self.log.write('Calculating zenith angle and air mass for sources',
                           level=3, event=61)
            lon = self.platemeta['site_longitude']
            lat = self.platemeta['site_latitude']
            height = 0.
            if self.platemeta['site_elevation']:
                height = self.platemeta['site_elevation']
            loc = EarthLocation.from_geodetic(lon, lat, height)
            date_avg = Time(self.platemeta['date_avg'][0],
                            format='isot', scale='ut1')
            c_altaz = coords.transform_to(AltAz(obstime=date_avg, location=loc))
            self['zenith_angle'][ind_finite] = c_altaz.zen.deg
            coszt = np.cos(c_altaz.zen)
            # Air-mass polynomial in cos(zenith angle).
            airmass = ((1.002432 * coszt**2 + 0.148386 * coszt + 0.0096467)
                       / (coszt**3 + 0.149864 * coszt**2 + 0.0102963 * coszt
                          + 0.000303978))
            self['airmass'][ind_finite] = airmass
        # Restore ERFA warnings
        warnings.filterwarnings('default', message='Tried to get polar motions')
        warnings.filterwarnings('default', message='ERFA function')
def output_csv(self, filename):
"""
Write extracted sources to a CSV file.
"""
outfields = list(_source_meta)
outfmt = [_source_meta[f][1] for f in outfields]
outhdr = ','.join(outfields)
delimiter = ','
np.savetxt(filename, self[outfields], fmt=outfmt,
delimiter=delimiter, header=outhdr, comments='')
|
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
""" Gtk textbuffer with undo functionality """
#Copyright (C) 2009 Florian Heinle
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gtk
class UndoableInsert(object):
    """A single text insertion recorded on the undo stack."""
    def __init__(self, text_iter, text, length):
        self.offset = text_iter.get_offset()
        self.text = text
        self.length = length
        # Only a single-character, non-whitespace insertion may later be
        # merged with a neighbouring insertion (word-by-word undo).
        is_separator = self.text in ("\r", "\n", " ")
        self.mergeable = self.length <= 1 and not is_separator
class UndoableDelete(object):
    """A single text deletion recorded on the undo stack."""
    def __init__(self, text_buffer, start_iter, end_iter):
        self.text = text_buffer.get_text(start_iter, end_iter, True)
        self.start = start_iter.get_offset()
        self.end = end_iter.get_offset()
        # Distinguish Delete from Backspace so redo can restore the cursor
        # to the right side of the removed span: after Delete the insertion
        # point sits at (or before) the start of the deleted range.
        insert_iter = text_buffer.get_iter_at_mark(text_buffer.get_insert())
        self.delete_key_used = insert_iter.get_offset() <= self.start
        # Only a single-character, non-whitespace deletion is a merge
        # candidate (word-by-word undo).
        self.mergeable = (self.end - self.start <= 1
                          and self.text not in ("\r", "\n", " "))
class UndoableBuffer(Gtk.TextBuffer):
    """text buffer with added undo capabilities
    designed as a drop-in replacement for Gtksourceview,
    at least as far as undo is concerned

    Edits are recorded as UndoableInsert/UndoableDelete objects on
    undo_stack; undone actions move to redo_stack.  Consecutive
    single-character edits are merged into one record so undo operates
    roughly word-by-word instead of keystroke-by-keystroke.
    """
    def __init__(self):
        """
        we'll need empty stacks for undo/redo and some state keeping
        """
        Gtk.TextBuffer.__init__(self)
        self.undo_stack = []
        self.redo_stack = []
        # when True, edits are not recorded (used while replaying undo/redo)
        self.not_undoable_action = False
        # when True, we are inside undo()/redo(), so redo_stack is preserved
        self.undo_in_progress = False
        # record edits via the buffer's own change signals
        self.connect('insert-text', self.on_insert_text)
        self.connect('delete-range', self.on_delete_range)
    @property
    def can_undo(self):
        # True when at least one recorded action can be undone
        return bool(self.undo_stack)
    @property
    def can_redo(self):
        # True when at least one undone action can be reapplied
        return bool(self.redo_stack)
    def on_insert_text(self, textbuffer, text_iter, text, length):
        """'insert-text' signal handler: record (or merge) an insertion."""
        def can_be_merged(prev, cur):
            """see if we can merge multiple inserts here
            will try to merge words or whitespace
            can't merge if prev and cur are not mergeable in the first place
            can't merge when user set the input bar somewhere else
            can't merge across word boundaries"""
            WHITESPACE = (' ', '\t')
            if not cur.mergeable or not prev.mergeable:
                return False
            elif cur.offset != (prev.offset + prev.length):
                return False
            elif cur.text in WHITESPACE and not prev.text in WHITESPACE:
                return False
            elif prev.text in WHITESPACE and not cur.text in WHITESPACE:
                return False
            return True
        # any fresh user edit invalidates the redo history
        if not self.undo_in_progress:
            self.redo_stack = []
        # edits made between begin/end_not_undoable_action are not recorded
        if self.not_undoable_action:
            return
        undo_action = UndoableInsert(text_iter, text, length)
        # inspect the previous action by popping it; it is pushed back below
        try:
            prev_insert = self.undo_stack.pop()
        except IndexError:
            self.undo_stack.append(undo_action)
            return
        if not isinstance(prev_insert, UndoableInsert):
            self.undo_stack.append(prev_insert)
            self.undo_stack.append(undo_action)
            return
        if can_be_merged(prev_insert, undo_action):
            # extend the previous insertion in place instead of stacking
            prev_insert.length += undo_action.length
            prev_insert.text += undo_action.text
            self.undo_stack.append(prev_insert)
        else:
            self.undo_stack.append(prev_insert)
            self.undo_stack.append(undo_action)
    def on_delete_range(self, text_buffer, start_iter, end_iter):
        """'delete-range' signal handler: record (or merge) a deletion."""
        def can_be_merged(prev, cur):
            """see if we can merge multiple deletions here
            will try to merge words or whitespace
            can't merge if prev and cur are not mergeable in the first place
            can't merge if delete and backspace key were both used
            can't merge across word boundaries"""
            WHITESPACE = (' ', '\t')
            if not cur.mergeable or not prev.mergeable:
                return False
            elif prev.delete_key_used != cur.delete_key_used:
                return False
            elif prev.start != cur.start and prev.start != cur.end:
                return False
            elif cur.text not in WHITESPACE and \
               prev.text in WHITESPACE:
                return False
            elif cur.text in WHITESPACE and \
               prev.text not in WHITESPACE:
                return False
            return True
        # any fresh user edit invalidates the redo history
        if not self.undo_in_progress:
            self.redo_stack = []
        # edits made between begin/end_not_undoable_action are not recorded
        if self.not_undoable_action:
            return
        undo_action = UndoableDelete(text_buffer, start_iter, end_iter)
        # inspect the previous action by popping it; it is pushed back below
        try:
            prev_delete = self.undo_stack.pop()
        except IndexError:
            self.undo_stack.append(undo_action)
            return
        if not isinstance(prev_delete, UndoableDelete):
            self.undo_stack.append(prev_delete)
            self.undo_stack.append(undo_action)
            return
        if can_be_merged(prev_delete, undo_action):
            if prev_delete.start == undo_action.start:  # delete key used
                # text grows forward: append and extend the end offset
                prev_delete.text += undo_action.text
                prev_delete.end += (undo_action.end - undo_action.start)
            else:  # Backspace used
                # text grows backward: prepend and move the start offset
                prev_delete.text = "%s%s" % (undo_action.text,
                                             prev_delete.text)
                prev_delete.start = undo_action.start
            self.undo_stack.append(prev_delete)
        else:
            self.undo_stack.append(prev_delete)
            self.undo_stack.append(undo_action)
    def begin_not_undoable_action(self):
        """don't record the next actions
        toggles self.not_undoable_action"""
        self.not_undoable_action = True
    def end_not_undoable_action(self):
        """record next actions
        toggles self.not_undoable_action"""
        self.not_undoable_action = False
    def undo(self):
        """undo inserts or deletions
        undone actions are being moved to redo stack"""
        if not self.undo_stack:
            return
        # the replay below must not itself be recorded as a new edit
        self.begin_not_undoable_action()
        self.undo_in_progress = True
        undo_action = self.undo_stack.pop()
        self.redo_stack.append(undo_action)
        if isinstance(undo_action, UndoableInsert):
            # undo an insertion by deleting the inserted span
            start = self.get_iter_at_offset(undo_action.offset)
            stop = self.get_iter_at_offset(
                undo_action.offset + undo_action.length
            )
            self.delete(start, stop)
            self.place_cursor(start)
        else:
            # undo a deletion by re-inserting the deleted text
            start = self.get_iter_at_offset(undo_action.start)
            self.insert(start, undo_action.text)
            stop = self.get_iter_at_offset(undo_action.end)
            # put the cursor back where the user's keypress left it
            if undo_action.delete_key_used:
                self.place_cursor(start)
            else:
                self.place_cursor(stop)
        self.end_not_undoable_action()
        self.undo_in_progress = False
    def redo(self):
        """redo inserts or deletions
        redone actions are moved to undo stack"""
        if not self.redo_stack:
            return
        # the replay below must not itself be recorded as a new edit
        self.begin_not_undoable_action()
        self.undo_in_progress = True
        redo_action = self.redo_stack.pop()
        self.undo_stack.append(redo_action)
        if isinstance(redo_action, UndoableInsert):
            start = self.get_iter_at_offset(redo_action.offset)
            self.insert(start, redo_action.text)
            new_cursor_pos = self.get_iter_at_offset(
                redo_action.offset + redo_action.length
            )
            self.place_cursor(new_cursor_pos)
        else:
            start = self.get_iter_at_offset(redo_action.start)
            stop = self.get_iter_at_offset(redo_action.end)
            self.delete(start, stop)
            self.place_cursor(start)
        self.end_not_undoable_action()
        self.undo_in_progress = False
|
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import os.path
import pytest
from unittest import TestCase
import shutil
import zoo.orca.data
import zoo.orca.data.pandas
from zoo.orca import OrcaContext
from zoo.common.nncontext import *
from zoo.orca.data.image import write_tfrecord, read_tfrecord
class TestSparkBackend(TestCase):
    """Tests for the Spark backend of zoo.orca.data.pandas readers
    (read_csv / read_json / read_parquet) and TFRecord image round-trips.
    """
    def setup_method(self, method):
        # Resolve the shared test-resources directory relative to this file.
        self.resource_path = os.path.join(os.path.split(__file__)[0], "../../resources")
    def test_header_and_names(self):
        """Header handling: inferred header, header=None, explicit names."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        # Default header="infer"
        data_shard = zoo.orca.data.pandas.read_csv(file_path)
        data = data_shard.collect()
        assert len(data) == 2, "number of shard should be 2"
        df = data[0]
        assert "location" in df.columns
        file_path = os.path.join(self.resource_path, "orca/data/no_header.csv")
        # No header, default to be '0','1','2'
        data_shard = zoo.orca.data.pandas.read_csv(file_path, header=None)
        df2 = data_shard.collect()[0]
        assert '0' in df2.columns and '2' in df2.columns
        # Specify names as header
        data_shard = zoo.orca.data.pandas.read_csv(
            file_path, header=None, names=["ID", "sale_price", "location"])
        df3 = data_shard.collect()[0]
        assert "sale_price" in df3.columns
    def test_usecols(self):
        """Column selection by position, by name and via a callable."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path, usecols=[0, 1])
        data = data_shard.collect()
        df = data[0]
        assert "sale_price" in df.columns
        assert "location" not in df.columns
        data_shard = zoo.orca.data.pandas.read_csv(file_path, usecols=["ID"])
        data = data_shard.collect()
        df2 = data[0]
        assert "ID" in df2.columns and "location" not in df2.columns
        def filter_col(name):
            # usecols also accepts a predicate over column names
            return name == "sale_price"
        data_shard = zoo.orca.data.pandas.read_csv(file_path, usecols=filter_col)
        data = data_shard.collect()
        df3 = data[0]
        assert "sale_price" in df3.columns and "location" not in df3.columns
    def test_dtype(self):
        """dtype as a single string and as a per-column mapping."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path, dtype="float")
        data = data_shard.collect()
        df = data[0]
        assert df.location.dtype == "float64"
        assert df.ID.dtype == "float64"
        # Mapping applies only to the named column; others keep inferred dtypes.
        data_shard = zoo.orca.data.pandas.read_csv(file_path, dtype={"sale_price": np.float32})
        data = data_shard.collect()
        df2 = data[0]
        assert df2.sale_price.dtype == "float32" and df2.ID.dtype == "int64"
    def test_squeeze(self):
        """squeeze=True turns a single-column frame into a Series."""
        import pandas as pd
        file_path = os.path.join(self.resource_path, "orca/data/single_column.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path, squeeze=True)
        data = data_shard.collect()
        df = data[0]
        assert isinstance(df, pd.Series)
    def test_index_col(self):
        """index_col promotes the named column to the frame index."""
        file_path = os.path.join(self.resource_path, "orca/data/csv/morgage1.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path, index_col="ID")
        data = data_shard.collect()
        df = data[0]
        assert 100529 in df.index
    def test_mix(self):
        """Combinations of header/names/usecols/dtype, including error paths."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path, header=0, names=['user', 'item'],
                                                   usecols=[0, 1])
        data = data_shard.collect()
        df = data[0]
        assert "user" in df.columns
        assert "item" in df.columns
        # names must be consistent with usecols when both select by name
        with self.assertRaises(Exception) as context:
            data_shard = zoo.orca.data.pandas.read_csv(file_path, header=0,
                                                       names=['ID', 'location'], usecols=["ID"])
            data = data_shard.collect()
        self.assertTrue('Passed names did not match usecols'
                        in str(context.exception))
        data_shard = zoo.orca.data.pandas.read_csv(file_path, header=0,
                                                   names=['user', 'item'], usecols=[0, 1],
                                                   dtype={0: np.float32, 1: np.int32})
        data = data_shard.collect()
        df2 = data[0]
        assert df2.user.dtype == "float32" and df2.item.dtype == "int32"
        # names beyond the usecols selection are dropped positionally
        data_shard = zoo.orca.data.pandas.read_csv(file_path, header=0,
                                                   names=['user', 'item', 'location'],
                                                   usecols=[1, 2])
        data = data_shard.collect()
        df2 = data[0]
        assert "user" not in df2.columns
        assert "item" in df2.columns
        assert "location" in df2.columns
        data_shard = zoo.orca.data.pandas.read_csv(file_path, header=0,
                                                   names=['user', 'item', 'rating'],
                                                   usecols=['user', 'item'],
                                                   dtype={0: np.float32, 1: np.int32})
        data = data_shard.collect()
        df2 = data[0]
        assert df2.user.dtype == "float32" and df2.item.dtype == "int32"
        # dtype keyed by a column index outside the selection must fail
        with self.assertRaises(Exception) as context:
            data_shard = zoo.orca.data.pandas.read_csv(file_path, header=0,
                                                       names=['user', 'item'], usecols=[0, 1],
                                                       dtype={1: np.float32, 2: np.int32})
            data = data_shard.collect()
        self.assertTrue('column index to be set type is not in current dataframe'
                        in str(context.exception))
    def test_read_invalid_path(self):
        """A non-existent path must raise, not return an empty shard."""
        file_path = os.path.join(self.resource_path, "abc")
        with self.assertRaises(Exception) as context:
            xshards = zoo.orca.data.pandas.read_csv(file_path)
        # This error is raised by pyspark.sql.utils.AnalysisException
        self.assertTrue('Path does not exist' in str(context.exception))
    def test_read_json(self):
        """read_json: default columns, names, usecols and dtype handling."""
        file_path = os.path.join(self.resource_path, "orca/data/json")
        data_shard = zoo.orca.data.pandas.read_json(file_path)
        data = data_shard.collect()
        df = data[0]
        assert "timestamp" in df.columns and "value" in df.columns
        data_shard = zoo.orca.data.pandas.read_json(file_path, names=["time", "value"])
        data = data_shard.collect()
        df2 = data[0]
        assert "time" in df2.columns and "value" in df2.columns
        data_shard = zoo.orca.data.pandas.read_json(file_path, usecols=[0])
        data = data_shard.collect()
        df3 = data[0]
        assert "timestamp" in df3.columns and "value" not in df3.columns
        data_shard = zoo.orca.data.pandas.read_json(file_path, dtype={"value": "float"})
        data = data_shard.collect()
        df4 = data[0]
        assert df4.value.dtype == "float64"
    def test_read_parquet(self):
        """read_parquet round-trip: write via Spark, read back with
        optional column selection and an explicit schema."""
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        sc = init_nncontext()
        from pyspark.sql.functions import col
        spark = OrcaContext.get_spark_session()
        df = spark.read.csv(file_path, header=True)
        df = df.withColumn('sale_price', col('sale_price').cast('int'))
        temp = tempfile.mkdtemp()
        df.write.parquet(os.path.join(temp, "test_parquet"))
        data_shard2 = zoo.orca.data.pandas.read_parquet(os.path.join(temp, "test_parquet"))
        assert data_shard2.num_partitions() == 2, "number of shard should be 2"
        data = data_shard2.collect()
        df = data[0]
        assert "location" in df.columns
        data_shard2 = zoo.orca.data.pandas.read_parquet(os.path.join(temp, "test_parquet"),
                                                        columns=['ID', 'sale_price'])
        data = data_shard2.collect()
        df = data[0]
        assert len(df.columns) == 2
        from pyspark.sql.types import StructType, StructField, IntegerType, StringType
        schema = StructType([StructField("ID", StringType(), True),
                             StructField("sale_price", IntegerType(), True),
                             StructField("location", StringType(), True)])
        data_shard3 = zoo.orca.data.pandas.read_parquet(os.path.join(temp, "test_parquet"),
                                                        columns=['ID', 'sale_price'],
                                                        schema=schema)
        data = data_shard3.collect()
        df = data[0]
        assert str(df['sale_price'].dtype) == 'int64'
        shutil.rmtree(temp)
    def test_write_read_imagenet(self):
        """ImageNet -> TFRecord conversion and read-back smoke test."""
        raw_data = os.path.join(self.resource_path, "imagenet_to_tfrecord")
        temp_dir = tempfile.mkdtemp()
        try:
            write_tfrecord(format="imagenet", imagenet_path=raw_data, output_path=temp_dir)
            data_dir = os.path.join(temp_dir, "train")
            train_dataset = read_tfrecord(format="imagenet", path=data_dir, is_training=True)
            train_dataset.take(1)
        finally:
            # always remove the temporary TFRecord output
            shutil.rmtree(temp_dir)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    pytest.main([__file__])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.mongo import MongoDB
from lib.protomemes import get_protomemes
from lib.vectorizer import vectorize_text, tdidf, create_dictionary,get_frequency_vectors,create_matrix_from_vectors
from sklearn.metrics.pairwise import cosine_similarity
from lib.plot import augmented_dendrogram
from time import time
from multiprocessing import Pool
import numpy as np
from scipy import array as sparray
from scipy.cluster.hierarchy import linkage, dendrogram, leaves_list
from scipy.cluster.vq import kmeans,vq
from scipy.spatial.distance import pdist, squareform
import pylab
import matplotlib.pyplot as plt
# Variables
# collection="week1"
# Connect to Mongo
# db=MongoDB("weibodata").db
# count = db[collection].count()
# --- Script setup: fetch protomemes and build the three feature matrices ---
t00=time()
# number of protomemes to fetch (sample size for this run)
count=100
pms=get_protomemes(None,count)
# convert to numpy array
protomemes=np.array(pms)
print "%d protomemes obtained." % len(protomemes)
print
print
print "Label matrix with protomemes description"
# human-readable row labels ("type : id"); empty if records lack the keys
try:
    labels=[ p["value"]["type"] +" : "+p["_id"] for p in protomemes ]
except KeyError:
    labels=[]
print " length of labels %d"%len(labels)
print '#'*40
print "Step 1 : Compute all vectors from protomemes"
##################################################################
# Text similarity (TF-IDF)
#
print '-'*40
print "Computing text vectors from protomemes"
print
corpus=vectorize_text(protomemes)
text_matrix=tdidf(corpus)
##################################################################
# Diffusion similarity
#
print '-'*40
print 'Vectorizing diffusion using RT/mentions users list'
print
print " Creating dictionary for diffusion"
# one diffusion (RT/mention user list) entry per protomeme
diffusion=[]
for proto in protomemes:
    diffusion.append(proto["value"]["diffusion"])
dictionary=create_dictionary(diffusion)
vector_diffusion_corpus=get_frequency_vectors(diffusion,dictionary)
diffusion_matrix=create_matrix_from_vectors(vector_diffusion_corpus)
##################################################################
# Binary tweets
#
print '-'*40
print "Vectorizing tweets ids"
print
print " Creating dictionary for binary tweets"
# one tweet-id list per protomeme, vectorized against a shared dictionary
binary_tweets=[]
for proto in protomemes:
    binary_tweets.append(proto["value"]["tweets"])
tweet_dic=create_dictionary(binary_tweets)
tweet_corpus=get_frequency_vectors(binary_tweets,tweet_dic)
tweets_matrix=create_matrix_from_vectors(tweet_corpus)
##################################################################
# User similarity
# TODO : add all ids of user to protomemes (should be mined in prepare.py)
#
# print '-'*40
# print "TODO : Tweet simple similarity "
# print
##################################################################
# STEP 2
# Compute similarities
#
print "Step 2 : Compare and combine matrix to detect clusters"
print '#'*40
print
print " text_matrix - n_samples: %d, n_features: %d "%text_matrix.shape
print " tweets_matrix - n_samples: %d, n_features: %d "%tweets_matrix.shape
print " diffusion_matrix - n_samples: %d, n_features: %d "%diffusion_matrix.shape
print
print " Compute cosine similarities from corpus"
def compute_cosine(matrix):
"""Worker function for multiprocessing"""
print ' worker : computation process started'
t1=time()
cos=[cosine_similarity(matrix, pm)[0] for pm in matrix]
# sleep(2)
print " Cosine computed in",
print " in %fs"%(time()-t1)
return cos
def compute_similarities_using_multiple_processes(l):
'''process the test list elements in parallel'''
print ' creating multi-process pool'
pool = Pool()
results = pool.map(compute_cosine, l)
return results
t0=time()
# multi processing to improve computing
results=compute_cosine_using_multiple_processes([text_matrix,diffusion_matrix,tweets_matrix])
print " done in %fs" % (time() - t0)
text_sim=results[0]
diffusion_sim=results[1]
tweets_sim=results[2]
# TODO : fallback on single-threaded computing if CPU doesn't support multiprocessing
# text_sim=[cosine_similarity(pm, text_matrix)[0] for pm in text_matrix]
# diffusion_sim=[cosine_similarity(pm, diffusion_matrix)[0] for pm in diffusion_matrix]
# tweets_sim= [cosine_similarity(pm, tweets_matrix) for pm in tweets_matrix]
print
# linear combination of similarity measures,
print "Starting linear combination of similarity measures,"
wt = 0.0
wc = 0.7
wu = 0.1
wd = 0.2
if wt+wc+wu+wd != 1:
raise ValueError("scale factors sum should equals 1")
# TODO : add missing parameters
print " weighting and scaling up matrix "
combi=wc*np.array(text_sim) +wd*np.array(diffusion_sim) +wt*np.array(tweets_sim)
print " combination length :%d " % len(combi)
print " calculate matrix w average linkage algorithm"
linkage_matrix=linkage(combi, method='average')
print " clusters: n_samples: %d, n_features: %d" % linkage_matrix.shape
# get order from dendrogram leaves
reordered = leaves_list(linkage_matrix)
# reorder the data matrix and row headers according to leaves
ordered_data_matrix = combi[reordered,:]
# do the same for the row headers
row_headers = np.array(labels)
ordered_row_headers = row_headers[reordered,:]
print
print " plotting data and generating images"
# use vq() to get as assignment for each obs.
# assignment,cdist = vq(clusters,clusters)
# plt.scatter(clusters[:,0], clusters[:,1], c=assignment)
# plt.show()
# plt.clf()
show_leaf_counts = False
ddata = augmented_dendrogram(linkage_matrix,
color_threshold=1,
p=60,
truncate_mode='lastp',
show_leaf_counts=show_leaf_counts,
)
plt.title("Dendogram for %s protomemes"%len(protomemes))
plt.show()
print " everything done in %fs" % (time() - t00)
|
|
"""NAMD parser tests.
"""
from os.path import basename
from re import search
import bz2
import pytest
from alchemlyb.parsing.namd import extract_u_nk
from alchemtest.namd import load_tyr2ala
from alchemtest.namd import load_idws
from alchemtest.namd import load_restarted
from alchemtest.namd import load_restarted_reversed
# Indices of lambda values in the following line in NAMD fepout files:
# #NEW FEP WINDOW: LAMBDA SET TO 0.6 LAMBDA2 0.7 LAMBDA_IDWS 0.5
# (indices refer to tokens of the line after str.split())
LAMBDA1_IDX_NEW = 6
LAMBDA2_IDX_NEW = 8
LAMBDA_IDWS_IDX_NEW = 10
# Indices of lambda values in the following type of line in NAMD fepout files:
# #Free energy change for lambda window [ 0.6 0.7 ] is 0.12345 ; net change until now is 0.12345
LAMBDA1_IDX_FREE = 7
LAMBDA2_IDX_FREE = 8
@pytest.fixture(scope="module")
def dataset():
    """Module-scoped tyr2ala FEP dataset, loaded once for all tests."""
    return load_tyr2ala()
@pytest.mark.parametrize("direction,shape",
                         [('forward', (21021, 21)),
                          ('backward', (21021, 21)),
                          ])
def test_u_nk(dataset, direction, shape):
    """Test that u_nk has the correct form when extracted from files.
    """
    for filename in dataset['data'][direction]:
        u_nk = extract_u_nk(filename, T=300)
        # MultiIndex levels and per-file (rows, lambda-columns) shape
        assert u_nk.index.names == ['time', 'fep-lambda']
        assert u_nk.shape == shape
def test_u_nk_idws():
    """Test that u_nk has the correct form when extracted from files.
    """
    # IDWS runs are parsed from the whole list of files at once
    filenames = load_idws()['data']['forward']
    u_nk = extract_u_nk(filenames, T=300)
    assert u_nk.index.names == ['time', 'fep-lambda']
    assert u_nk.shape == (29252, 11)
@pytest.fixture(params=[load_restarted, load_restarted_reversed])
def restarted_dataset(request):
    """Restarted IDWS dataset, parametrized over both run directions."""
    return request.param()
def _corrupt_fepout(fepout_in, params, tmp_path):
"""Corrupts specific lines in a fepout file according to each line's prefix,
using caller-supplied functions.
Parameters
----------
fepout_in: str
Path to fepout file to be modified. This file will not be overwritten.
params: list of tuples
For each tuple, the first element must be a str that will be passed to
startswith() to identify the line(s) to modify (e.g. "#NEW"). The
second element must be a function that accepts a list of strs which is
the output of running split() on the identified line and returns
a modified list of tokens that will be reassembled into the line to be
output.
The function may return None if this line should not be written. This
can be used to delete lines from the fepout.
Returns
-------
The name of a temporary file which pytest will unlink.
"""
fepout_out = tmp_path / basename(fepout_in)
with bz2.open(fepout_out, 'wt') as f_out:
with bz2.open(fepout_in, 'rt') as f_in:
for line in f_in:
for prefix, func in params:
if line.startswith(prefix):
tokens_out = func(line.split())
line = ' '.join(tokens_out) + '\n' if tokens_out is not None else None
if line is not None:
f_out.write(line)
return str(fepout_out)
@pytest.fixture
def restarted_dataset_inconsistent(restarted_dataset, tmp_path):
    """Returns intentionally messed up dataset where lambda1 and lambda2 at start and end of
    a window are different."""
    filenames = sorted(restarted_dataset['data']['both'])
    changed = False
    def func_free_line(l):
        nonlocal changed
        # Nudge lambda1 on '#Free' footers in the 0.7-0.9 range so the footer
        # no longer matches the window's '#NEW' header.
        if float(l[7]) >= 0.7 and float(l[7]) < 0.9:
            l[7] = str(float(l[7]) + 0.0001)
            changed = True
        return l
    for i in range(len(filenames)):
        filenames[i] = _corrupt_fepout(filenames[i], [('#Free', func_free_line)], tmp_path)
        # Only actually modify one window so we don't trigger the wrong exception
        if changed is True:
            break
    # Don't directly modify the glob object
    restarted_dataset['data']['both'] = filenames
    return restarted_dataset
@pytest.fixture
def restarted_dataset_idws_without_lambda_idws(restarted_dataset, tmp_path):
    """Returns intentionally messed up dataset where the first window has IDWS data
    but no lambda_idws.
    """
    # First window won't have any IDWS data so we just drop all its files and fudge the lambdas
    # in the next window to include 0.0 or 1.0 (as appropriate) so we still have a nominally complete calculation
    filenames = [x for x in sorted(restarted_dataset['data']['both']) if search('000[a-z]?.fepout', x) is None]

    def func_new_line(l):
        if float(l[LAMBDA1_IDX_NEW]) > 0.5:  # 1->0 (reversed) calculation
            # BUGFIX: was '==' (a no-op comparison); the token must be assigned
            l[LAMBDA1_IDX_NEW] = '1.0'
        else:  # regular 0->1 calculation
            l[LAMBDA1_IDX_NEW] = '0.0'
        # Drop the lambda_idws
        return l[:9]

    def func_free_line(l):
        if float(l[LAMBDA1_IDX_FREE]) > 0.5:  # 1->0 (reversed) calculation
            # BUGFIX: was '==' (a no-op comparison); the token must be assigned
            l[LAMBDA1_IDX_FREE] = '1.0'
        else:  # regular 0->1 calculation
            l[LAMBDA1_IDX_FREE] = '0.0'
        return l

    filenames[0] = _corrupt_fepout(filenames[0], [('#NEW', func_new_line), ('#Free', func_free_line)], tmp_path)
    restarted_dataset['data']['both'] = filenames
    return restarted_dataset
@pytest.fixture
def restarted_dataset_toomany_lambda2(restarted_dataset, tmp_path):
    """Returns intentionally messed up dataset, where there are too many lambda2 values for a
    given lambda1."""
    filenames = sorted(restarted_dataset['data']['both'])
    # For the same l1 and lidws we retain old lambda2 values thus ensuring a collision
    # Also, don't make a window where lambda1 >= lambda2 because this will trigger the
    # "direction changed" exception instead
    def func_new_line(l):
        # Windows with small lambda2 stay intact; later windows all get
        # lambda1 pinned to 0.2 (and lambda_idws to 0.1 when present).
        if float(l[LAMBDA2_IDX_NEW]) <= 0.2:
            return l
        l[LAMBDA1_IDX_NEW] = '0.2'
        if len(l) > 9 and l[9] == 'LAMBDA_IDWS':
            l[LAMBDA_IDWS_IDX_NEW] = '0.1'
        return l
    def func_free_line(l):
        # Keep '#Free' footers consistent with the corrupted headers
        if float(l[LAMBDA2_IDX_FREE]) <= 0.2:
            return l
        l[LAMBDA1_IDX_FREE] = '0.2'
        return l
    for i in range(len(filenames)):
        filenames[i] = \
            _corrupt_fepout(filenames[i], [('#NEW', func_new_line), ('#Free', func_free_line)], tmp_path)
    restarted_dataset['data']['both'] = filenames
    return restarted_dataset
@pytest.fixture
def restarted_dataset_toomany_lambda_idws(restarted_dataset, tmp_path):
    """Returns intentionally messed up dataset, where there is more than one
    lambda_idws value for a given (lambda1, lambda2) pair."""
    filenames = sorted(restarted_dataset['data']['both'])
    # For the same lambda1 and lambda2 we retain the first set of lambda1/lambda2 values
    # and replicate them across all windows thus ensuring that there will be more than
    # one lambda_idws value for a given lambda1 and lambda2
    this_lambda1, this_lambda2 = None, None
    def func_new_line(l):
        nonlocal this_lambda1, this_lambda2
        # Remember the first window's lambda pair and reuse it later
        if this_lambda1 is None:
            this_lambda1, this_lambda2 = l[LAMBDA1_IDX_NEW], l[LAMBDA2_IDX_NEW]
        # Ensure that changing these lambda values won't cause a reversal in direction and trigger
        # an exception we're not trying to test here
        if len(l) > 9 and float(l[LAMBDA_IDWS_IDX_NEW]) < 0.5:
            l[LAMBDA1_IDX_NEW], l[LAMBDA2_IDX_NEW] = this_lambda1, this_lambda2
        return l
    def func_free_line(l):
        # NOTE(review): defined but never passed to _corrupt_fepout below —
        # confirm whether '#Free' lines were also meant to be rewritten.
        l[LAMBDA1_IDX_FREE], l[LAMBDA2_IDX_FREE] = this_lambda1, this_lambda2
        return l
    for i in range(len(filenames)):
        filenames[i] = _corrupt_fepout(filenames[i], [('#NEW', func_new_line)], tmp_path)
    restarted_dataset['data']['both'] = filenames
    return restarted_dataset
@pytest.fixture
def restarted_dataset_direction_changed(restarted_dataset, tmp_path):
    """Returns intentionally messed up dataset, with one window where the lambda values are reversed."""
    filenames = sorted(restarted_dataset['data']['both'])
    def func_new_line(l):
        # Swap lambda1 <-> lambda_idws on the '#NEW' header (lambda2 unchanged)
        l[6], l[8], l[10] = l[10], l[8], l[6]
        return l
    def func_free_line(l):
        # Swap lambda1 <-> lambda2 on the '#Free' footer
        l[7], l[8] = l[8], l[7]
        return l
    # Reverse the direction of lambdas for this window
    idx_to_corrupt = filenames.index(sorted(filenames)[-3])
    fname1 = _corrupt_fepout(filenames[idx_to_corrupt], [('#NEW', func_new_line), ('#Free', func_free_line)], tmp_path)
    filenames[idx_to_corrupt] = fname1
    restarted_dataset['data']['both'] = filenames
    return restarted_dataset
@pytest.fixture
def restarted_dataset_all_windows_truncated(restarted_dataset, tmp_path):
    """Returns dataset where all windows are truncated (no #Free... footer lines)."""
    # Dropping every '#Free' footer makes each window look truncated.
    corrupted = [
        _corrupt_fepout(fname, [('#Free', lambda tokens: None)], tmp_path)
        for fname in sorted(restarted_dataset['data']['both'])
    ]
    restarted_dataset['data']['both'] = corrupted
    return restarted_dataset
@pytest.fixture
def restarted_dataset_last_window_truncated(restarted_dataset, tmp_path):
    """Returns dataset where the last window is truncated (no #Free... footer line)."""
    filenames = sorted(restarted_dataset['data']['both'])
    # Strip the '#Free' footer from the final window only.
    filenames[-1] = _corrupt_fepout(
        filenames[-1], [('#Free', lambda tokens: None)], tmp_path)
    restarted_dataset['data']['both'] = filenames
    return restarted_dataset
def test_u_nk_restarted():
    """Test that u_nk has the correct form when extracted from an IDWS
    FEP run that includes terminations and restarts.
    """
    filenames = load_restarted()['data']['both']
    u_nk = extract_u_nk(filenames, T=300)
    assert u_nk.index.names == ['time', 'fep-lambda']
    assert u_nk.shape == (30061, 11)


def test_u_nk_restarted_missing_window_header(tmp_path):
    """Test that u_nk has the correct form when a #NEW line is missing from the restarted dataset
    and the parser has to infer lambda_idws for that window."""
    filenames = sorted(load_restarted()['data']['both'])
    # Remove "#NEW" line
    filenames[4] = _corrupt_fepout(filenames[4], [('#NEW', lambda l: None),], tmp_path)
    u_nk = extract_u_nk(filenames, T=300)
    assert u_nk.index.names == ['time', 'fep-lambda']
    assert u_nk.shape == (30061, 11)


def test_u_nk_restarted_reversed():
    """Test that u_nk has the correct form for a restarted, reversed (1->0)
    IDWS FEP run."""
    filenames = load_restarted_reversed()['data']['both']
    u_nk = extract_u_nk(filenames, T=300)
    assert u_nk.index.names == ['time', 'fep-lambda']
    assert u_nk.shape == (30170, 11)


def test_u_nk_restarted_reversed_missing_window_header(tmp_path):
    """Test that u_nk has the correct form when a #NEW line is missing from the restarted_reversed dataset
    and the parser has to infer lambda_idws for that window."""
    filenames = sorted(load_restarted_reversed()['data']['both'])
    # Remove "#NEW" line
    filenames[4] = _corrupt_fepout(filenames[4], [('#NEW', lambda l: None),], tmp_path)
    u_nk = extract_u_nk(filenames, T=300)
    assert u_nk.index.names == ['time', 'fep-lambda']
    assert u_nk.shape == (30170, 11)
# --- Error paths: each corrupted fixture must raise a specific ValueError ---
def test_u_nk_restarted_direction_changed(restarted_dataset_direction_changed):
    """Test that when lambda values change direction within a dataset, parsing throws an error."""
    with pytest.raises(ValueError, match='Lambda values change direction'):
        u_nk = extract_u_nk(restarted_dataset_direction_changed['data']['both'], T=300)


def test_u_nk_restarted_idws_without_lambda_idws(restarted_dataset_idws_without_lambda_idws):
    """Test that when the first window has IDWS data but no lambda_idws, parsing throws an error.

    In this situation, the lambda_idws cannot be inferred, because there's no previous lambda
    value available.
    """
    with pytest.raises(ValueError, match='IDWS data present in first window but lambda_idws not included'):
        u_nk = extract_u_nk(restarted_dataset_idws_without_lambda_idws['data']['both'], T=300)


def test_u_nk_restarted_inconsistent(restarted_dataset_inconsistent):
    """Test that when lambda values are inconsistent between start and end of a single window,
    parsing throws an error.
    """
    with pytest.raises(ValueError, match='Inconsistent lambda values within the same window'):
        u_nk = extract_u_nk(restarted_dataset_inconsistent['data']['both'], T=300)


def test_u_nk_restarted_toomany_lambda_idws(restarted_dataset_toomany_lambda_idws):
    """Test that when there is more than one lambda_idws for a given lambda1, parsing throws an error."""
    with pytest.raises(ValueError, match='More than one lambda_idws value for a particular lambda1'):
        u_nk = extract_u_nk(restarted_dataset_toomany_lambda_idws['data']['both'], T=300)


def test_u_nk_restarted_toomany_lambda2(restarted_dataset_toomany_lambda2):
    """Test that when there is more than one lambda2 for a given lambda1, parsing throws an error."""
    with pytest.raises(ValueError, match='More than one lambda2 value for a particular lambda1'):
        u_nk = extract_u_nk(restarted_dataset_toomany_lambda2['data']['both'], T=300)
def test_u_nk_restarted_all_windows_truncated(restarted_dataset_all_windows_truncated):
"""Test that when there is more than one lambda2 for a given lambda1, parsing throws an error."""
with pytest.raises(ValueError, match='New window begun after truncated window'):
u_nk = extract_u_nk(restarted_dataset_all_windows_truncated['data']['both'], T=300)
def test_u_nk_restarted_last_window_truncated(restarted_dataset_last_window_truncated):
"""Test that when there is more than one lambda2 for a given lambda1, parsing throws an error."""
with pytest.raises(ValueError, match='Last window is truncated'):
u_nk = extract_u_nk(restarted_dataset_last_window_truncated['data']['both'], T=300)
|
|
import os
import shutil
import tempfile
from uuid import uuid4
import ethereum.keys
from ethereum.slogging import get_logger
from ethereum.utils import decode_hex, remove_0x_head
from devp2p.app import BaseApp
import pytest
from pyethapp.accounts import Account, AccountsService, DEFAULT_COINBASE
# reduce key derivation iterations
# NOTE(review): this lowers the PBKDF2 work factor so the test suite runs
# quickly; never use such a low iteration count outside of tests.
ethereum.keys.PBKDF2_CONSTANTS['c'] = 100

log = get_logger('tests.account_service')
@pytest.fixture()
def app(request):
    """A BaseApp with an AccountsService backed by a fresh temporary keystore dir."""
    app = BaseApp(config=dict(accounts=dict(keystore_dir=tempfile.mkdtemp())))
    AccountsService.register_with_app(app)

    def fin():
        # cleanup temporary keystore directory
        # Safety check: only delete directories we created under the tmp root.
        assert app.config['accounts']['keystore_dir'].startswith(tempfile.gettempdir())
        shutil.rmtree(app.config['accounts']['keystore_dir'])
        log.debug('cleaned temporary keystore dir', dir=app.config['accounts']['keystore_dir'])
    request.addfinalizer(fin)

    return app


@pytest.fixture()
def privkey():
    """A fixed 32-byte private key, hex encoded."""
    return 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'


@pytest.fixture()
def password():
    """The password used to encrypt the test account."""
    return 'secret'


@pytest.fixture()
def uuid():
    """A random UUID string identifying the test account."""
    return str(uuid4())


@pytest.fixture()
def account(privkey, password, uuid):
    """An unlocked Account built from the fixed privkey/password/uuid fixtures."""
    return Account.new(password, privkey, uuid)
def test_empty(app):
    """A freshly registered AccountsService contains no accounts."""
    s = app.services.accounts
    assert len(s) == 0
    assert len(s.accounts_with_address) == 0
    assert len(s.unlocked_accounts) == 0
    assert s.accounts == []


def test_add_account(app, account):
    """Adding an unlocked account makes it reachable via every lookup path."""
    s = app.services.accounts
    assert len(s) == 0
    s.add_account(account, store=False)
    assert len(s) == 1
    assert s.accounts == [account]
    assert s[account.address] == account
    assert s.unlocked_accounts == [account]
    assert s.accounts_with_address == [account]
    assert s.get_by_id(account.uuid) == account


def test_add_locked_account(app, account, password):
    """A locked account is listed but not among unlocked_accounts until unlocked."""
    s = app.services.accounts
    account.lock()
    assert account.address is not None
    s.add_account(account, store=False)
    assert s.accounts == [account]
    assert s[account.address] == account
    assert len(s.unlocked_accounts) == 0
    assert s.accounts_with_address == [account]
    assert s.get_by_id(account.uuid) == account
    account.unlock(password)
    assert s.unlocked_accounts == [account]


def test_add_account_without_address(app, account, password):
    """An account whose address is unknown cannot be looked up by address."""
    s = app.services.accounts
    account.lock()
    address = account.address
    # Clear the private attribute to simulate an account with unknown address.
    account._address = None
    s.add_account(account, store=False)
    assert s.accounts == [account]
    assert len(s.unlocked_accounts) == 0
    assert len(s.accounts_with_address) == 0
    with pytest.raises(KeyError):
        s[address]
    assert s.get_by_id(account.uuid) == account
    account._address = address  # restore address for following tests
    account.unlock(password)


def test_add_account_twice(app, account):
    """Duplicate uuids are rejected; an account without a uuid may be added twice."""
    s = app.services.accounts
    s.add_account(account, store=False)
    with pytest.raises(ValueError):
        s.add_account(account, store=False)
    assert len(s.accounts) == 1
    uuid = account.uuid
    account.uuid = None
    s.add_account(account, store=False)
    assert len(s) == 2
    assert s.accounts == [account, account]
    assert s[account.address] == account
    assert s.unlocked_accounts == [account, account]
    assert s.accounts_with_address == [account, account]
    account.uuid = uuid
def test_lock_after_adding(app, account, password):
    """Locking/unlocking after registration is reflected in unlocked_accounts."""
    s = app.services.accounts
    s.add_account(account, store=False)
    assert s.unlocked_accounts == [account]
    account.lock()
    assert len(s.unlocked_accounts) == 0
    account.unlock(password)
    assert s.unlocked_accounts == [account]


def test_find(app, account):
    """Accounts are found by 1-based index, hex address (any case, 0x optional) and uuid."""
    s = app.services.accounts
    s.add_account(account, store=False)
    assert len(s) == 1
    assert s.find('1') == account
    # NOTE(review): str.encode('hex') is Python-2-only codec usage.
    assert s.find(account.address.encode('hex')) == account
    assert s.find(account.address.encode('hex').upper()) == account
    assert s.find('0x' + account.address.encode('hex')) == account
    assert s.find('0x' + account.address.encode('hex').upper()) == account
    assert s.find(account.uuid) == account
    assert s.find(account.uuid.upper()) == account
    # Malformed identifiers raise ValueError ...
    with pytest.raises(ValueError):
        s.find('')
    with pytest.raises(ValueError):
        s.find('aabbcc')
    with pytest.raises(ValueError):
        s.find('xx' * 20)
    with pytest.raises(ValueError):
        s.find('0x' + 'xx' * 20)
    # ... while well-formed but unknown ones raise KeyError.
    with pytest.raises(KeyError):
        s.find('ff' * 20)
    with pytest.raises(KeyError):
        s.find('0x' + 'ff' * 20)
    with pytest.raises(KeyError):
        s.find(str(uuid4()))
def test_store(app, account):
    """Storing an account writes id and address to disk but never the private key."""
    s = app.services.accounts
    account.path = os.path.join(app.config['accounts']['keystore_dir'], 'account1')
    s.add_account(account, include_id=True, include_address=True)
    assert os.path.exists(account.path)
    account_reloaded = Account.load(account.path)
    assert account_reloaded.uuid is not None
    assert account_reloaded.address is not None
    assert account_reloaded.uuid == account.uuid
    assert account_reloaded.address == account.address
    # The private key must stay encrypted on disk.
    assert account_reloaded.privkey is None
    assert account_reloaded.path == account.path
    assert account.privkey is not None
    account.path = None  # reset shared fixture state for following tests


def test_store_overwrite(app, account):
    """Storing to an already-occupied path raises IOError instead of overwriting."""
    s = app.services.accounts
    uuid = account.uuid
    account.uuid = None
    account.path = os.path.join(app.config['accounts']['keystore_dir'], 'account1')
    account2 = Account(account.keystore)
    account2.path = os.path.join(app.config['accounts']['keystore_dir'], 'account2')
    s.add_account(account, store=True)
    with pytest.raises(IOError):
        s.add_account(account, store=True)
    s.add_account(account2, store=True)
    account.uuid = uuid
    account.path = None


def test_store_dir(app, account):
    """Storing creates intermediate sub-directories; existing paths still raise IOError."""
    s = app.services.accounts
    uuid = account.uuid
    account.uuid = None
    paths = [os.path.join(app.config['accounts']['keystore_dir'], p) for p in [
        'some/sub/dir/account1',
        'some/sub/dir/account2',
        'account1',
    ]]
    for path in paths:
        new_account = Account(account.keystore, path=path)
        s.add_account(new_account)
    for path in paths:
        new_account = Account(account.keystore, path=path)
        with pytest.raises(IOError):
            s.add_account(new_account)
    account.uuid = uuid
def test_store_private(app, account, password):
    """With include_id/include_address False, neither id nor address reach the disk."""
    s = app.services.accounts
    account.path = os.path.join(app.config['accounts']['keystore_dir'], 'account1')
    s.add_account(account, include_id=False, include_address=False)
    account_reloaded = Account.load(account.path)
    assert account_reloaded.address is None
    assert account_reloaded.uuid is None
    # Unlocking recovers the address from the private key; the uuid is gone.
    account_reloaded.unlock(password)
    assert account_reloaded.address == account.address
    assert account_reloaded.uuid is None
    account.path = None


def test_store_absolute(app, account):
    """Accounts may be stored at an absolute path outside the keystore dir."""
    s = app.services.accounts
    tmpdir = tempfile.mkdtemp()
    account.path = os.path.join(tmpdir, 'account1')
    assert os.path.isabs(account.path)
    s.add_account(account)
    assert os.path.exists(account.path)
    account_reloaded = Account.load(account.path)
    assert account_reloaded.address == account.address
    shutil.rmtree(tmpdir)
    account.path = None


def test_restart_service(app, account, password):
    """Re-registering the service reloads stored accounts in locked state."""
    s = app.services.accounts
    account.path = os.path.join(app.config['accounts']['keystore_dir'], 'account1')
    s.add_account(account)
    app.services.pop('accounts')
    AccountsService.register_with_app(app)
    s = app.services.accounts
    assert len(s) == 1
    reloaded_account = s.accounts[0]
    assert reloaded_account.path == account.path
    assert reloaded_account.address == account.address
    assert reloaded_account.uuid == account.uuid
    # Keys stay encrypted until the account is unlocked again.
    assert reloaded_account.privkey is None
    assert reloaded_account.pubkey is None
    reloaded_account.unlock(password)
    assert reloaded_account.privkey == account.privkey
    assert reloaded_account.pubkey == account.pubkey
    account.path = None
def test_account_sorting(app):
    """Accounts are kept sorted by path and findable by their 1-based index."""
    keystore_dummy = {}
    paths = [
        '/absolute/path/b',
        '/absolute/path/c',
        '/absolute/path/letter/e',
        '/absolute/path/letter/d',
        '/letter/f',
        '/absolute/path/a',
        None
    ]
    # NOTE(review): sorting a list containing None together with strings only
    # works on Python 2 (as does xrange below); this test is Python-2-only.
    paths_sorted = sorted(paths)
    s = app.services.accounts
    for path in paths:
        s.add_account(Account(keystore_dummy, path=path), store=False)
    assert [account.path for account in s.accounts] == paths_sorted
    assert [s.find(str(i)).path for i in xrange(1, len(paths) + 1)] == paths_sorted
def test_update(app, account, password):
    """update_account re-encrypts with a new password, preserving identity and cleaning backups."""
    s = app.services.accounts
    path = os.path.join(app.config['accounts']['keystore_dir'], 'update_test')
    address = account.address
    privkey = account.privkey
    pubkey = account.pubkey
    uuid = account.uuid
    # Updating is only allowed for stored, unlocked accounts managed by the service.
    with pytest.raises(ValueError):
        s.update_account(account, 'pw2')
    s.add_account(account, store=False)
    with pytest.raises(ValueError):
        s.update_account(account, 'pw2')
    s.accounts.remove(account)
    account.path = path
    s.add_account(account, store=True)
    account.lock()
    with pytest.raises(ValueError):
        s.update_account(account, 'pw2')
    account.unlock(password)
    s.update_account(account, 'pw2')
    # Identity (path, address, keys, uuid) survives the password change.
    assert account.path == path
    assert account.address == address
    assert account.privkey == privkey
    assert account.pubkey == pubkey
    assert account.uuid == uuid
    assert not account.locked
    assert account in s.accounts
    account.lock()
    with pytest.raises(ValueError):
        account.unlock(password)
    account.unlock('pw2')
    assert not account.locked
    assert os.listdir(app.config['accounts']['keystore_dir']) == ['update_test']
    # Pre-existing backup files ('update_test~', 'update_test~N') must survive
    # an update untouched.  NOTE(review): xrange is Python-2-only.
    files = ['update_test~' + str(i) for i in xrange(20)]
    files.append('update_test~')
    for filename in files:
        # touch files
        open(os.path.join(app.config['accounts']['keystore_dir'], filename), 'w').close()
    s.update_account(account, 'pw3')
    assert set(os.listdir(app.config['accounts']['keystore_dir'])) == set(files + ['update_test'])
    account.path = None
def test_coinbase(app, account):
    """Coinbase resolution: default, first account, configured hex, and validation."""
    s = app.services.accounts
    # coinbase not configured at all
    assert s.coinbase == DEFAULT_COINBASE
    # coinbase from first account
    s.add_account(account, store=False)
    app.config['accounts']['must_include_coinbase'] = True
    assert s.coinbase == account.address
    app.config['accounts']['must_include_coinbase'] = False
    assert s.coinbase == account.address
    # coinbase configured
    # NOTE(review): encode('hex') is Python-2-only codec usage.
    app.config['pow'] = {'coinbase_hex': account.address.encode('hex')}
    app.config['accounts']['must_include_coinbase'] = True
    assert s.coinbase == account.address
    app.config['accounts']['must_include_coinbase'] = False
    assert s.coinbase == account.address
    # Invalid configured values raise regardless of must_include_coinbase.
    for invalid_coinbase in [123, '\x00' * 20, '\x00' * 40, '', 'aabbcc', 'aa' * 19, 'ff' * 21]:
        app.config['pow'] = {'coinbase_hex': invalid_coinbase}
        app.config['accounts']['must_include_coinbase'] = False
        with pytest.raises(ValueError):
            s.coinbase
        app.config['accounts']['must_include_coinbase'] = True
        with pytest.raises(ValueError):
            s.coinbase
    # Valid hex values decode, but must correspond to a managed account when
    # must_include_coinbase is set.
    for valid_coinbase in ['00' * 20, 'ff' * 20, '0x' + 'aa' * 20]:
        app.config['pow'] = {'coinbase_hex': valid_coinbase}
        app.config['accounts']['must_include_coinbase'] = False
        assert s.coinbase == decode_hex(remove_0x_head(valid_coinbase))
        app.config['accounts']['must_include_coinbase'] = True
        with pytest.raises(ValueError):
            s.coinbase
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
import tarfile
import tempfile
import time
import uuid
import zipfile
from contextlib import closing, contextmanager
from six import string_types
from pants.util.dirutil import safe_delete
@contextmanager
def environment_as(**kwargs):
    """Temporarily override environment variables, for example:

    with environment_as(PYTHONPATH='foo:bar:baz',
                        PYTHON='/usr/bin/python2.6'):
      subprocess.Popen(foo).wait()

    A value of None removes the variable.  On exit every variable is restored
    to its previous value (or removed again if it was previously unset).
    """
    saved = dict((name, os.environ.get(name)) for name in kwargs)

    def _apply(name, value):
        # None means "ensure the variable is absent".
        if value is None:
            if name in os.environ:
                del os.environ[name]
        else:
            os.environ[name] = value

    for name, value in kwargs.items():
        _apply(name, value)
    try:
        yield
    finally:
        for name, value in saved.items():
            _apply(name, value)
@contextmanager
def temporary_dir(root_dir=None, cleanup=True):
    """
    A with-context that creates a temporary directory and yields its path.

    :param str root_dir: The parent directory to create the temporary directory.
    :param bool cleanup: Whether or not to clean up the temporary directory.
    """
    tmp_dir = tempfile.mkdtemp(dir=root_dir)
    if not cleanup:
        yield tmp_dir
    else:
        try:
            yield tmp_dir
        finally:
            shutil.rmtree(tmp_dir, ignore_errors=True)
@contextmanager
def temporary_file_path(root_dir=None, cleanup=True):
    """
    A with-context that creates a temporary file and yields its path.

    :param str root_dir: The parent directory to create the temporary file.
    :param bool cleanup: Whether or not to clean up the temporary file.
    """
    with temporary_file(root_dir=root_dir, cleanup=cleanup) as handle:
        # Close the descriptor immediately; callers only want the path.
        handle.close()
        yield handle.name
@contextmanager
def temporary_file(root_dir=None, cleanup=True, suffix=''):
    """
    A with-context that creates a temporary file and yields a writeable file descriptor to it.

    :param str root_dir: The parent directory to create the temporary file.
    :param bool cleanup: Whether or not to clean up the temporary file.
    :param str suffix: If suffix is specified, the file name will end with that suffix.
                       Otherwise there will be no suffix.
                       mkstemp() does not put a dot between the file name and the suffix;
                       if you need one, put it at the beginning of suffix.
                       See :py:class:`tempfile.NamedTemporaryFile`.
    """
    handle = tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False)
    with handle:
        try:
            yield handle
        finally:
            if cleanup:
                safe_delete(handle.name)
@contextmanager
def safe_file(path, suffix=None, cleanup=True):
    """A with-context that copies a file, and copies the copy back to the original file on success.

    This is useful for doing work on a file but only changing its state on success.

    :param str suffix: Use this suffix to create the copy. Otherwise use a random string.
    :param bool cleanup: Whether or not to clean up the copy.
    """
    scratch = '{0}.{1}'.format(path, suffix or uuid.uuid4())
    if os.path.exists(path):
        shutil.copy(path, scratch)
    try:
        yield scratch
        # Success: propagate the scratch copy back to the original location.
        if cleanup:
            shutil.move(scratch, path)
        else:
            shutil.copy(scratch, path)
    finally:
        if cleanup:
            safe_delete(scratch)
@contextmanager
def pushd(directory):
    """
    A with-context that encapsulates pushd/popd: chdir into `directory`,
    restoring the previous working directory on exit.
    """
    previous = os.getcwd()
    os.chdir(directory)
    try:
        yield directory
    finally:
        os.chdir(previous)
@contextmanager
def open_zip(path_or_file, *args, **kwargs):
    """
    A with-context for zip files. Passes through positional and kwargs to zipfile.ZipFile.

    Wraps BadZipfile errors so the offending path appears in the message.
    """
    try:
        archive = zipfile.ZipFile(path_or_file, *args, **kwargs)
    except zipfile.BadZipfile as bze:
        raise zipfile.BadZipfile("Bad Zipfile {0}: {1}".format(path_or_file, bze))
    with closing(archive):
        yield archive
@contextmanager
def open_zip64(path_or_file, *args, **kwargs):
    """
    A with-context for zip files with allowZip64 defaulting to True.

    Passes through positional and kwargs to open_zip.
    """
    # Default allowZip64 to True while still honouring an explicit value.
    kwargs.setdefault('allowZip64', True)
    with open_zip(path_or_file, *args, **kwargs) as archive:
        yield archive
@contextmanager
def open_tar(path_or_file, *args, **kwargs):
    """
    A with-context for tar files. Passes through positional and kwargs to tarfile.open.

    If path_or_file is a file, caller must close it separately.
    """
    if isinstance(path_or_file, string_types):
        path, fileobj = path_or_file, None
    else:
        path, fileobj = None, path_or_file
    with closing(tarfile.open(path, *args, fileobj=fileobj, **kwargs)) as tar:
        yield tar
class Timer(object):
    """Very basic with-context to time operations

    Example usage:
      >>> from pants.util.contextutil import Timer
      >>> with Timer() as timer:
      ...   time.sleep(2)
      ...
      >>> timer.elapsed
      2.0020849704742432
    """

    def __init__(self, clock=time):
        """:param clock: any object exposing a ``time()`` method returning seconds;
                         defaults to the ``time`` module (useful to inject a fake clock in tests).
        """
        self._clock = clock

    def __enter__(self):
        self.start = self._clock.time()
        self.finish = None
        return self

    @property
    def elapsed(self):
        """Seconds elapsed: the final total after the context exits, or a running
        total while still inside it.
        """
        # Compare against None explicitly: the previous truthiness test
        # (`if self.finish:`) would misclassify a finish timestamp of 0.0
        # (e.g. from a fake clock) as "still running".
        if self.finish is not None:
            return self.finish - self.start
        else:
            return self._clock.time() - self.start

    def __exit__(self, typ, val, traceback):
        self.finish = self._clock.time()
|
|
__VERSION__="ete2-2.2rev1056"
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
from evolevents import EvolEvent
__all__ = ["get_evol_events_from_leaf", "get_evol_events_from_root"]
def get_evol_events_from_leaf(node, sos_thr=0.0):
    """ Returns a list of duplication and speciation events in
    which the current node has been involved. Scanned nodes are
    also labeled internally as dup=True|False. You can access these
    labels using the 'node.dup' syntax.

    Method: the algorithm scans all nodes from the given leafName to
    the root. Nodes are assumed to be duplications when a species
    overlap is found between its child lineages. Method is described in
    more detail in:

    "The Human Phylome." Huerta-Cepas J, Dopazo H, Dopazo J, Gabaldon
    T. Genome Biol. 2007;8(6):R109.
    """
    # Get the tree's root
    root = node.get_tree_root()

    # Checks that is actually rooted
    outgroups = root.get_children()
    if len(outgroups) != 2:
        # Parenthesized raise: valid on Python 2.6+ and required on Python 3
        # (the previous `raise TypeError, "..."` form is Python-2-only).
        raise TypeError("Tree is not rooted")

    # Catch the smaller outgroup (will be stored as the tree
    # outgroup)
    o1 = set([n.name for n in outgroups[0].get_leaves()])
    o2 = set([n.name for n in outgroups[1].get_leaves()])
    if len(o2)<len(o1):
        smaller_outg = outgroups[1]
    else:
        smaller_outg = outgroups[0]

    # Prepare to browse tree from leaf to root
    all_events = []
    current = node
    ref_spcs = node.species
    sister_leaves = set([])
    browsed_spcs = set([current.species])
    browsed_leaves = set([current])
    # get family Size (number of leaves of the seed species)
    fSize = len([n for n in root.get_leaves() if n.species == ref_spcs])

    # Clean previous analysis
    for n in root.get_descendants()+[root]:
        n.del_feature("evoltype")

    while current.up:
        # distances control (0.0 distance check)
        d = 0
        for s in current.get_sisters():
            for leaf in s.get_leaves():
                d += current.get_distance(leaf)
                sister_leaves.add(leaf)
        # Process sister node only if there is any new sequence.
        # (prevents duplications caused by repeated names)
        sister_leaves = sister_leaves.difference(browsed_leaves)
        if len(sister_leaves)==0:
            current = current.up
            continue
        # Gets species at both sides of event
        sister_spcs = set([n.species for n in sister_leaves])
        overlaped_spces = browsed_spcs & sister_spcs
        all_spcs = browsed_spcs | sister_spcs
        # Species-overlap score: shared species / all species at this node.
        score = float(len(overlaped_spces))/len(all_spcs)
        # Creates a new evolEvent
        event = EvolEvent()
        event.fam_size = fSize
        event.seed = node.name
        # event.e_newick = current.up.get_newick()  # high mem usage!!
        event.sos = score
        event.outgroup = smaller_outg.name
        # event.allseqs = set(current.up.get_leaf_names())
        event.in_seqs = set([n.name for n in browsed_leaves])
        event.out_seqs = set([n.name for n in sister_leaves])
        event.inparalogs = set([n.name for n in browsed_leaves if n.species == ref_spcs])

        # If species overlap: duplication
        if score > sos_thr:  # and d > 0.0: Removed branch control.
            event.node = current.up
            event.etype = "D"
            event.outparalogs = set([n.name for n in sister_leaves if n.species == ref_spcs])
            event.orthologs = set([])
            current.up.add_feature("evoltype","D")
            all_events.append(event)
        # If NO species overlap: speciation
        elif score == sos_thr:
            event.node = current.up
            event.etype = "S"
            event.orthologs = set([n.name for n in sister_leaves if n.species != ref_spcs])
            event.outparalogs = set([])
            current.up.add_feature("evoltype","S")
            all_events.append(event)
        else:
            pass # do not add event if distances == 0

        # Updates browsed species
        browsed_spcs |= sister_spcs
        browsed_leaves |= sister_leaves
        sister_leaves = set([])
        # And keep ascending
        current = current.up
    return all_events
def get_evol_events_from_root(node, sos_thr):
    """ Returns a list of **all** duplication and speciation
    events detected after this node. Nodes are assumed to be
    duplications when a species overlap is found between its child
    lineages. Method is described in more detail in:

    "The Human Phylome." Huerta-Cepas J, Dopazo H, Dopazo J, Gabaldon
    T. Genome Biol. 2007;8(6):R109.
    """
    # Get the tree's root
    root = node.get_tree_root()

    # Checks that is actually rooted
    outgroups = root.get_children()
    if len(outgroups) != 2:
        # Parenthesized raise: valid on Python 2.6+ and required on Python 3
        # (the previous `raise TypeError, "..."` form is Python-2-only).
        raise TypeError("Tree is not rooted")

    # Catch the smaller outgroup (will be stored as the tree outgroup)
    o1 = set([n.name for n in outgroups[0].get_leaves()])
    o2 = set([n.name for n in outgroups[1].get_leaves()])

    if len(o2)<len(o1):
        smaller_outg = outgroups[1]
    else:
        smaller_outg = outgroups[0]

    # Get family size (total number of leaves in the tree)
    fSize = len( [n for n in root.get_leaves()] )

    # Clean data from previous analyses
    for n in root.get_descendants()+[root]:
        n.del_feature("evoltype")

    # Gets prepared to browse the tree from root to leaves
    to_visit = []
    current = root
    all_events = []
    while current:
        # Gets childs and appends them to the To_visit list
        childs = current.get_children()
        to_visit += childs
        if len(childs)>2:
            raise TypeError("nodes are expected to have two childs.")
        elif len(childs)==0:
            pass # leaf
        else:
            # Get leaves and species at both sides of event
            sideA_leaves= set([n for n in childs[0].get_leaves()])
            sideB_leaves= set([n for n in childs[1].get_leaves()])
            sideA_spcs  = set([n.species for n in childs[0].get_leaves()])
            sideB_spcs  = set([n.species for n in childs[1].get_leaves()])
            # Calculates species overlap: shared species / all species.
            overlaped_spcs = sideA_spcs & sideB_spcs
            all_spcs       = sideA_spcs | sideB_spcs
            score = float(len(overlaped_spcs))/len(all_spcs)

            # Creates a new evolEvent
            event = EvolEvent()
            event.fam_size = fSize
            event.branch_supports = [current.support, current.children[0].support, current.children[1].support]
            # event.seed = leafName
            # event.e_newick = current.up.get_newick()  # high mem usage!!
            event.sos = score
            event.outgroup_spcs = smaller_outg.get_species()
            event.in_seqs = set([n.name for n in sideA_leaves])
            event.out_seqs = set([n.name for n in sideB_leaves])
            event.inparalogs = set([n.name for n in sideA_leaves])
            # If species overlap: duplication
            if score >sos_thr:
                event.node = current
                event.etype = "D"
                event.outparalogs = set([n.name for n in sideB_leaves])
                event.orthologs = set([])
                current.add_feature("evoltype","D")
            # If NO species overlap: speciation
            else:
                event.node = current
                event.etype = "S"
                event.orthologs = set([n.name for n in sideB_leaves])
                event.outparalogs = set([])
                current.add_feature("evoltype","S")

            all_events.append(event)
        # Keep visiting nodes
        try:
            current = to_visit.pop(0)
        except IndexError:
            current = None
    return all_events
|
|
__all__ = ['extract_ssi', 'extract_ssi_to_file',
'extract_eta', 'extract_eta_to_file',
'extract_Q_channel', 'extract_Q_down',
'extract_overland_volume', 'extract_overland_volume_to_file']
from datetime import timedelta
from configparser import SafeConfigParser
import h5py
import numpy as np
import numpy.ma as ma
# gzip compression flag
comp = 6
def extract_Q_down(control_fname):
    """Extract combined soil and overland out flow rates.

    Read a PyTOPKAPI simulation file and return the combined overland
    andsoil store outflows in a Numpy array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file. The name
        should contain the full path relative to the current
        directory.

    Returns
    -------
    Qdown : Numpy array
        A Numpy array containing the simulated outflow flow rates from
        the overland and soil store of each cell.

    """
    # NOTE(review): SafeConfigParser is deprecated and removed in Python 3.12;
    # configparser.ConfigParser is the drop-in replacement — confirm and migrate.
    config = SafeConfigParser()
    config.read(control_fname)

    sim_fname = config.get('output_files', 'file_out')

    # Read the whole /Q_down dataset into memory, then release the file handle.
    tkpi_file = h5py.File(sim_fname, 'r')
    Qdown = tkpi_file['/Q_down'][...]
    tkpi_file.close()

    return Qdown
def extract_Q_channel(control_fname):
    """Extract channel flow rates from a PyTOPKAPI simulation file.

    Read a PyTOPKAPI simulation file and return the simulated channel
    flows in a Numpy masked array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file. The name
        should contain the full path relative to the current
        directory.

    Returns
    -------
    Qc : Numpy masked array
        A Numpy masked array containing the simulated flow rates for
        channel cells.

    """
    config = SafeConfigParser()
    config.read(control_fname)

    param_fname = config.get('input_files', 'file_cell_param')
    sim_fname = config.get('output_files', 'file_out')

    params = np.loadtxt(param_fname)

    tkpi_file = h5py.File(sim_fname, 'r')
    Qc = tkpi_file['/Channel/Qc_out'][...]
    tkpi_file.close()

    # Mask out non-channel cells.  Column 3 of the parameter file holds the
    # channel flag (1 for channel cells); broadcast it across the time
    # dimension of Qc.  The builtin `int` replaces the deprecated `np.int`
    # alias, which was removed in NumPy 1.24.
    cond = params[:, 3]*np.ones(Qc.shape, dtype=int) != 1
    Qc = np.ma.masked_where(cond, Qc)

    return Qc
def extract_overland_volume(control_fname):
    """Extract the volumes in the overland stores.

    Read a PyTOPKAPI simulation file and return the combined overland
    and store volumes in a Numpy array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file. The name
        should contain the full path relative to the current directory
        (or the root of the file system).

    Returns
    -------
    Vo : Numpy array
        A Numpy array containing the simulated storage volume in the
        overland store of each cell.

    """
    # NOTE(review): SafeConfigParser is deprecated (removed in Python 3.12).
    config = SafeConfigParser()
    config.read(control_fname)

    sim_fname = config.get('output_files', 'file_out')

    # Read the overland store volumes and release the file handle.
    tkpi_file = h5py.File(sim_fname, 'r')
    Vo = tkpi_file['/Overland/V_o'][...]
    tkpi_file.close()

    return Vo
def extract_overland_volume_to_file(sim_fname, param_fname,
                                    result_fname, start_dt, timestep):
    """Extract the volumes in the overland stores to a file.

    Read a TOPKAPI simulation file and it's associated parameter file
    and extract the overland store volumes for each timestep. Store
    the results in a new HDF5 file, grouped by date and containing
    datasets of latitude, longitude and storage volume.

    Parameters
    ----------
    sim_fname : string
        The name of a PyTOPKAPI simulation file. This should include
        the full or relative path.
    param_fname : string
        The name of a parameter file describing the catchment. This
        should include the full or relative path.
    result_fname : string
        The name of an HDF5 file to store the output. This should
        include the full or relative path.
    start_dt : datetime.datetime
        The starting date and time of the simulated results in
        `sim_fname`.
    timestep : int
        The length of each model time-step in seconds.

    Returns
    -------
    Nothing

    """
    params = np.loadtxt(param_fname)
    x = params[:, 1]
    y = params[:, 2]
    soil_depth = params[:, 8]

    # Cells with zero soil depth are not part of the active catchment;
    # mask them out of the coordinate arrays too.
    soil_depth = ma.masked_values(soil_depth, 0.0)
    x = ma.array(x, mask=soil_depth.mask).compressed()
    y = ma.array(y, mask=soil_depth.mask).compressed()

    # Read all the simulated volumes, then close the input immediately.
    # (The original code closed `tkpi_file` a second time at the end of the
    # function, after it was already closed here.)
    tkpi_file = h5py.File(sim_fname, 'r')
    overland_vol = tkpi_file['/Overland/V_o'][...]
    tkpi_file.close()

    result_file = h5py.File(result_fname, 'w')
    rows = overland_vol.shape[0]  # one row per simulated timestep

    # y
    dset = result_file.require_dataset('y', shape=y.shape,
                                       dtype=np.float32, compression=comp)
    dset[...] = y
    dset.attrs['name'] = 'y coordinate'
    dset.attrs['units'] = 'Projection dependent (Metres or Decimal degrees)'

    # x
    dset = result_file.require_dataset('x', shape=x.shape,
                                       dtype=np.float32, compression=comp)
    dset[...] = x
    dset.attrs['name'] = 'x coordinate'
    dset.attrs['units'] = 'Projection dependent (Metres or Decimal degrees)'

    # One dataset per timestep, keyed by the timestamp 'YYYYMMDDHH00'.
    curr_dt = start_dt
    for k in range(rows):
        print(curr_dt)
        ov = ma.array(overland_vol[k], mask=soil_depth.mask).compressed()

        dset = result_file.require_dataset(curr_dt.strftime('%Y%m%d%H00'),
                                           shape=ov.shape,
                                           dtype=np.float32, compression=comp)
        dset[...] = ov
        dset.attrs['name'] = 'TOPKAPI overland store volume'
        dset.attrs['units'] = 'm^3'

        curr_dt += timedelta(seconds=timestep)

    result_file.close()
def extract_ssi(control_fname):
    """Extract SSI from a PyTOPKAPI simulation file.

    Read a PyTOPKAPI simulation file and it's associated parameter
    file and compute the Soil Saturation Index (SSI) for each model
    cell and timestep. The results are returned as a Numpy array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file. The name
        should contain the full path relative to the current
        directory.

    Returns
    -------
    ssi : Numpy ndarray
        A Numpy array containing the calculated SSI values.

    """
    # NOTE(review): SafeConfigParser is deprecated (removed in Python 3.12).
    config = SafeConfigParser()
    config.read(control_fname)

    global_param_fname = config.get('input_files', 'file_global_param')
    param_fname = config.get('input_files', 'file_cell_param')
    sim_fname = config.get('output_files', 'file_out')
    fac_L = config.getfloat('calib_params', 'fac_L')

    params = np.loadtxt(param_fname)
    glob_params = np.genfromtxt(global_param_fname, names=True)

    # Soil depth (column 8) is scaled by the calibration factor fac_L;
    # columns 10/11 give the moisture-content bounds used in the cell-volume
    # formula below.
    soil_depth = fac_L*params[:, 8]
    factor = params[:, 11] - params[:, 10]
    # Cell side length X comes from the global parameter file.
    cell_area = glob_params['X']**2 # m^2

    # Zero soil depth marks cells without a soil store; mask them out of
    # every derived array.
    soil_depth = ma.masked_values(soil_depth, 0.0)
    factor = ma.array(factor, mask=soil_depth.mask)
    div = factor*soil_depth*cell_area

    tkpi_file = h5py.File(sim_fname, 'r')
    soil_vol = tkpi_file['/Soil/V_s'][...]
    tkpi_file.close()

    # ssi = (Vs/cell_vol)*100
    # cell_vol = (theta_s - theta_r)*soil_depth*cell_area
    sv = ma.array(soil_vol, mask=soil_depth.mask)
    ssi = (sv/(div))*100.0

    return ssi
def extract_ssi_to_file(sim_fname, param_fname,
                        result_fname, start_dt, timestep, cell_size=1000.0):
    """Extract percentage saturation to a file

    Read a TOPKAPI simulation file and it's associated parameter file
    and compute the SSI for each timestep. Store the results in a new
    HDF5 file, grouped by date and containing datasets of latitude,
    longitude and SSI value.

    Parameters
    ----------
    sim_fname : string
        The name of a PyTOPKAPI simulation file. This should include
        the full or relative path.
    param_fname : string
        The name of a parameter file describing the catchment. This
        should include the full or relative path.
    result_fname : string
        The name of an HDF5 file to store the output. This should
        include the full or relative path.
    start_dt : datetime.datetime
        The starting date and time of the simulated results in
        `sim_fname`.
    timestep : int
        The length of each model time-step in seconds.
    cell_size : float, optional
        The side length of a model cell in metres (default 1000.0,
        matching the previously hard-coded value). `extract_ssi` reads
        this from the global parameter file instead; pass the catchment's
        actual cell size here for consistent results.

    Returns
    -------
    Nothing

    """
    params = np.loadtxt(param_fname)
    x = params[:, 1]
    y = params[:, 2]
    soil_depth = params[:, 8]
    factor = params[:, 11] - params[:, 10]
    cell_area = cell_size**2 # m^2

    # Zero soil depth marks cells without a soil store; mask them out of
    # every derived array.
    soil_depth = ma.masked_values(soil_depth, 0.0)
    factor = ma.array(factor, mask=soil_depth.mask)
    x = ma.array(x, mask=soil_depth.mask).compressed()
    y = ma.array(y, mask=soil_depth.mask).compressed()

    div = factor*soil_depth*cell_area

    # Read all the simulated soil volumes, then close the input immediately.
    # (The original code closed `tkpi_file` a second time at the end of the
    # function, after it was already closed here.)
    tkpi_file = h5py.File(sim_fname, 'r')
    soil_vol = tkpi_file['/Soil/V_s'][...]
    tkpi_file.close()

    result_file = h5py.File(result_fname, 'w')
    rows = soil_vol.shape[0]  # one row per simulated timestep

    # y
    dset = result_file.require_dataset('y', shape=y.shape,
                                       dtype=np.float32, compression=comp)
    dset[...] = y
    dset.attrs['name'] = 'y coordinate'
    dset.attrs['units'] = 'Projection dependent (Metres or Decimal degrees)'

    # x
    dset = result_file.require_dataset('x', shape=x.shape,
                                       dtype=np.float32, compression=comp)
    dset[...] = x
    dset.attrs['name'] = 'x coordinate'
    dset.attrs['units'] = 'Projection dependent (Metres or Decimal degrees)'

    curr_dt = start_dt
    for k in range(rows):
        print(curr_dt)
        # ssi = (Vs/cell_vol)*100
        # cell_vol = (theta_s - theta_r)*soil_depth*cell_area
        sv = ma.array(soil_vol[k], mask=soil_depth.mask)
        ssi = (sv/(div))*100.0
        ssi = ssi.compressed()

        # ssi
        dset = result_file.require_dataset(curr_dt.strftime('%Y%m%d%H00'),
                                           shape=ssi.shape,
                                           dtype=np.float32, compression=comp)
        dset[...] = ssi
        dset.attrs['name'] = 'TOPKAPI soil saturation index'
        dset.attrs['units'] = '% saturation'

        curr_dt += timedelta(seconds=timestep)

    result_file.close()
def extract_eta(control_fname):
    """Extract ETa from a PyTOPKAPI simulation file.

    Read a PyTOPKAPI simulation file and its associated parameter file
    and extract the actual evapotranspiration for each model cell and
    timestep. The results are returned as a Numpy array.

    Parameters
    ----------
    control_fname : string
        The file name of a PyTOPKAPI simulation control file. The name
        should contain the full path relative to the current
        directory.

    Returns
    -------
    eta : Numpy masked array
        A Numpy array containing the calculated ETa values, with
        non-catchment cells (zero soil depth) masked.

    """
    # The control file points at the cell-parameter and simulation files.
    # NOTE(review): SafeConfigParser is a deprecated alias removed in
    # Python 3.12 -- presumably imported at the top of this module;
    # consider migrating to configparser.ConfigParser.
    config = SafeConfigParser()
    config.read(control_fname)
    param_fname = config.get('input_files', 'file_cell_param')
    sim_fname = config.get('output_files', 'file_out')

    # Column 8 of the parameter file is the per-cell soil depth; zero
    # depth marks cells outside the catchment, which get masked.
    params = np.loadtxt(param_fname)
    soil_depth = params[:, 8]
    soil_depth = ma.masked_values(soil_depth, 0.0)

    # Full ETa table (timestep rows x cell columns) from the HDF5 results.
    tkpi_file = h5py.File(sim_fname, 'r')
    eta = tkpi_file['/ET_out'][...]
    tkpi_file.close()

    # Apply the catchment mask to the extracted values.
    eta = ma.array(eta, mask=soil_depth.mask)
    return eta
def extract_eta_to_file(sim_fname, param_fname,
                        result_fname, start_dt, timestep):
    """Extract actual evapotranspiration to a file

    Read a PyTOPKAPI simulation file and its associated parameter file
    and write the actual evapotranspiration for each timestep to a new
    HDF5 file. The output contains `x` and `y` coordinate datasets plus
    one dataset of ETa values per timestep, keyed by date.

    Parameters
    ----------
    sim_fname : string
        The name of a PyTOPKAPI simulation file. This should include
        the full or relative path.
    param_fname : string
        The name of a parameter file describing the catchment. This
        should include the full or relative path.
    result_fname : string
        The name of an HDF5 file to store the output. This should
        include the full or relative path.
    start_dt : datetime.datetime
        The starting date and time of the simulated results in
        `sim_fname`.
    timestep : int
        The length of each model time-step in seconds.

    Returns
    -------
    Nothing

    """
    # Catchment description: columns 1/2 are x/y, column 8 is soil depth.
    cell_params = np.loadtxt(param_fname)
    soil_depth = ma.masked_values(cell_params[:, 8], 0.0)
    catchment_mask = soil_depth.mask
    x_coords = ma.array(cell_params[:, 1], mask=catchment_mask).compressed()
    y_coords = ma.array(cell_params[:, 2], mask=catchment_mask).compressed()

    sim_file = h5py.File(sim_fname, 'r')
    eta_table = sim_file['/ET_out'][...]
    sim_file.close()

    result_file = h5py.File(result_fname, 'w')

    def _store(name, values, long_name, units):
        # One float32 dataset per name, annotated with 'name'/'units'.
        dset = result_file.require_dataset(name, shape=values.shape,
                                           dtype=np.float32, compression=comp)
        dset[...] = values
        dset.attrs['name'] = long_name
        dset.attrs['units'] = units

    coord_units = 'Projection dependent (Metres or Decimal degrees)'
    _store('y', y_coords, 'y coordinate', coord_units)
    _store('x', x_coords, 'x coordinate', coord_units)

    # One dataset per timestep, named after the timestamp.
    curr_dt = start_dt
    for row in eta_table:
        print(curr_dt)
        et = ma.array(row, mask=catchment_mask).compressed()
        _store(curr_dt.strftime('%Y%m%d%H00'), et,
               'PyTOPKAPI actual ET', 'mm')
        curr_dt += timedelta(seconds=timestep)
    result_file.close()
|
|
import abc
import os.path
import shutil
from subprocess import CalledProcessError, check_call, check_output
from pkgpanda.exceptions import ValidationError
from pkgpanda.util import download_atomic, sha1
# Ref must be a git sha-1. We then pass it through get_sha1 to make
# sure it is a sha-1 for the commit, not the tree, tag, or any other
# git object.
def is_sha(sha_str):
    """Return True if `sha_str` is a full git sha-1 hex string.

    A valid value is exactly 40 hexadecimal characters. Always returns
    a bool; the previous `int(...) and len(...)` form returned the
    parsed integer for an all-zero digest, which is falsy even though
    "0" * 40 is well-formed hex of the right length.
    """
    if len(sha_str) != 40:
        return False
    try:
        int(sha_str, 16)
    except ValueError:
        return False
    return True
def fetch_git(bare_folder, git_uri):
    """Mirror `git_uri` into the bare repository at `bare_folder`.

    First use does a full `git clone --mirror`; later calls re-point
    the 'origin' remote at `git_uri` (in case the URI changed) and
    fetch all refs. Returns `bare_folder`.
    """
    # Do a git clone if the cache folder doesn't exist yet, otherwise
    # do a git pull of everything.
    if not os.path.exists(bare_folder):
        check_call(["git", "clone", "--mirror", "--progress", git_uri, bare_folder])
    else:
        # Update the remote URL first so the following fetch pulls from
        # the currently requested URI rather than a stale one.
        check_call([
            "git",
            "--git-dir",
            bare_folder,
            "remote",
            "set-url",
            "origin",
            git_uri])
        check_call([
            "git",
            "--git-dir",
            bare_folder,
            "remote",
            "update",
            "origin"])
    return bare_folder
class SourceFetcher(metaclass=abc.ABCMeta):
    """Abstract base class for package source fetchers.

    Concrete subclasses validate their own `src_info` dictionaries and
    implement the two abstract methods; the 'kind' key selects which
    subclass handles a given source (see `all_fetchers`).
    """

    def __init__(self, src_info):
        # Subclasses assert that 'kind' matches what they support.
        self.kind = src_info['kind']

    @abc.abstractmethod
    def get_id(self):
        """Returns a unique id for the particular version of the particular source (sha1 of tarball, git commit, etc)"""
        pass

    @abc.abstractmethod
    def checkout_to(self, directory):
        """Makes the artifact appear in the passed directory"""
        pass
def get_git_sha1(bare_folder, ref):
    """Resolve `ref` to the sha-1 of the commit it points at.

    The "^{commit}" peeling makes git dereference tags so the result is
    always a commit id, never a tag or tree. Raises ValidationError
    when the ref is unknown to the repository.
    """
    command = [
        "git",
        "--git-dir", bare_folder,
        "rev-parse", "{}^{{commit}}".format(ref),
    ]
    try:
        raw = check_output(command)
    except CalledProcessError as ex:
        raise ValidationError(
            "Unable to find ref '{}' in '{}': {}".format(ref, bare_folder, ex)) from ex
    return raw.decode('ascii').strip()
class GitSrcFetcher(SourceFetcher):
    """Fetches a source from a remote git repository pinned to a sha-1."""

    def __init__(self, src_info, cache_dir):
        """Validate `src_info` and record the repo URL, ref and cache path.

        Raises ValidationError unless the keys are exactly
        {'kind', 'git', 'ref', 'ref_origin'} and ref is a full sha-1.
        """
        super().__init__(src_info)
        assert self.kind == 'git'

        if src_info.keys() != {'kind', 'git', 'ref', 'ref_origin'}:
            raise ValidationError(
                "git source must have keys 'git' (the repo to fetch), 'ref' (the sha-1 to "
                "checkout), and 'ref_origin' (the branch/tag ref was derived from)")

        if not is_sha(src_info['ref']):
            raise ValidationError("ref must be a sha1. Got: {}".format(src_info['ref']))

        self.url = src_info['git']
        self.ref = src_info['ref']
        self.ref_origin = src_info['ref_origin']
        # FIX: dropped a stray no-op `.format()` call on the literal.
        self.bare_folder = cache_dir + "/cache.git"

    def get_id(self):
        """The pinned commit uniquely identifies this source version."""
        return {"commit": self.ref}

    def checkout_to(self, directory):
        """Clone the cached mirror into `directory` and check out the ref."""
        # fetch into a bare repository so if we're on a host which has a cache we can
        # only get the new commits.
        fetch_git(self.bare_folder, self.url)

        # Warn if the ref_origin is set and gives a different sha1 than the
        # current ref.
        try:
            origin_commit = get_git_sha1(self.bare_folder, self.ref_origin)
        except Exception as ex:
            # FIX: chain the original exception so the root cause stays
            # visible in the traceback.
            raise ValidationError("Unable to find sha1 of ref_origin {}: {}".format(
                self.ref_origin, ex)) from ex
        if self.ref != origin_commit:
            print(
                "WARNING: Current ref doesn't match the ref origin. "
                "Package ref should probably be updated to pick up "
                "new changes to the code:" +
                " Current: {}, Origin: {}".format(self.ref,
                                                 origin_commit))

        # Clone into `src/`.
        check_call(["git", "clone", "-q", self.bare_folder, directory])

        # Checkout from the bare repo in the cache folder at the specific sha1
        check_call([
            "git",
            "--git-dir",
            directory + "/.git",
            "--work-tree",
            directory, "checkout",
            "-f",
            "-q",
            self.ref])
class GitLocalSrcFetcher(SourceFetcher):
    """Fetches a source from a clean local git checkout."""

    def __init__(self, src_info, cache_dir, working_directory):
        """Validate `src_info` and capture the HEAD commit of the local repo.

        Raises ValidationError for unexpected keys, an absolute
        rel_path, uncommitted local changes, or a broken checkout.
        """
        super().__init__(src_info)
        assert self.kind == 'git_local'

        # NOTE(review): `>` is a proper-superset test, so a dict with
        # only {'kind'} passes here and fails below with KeyError on
        # 'rel_path' -- confirm whether rel_path should be mandatory.
        if src_info.keys() > {'kind', 'rel_path'}:
            raise ValidationError("Only kind, rel_path can be specified for git_local")

        if os.path.isabs(src_info['rel_path']):
            raise ValidationError("rel_path must be a relative path to the current directory "
                                  "when used with git_local. Using a relative path means others "
                                  "that clone the repository will have things just work rather "
                                  "than a path.")

        self.src_repo_path = os.path.normpath(working_directory + '/' + src_info['rel_path']).rstrip('/')

        # Make sure there are no local changes, we can't `git clone` local changes.
        try:
            git_status = check_output([
                'git',
                '-C',
                self.src_repo_path,
                'status',
                '--porcelain',
                '-uno',
                '-z']).decode()
            if len(git_status):
                # FIX: corrected the "changse" typo in the user-facing message.
                raise ValidationError("No local changes are allowed in the git_local_work base repository. "
                                      "Use `git -C {0} status` to see local changes. "
                                      "All local changes must be committed or stashed before the "
                                      "package can be built. One workflow (temporary commit): `git -C {0} "
                                      "commit -am TMP` to commit everything, build the package, "
                                      "`git -C {0} reset --soft HEAD^` to get back to where you were.\n\n"
                                      "Found changes: {1}".format(self.src_repo_path, git_status))
        except CalledProcessError as ex:
            # FIX: chain the subprocess failure as the cause.
            raise ValidationError("Unable to check status of git_local_work checkout {}. Is the "
                                  "rel_path correct?".format(src_info['rel_path'])) from ex

        self.commit = get_git_sha1(self.src_repo_path + "/.git", "HEAD")

    def get_id(self):
        """The captured HEAD commit identifies this source version."""
        return {"commit": self.commit}

    def checkout_to(self, directory):
        """Clone the local repository into `directory` at the captured commit."""
        # Clone into `src/`.
        check_call(["git", "clone", "-q", self.src_repo_path, directory])

        # Make sure we got the right commit as head
        assert get_git_sha1(directory + "/.git", "HEAD") == self.commit

        # Checkout from the bare repo in the cache folder at the specific sha1
        check_call([
            "git",
            "--git-dir",
            directory + "/.git",
            "--work-tree",
            directory, "checkout",
            "-f",
            "-q",
            self.commit])
def _identify_archive_type(filename):
"""Identify archive type basing on extension
Args:
filename: the path to the archive
Returns:
Currently only zip and tar.*/tgz archives are supported. The return values
for them are 'tar' and 'zip' respectively
"""
parts = filename.split('.')
# no extension
if len(parts) < 2:
return 'unknown'
# one extension
if parts[-1] == 'tgz':
return 'tar'
if parts[-1] == 'zip':
return 'zip'
# two extensions
if len(parts) >= 3 and parts[-2] == 'tar':
return 'tar'
return 'unknown'
def _check_components_sanity(path):
"""Check if archive is sane
Check if there is only one top level component (directory) in the extracted
archive's directory.
Args:
path: path to the extracted archive's directory
Raises:
Raise an exception if there is anything else than a single directory
"""
dir_contents = os.listdir(path)
if len(dir_contents) != 1 or not os.path.isdir(os.path.join(path, dir_contents[0])):
raise ValidationError("Extracted archive has more than one top level"
"component, unable to strip it.")
def _strip_first_path_component(path):
    """Simulate tar's --strip-components=1 behaviour using file operations

    Unarchivers like unzip cannot drop the leading path component while
    extracting, so this lifts every entry of the single top-level
    directory up into `path` and then removes the emptied directory.

    Args:
        path: path where extracted archive contents can be found
    """
    _check_components_sanity(path)

    wrapper_dir = os.path.join(path, os.listdir(path)[0])
    for entry_name in os.listdir(wrapper_dir):
        os.rename(os.path.join(wrapper_dir, entry_name),
                  os.path.join(path, entry_name))
    os.rmdir(wrapper_dir)
def extract_archive(archive, dst_dir):
    """Extract `archive` into `dst_dir`, stripping the top-level directory.

    Delegates to the external `tar` or `unzip` binaries depending on
    the extension; raises ValidationError for unsupported archives.
    """
    archive_type = _identify_archive_type(archive)

    if archive_type == 'tar':
        check_call(["tar", "-xf", archive, "--strip-components=1", "-C", dst_dir])
    elif archive_type == 'zip':
        check_call(["unzip", "-x", archive, "-d", dst_dir])
        # unzip binary does not support '--strip-components=1',
        # so emulate it with file moves after extraction.
        _strip_first_path_component(dst_dir)
    else:
        raise ValidationError("Unsupported archive: {}".format(os.path.basename(archive)))
class UrlSrcFetcher(SourceFetcher):
    """Fetches a source by downloading (and optionally extracting) a URL."""

    def __init__(self, src_info, cache_dir, working_directory):
        """Validate `src_info` and compute the local cache location.

        Raises ValidationError unless the keys are exactly
        {'kind', 'sha1', 'url'}.
        """
        super().__init__(src_info)
        assert self.kind in {'url', 'url_extract'}

        if src_info.keys() != {'kind', 'sha1', 'url'}:
            raise ValidationError(
                "url and url_extract sources must have exactly 'sha1' (sha1 of the artifact"
                " which will be downloaded), and 'url' (url to download artifact) as options")

        self.url = src_info['url']
        # 'url_extract' unpacks the downloaded archive into src/.
        self.extract = (self.kind == 'url_extract')
        self.cache_dir = cache_dir
        self.cache_filename = self._get_filename(cache_dir)
        self.working_directory = working_directory
        self.sha = src_info['sha1']

    def _get_filename(self, out_dir):
        """Return the path in `out_dir` named after the URL's last component."""
        assert '://' in self.url, "Scheme separator not found in url {}".format(self.url)
        return os.path.join(out_dir, os.path.basename(self.url.split('://', 2)[1]))

    def get_id(self):
        """The expected artifact sha1 identifies this source version."""
        return {
            "downloaded_sha1": self.sha
        }

    def checkout_to(self, directory):
        """Download (or reuse the cached) artifact, verify its sha1, place it."""
        # Download file to cache if it isn't already there
        if not os.path.exists(self.cache_filename):
            print("Downloading source tarball {}".format(self.url))
            download_atomic(self.cache_filename, self.url, self.working_directory)

        # Validate the sha1 of the source is given and matches the sha1
        file_sha = sha1(self.cache_filename)

        if self.sha != file_sha:
            corrupt_filename = self.cache_filename + '.corrupt'
            # FIX: use os.replace instead of shelling out to `mv`:
            # portable, no subprocess, and it likewise overwrites any
            # previous .corrupt file in the same directory.
            os.replace(self.cache_filename, corrupt_filename)
            raise ValidationError(
                "Provided sha1 didn't match sha1 of downloaded file, corrupt download saved as {}. "
                "Provided: {}, Download file's sha1: {}, Url: {}".format(
                    corrupt_filename, self.sha, file_sha, self.url))

        if self.extract:
            extract_archive(self.cache_filename, directory)
        else:
            # Copy the file(s) into src/
            # TODO(cmaloney): Hardlink to save space?
            shutil.copyfile(self.cache_filename, self._get_filename(directory))
# Registry mapping each source 'kind' string to the fetcher class that
# handles it; 'url' and 'url_extract' share one implementation.
all_fetchers = {
    "git": GitSrcFetcher,
    "git_local": GitLocalSrcFetcher,
    "url": UrlSrcFetcher,
    "url_extract": UrlSrcFetcher
}
|
|
#!/usr/bin/env python3
import base64
import collections
import concurrent.futures
import functools
import gi
import glob
import html.entities
import os
import os.path
import re
import subprocess
import sys
import threading
import unidecode
import urllib.error
import urllib.parse
import urllib.request
from gi.repository import GLib
############################################################
def memoized_property(fget):
    """Like @property, but the getter is evaluated at most once.

    The computed value is stashed on the instance under '_<name>' and
    returned directly on every later access.
    """
    cache_slot = '_{0}'.format(fget.__name__)

    @functools.wraps(fget)
    def getter(self):
        if not hasattr(self, cache_slot):
            setattr(self, cache_slot, fget(self))
        return getattr(self, cache_slot)

    return property(getter)
############################################################
def unescape(text):
    """Replace HTML entities in `text` with the characters they encode.

    Handles decimal (&#65;) and hex (&#x41;) character references and
    named entities (&amp;); anything unrecognised is left untouched.
    """
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # numeric character reference, hex or decimal
            try:
                if text[:3] == "&#x":
                    return chr(int(text[3:-1], 16))
                else:
                    return chr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = chr(html.entities.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text  # leave as is
    # FIX: raw string for the pattern; "\w" in a plain string literal is
    # an invalid escape sequence (DeprecationWarning, an error in
    # future Python versions).
    return re.sub(r"&#?\w+;", fixup, text)
############################################################
class BibFetcher:
########################################################
# Guess data for one file
########################################################
def __init__(self, path):
    """Store the target file's path and compile the filename pattern.

    Filenames are expected to look like
    "Author1_Author2_YYYY_Some_Title.pdf" (extension pdf, djvu or ps);
    the year component is optional.
    """
    self.original_path = path
    self.path = path
    self.filename = os.path.basename(self.path)
    # FIX: the compiled pattern used to be assigned to a local `rgx`
    # and discarded, but every consumer reads `self.rgx`
    # (rgx_match_or_arxiv), which would raise AttributeError.
    self.rgx = re.compile(r"""
        ^
        (?P<basename>
            (?P<authors> .+)
            _
            (?P<year> \d\d\d\d)?
            _
            (?P<title> .+)
        )
        \.
        (?P<extension> pdf|djvu|ps)
        $
        """, re.X)
@memoized_property
def rgx_match_or_arxiv(self):
    """Match the filename pattern, falling back to arXiv metadata.

    If the filename does not parse but the PDF embeds an arXiv id,
    rewrite self.path/self.filename to the canonical
    "Authors_Year_Title.pdf" name built from the arXiv record and
    retry. Returns the match object, or None.
    """
    m = self.rgx.match(self.filename)
    if not m:
        if self.arxiv_id_from_pdf is not None:
            cpath = self.arxiv_canonical_path
            if cpath is not None:
                # Retry the pattern against the canonical name.
                self.path = cpath
                self.filename = cpath
                m = self.rgx.match(self.filename)
    return m

@memoized_property
def data(self):
    """Dict of named groups from the filename match ({} when no match)."""
    m = self.rgx_match_or_arxiv
    if m:
        return m.groupdict()
    else:
        return {}
@memoized_property
def is_good(self):
    """True when the filename was successfully parsed."""
    try:
        self.data['basename']
    except KeyError:
        return False
    else:
        return True

@memoized_property
def basename(self):
    """Parsed basename, or the filename without extension as fallback."""
    if self.is_good:
        return self.data['basename']
    else:
        return os.path.splitext(self.filename)[0]

@memoized_property
def extension(self):
    # NOTE(review): raises KeyError when the filename did not parse --
    # presumably callers check is_good first; verify.
    return self.data['extension']

@memoized_property
def authors(self):
    """List of author surnames split out of the filename."""
    au = self.data['authors']
    #print(au)
    # Re-join nobiliary particles ("de", "Du") to the following name so
    # "de_Branges" stays one author instead of splitting into two.
    au = re.sub(r'(^|_)(de|Du)_', r'\1\2 ', au)
    #au = au.replace('de_', 'de ')
    au = au.split('_')
    return au

@memoized_property
def year(self):
    # May be None: the year group in the filename pattern is optional.
    return self.data['year']

@memoized_property
def title(self):
    """Title with underscores restored to spaces."""
    title = self.data['title']
    return title.replace('_', ' ')
@memoized_property
def safe_title(self):
    """The title truncated at the first '$' (start of TeX math)."""
    return self.title.partition("$")[0]
@memoized_property
def short_title(self):
    """At most the first five whitespace-separated words of safe_title."""
    return " ".join(self.safe_title.split()[:5])
@memoized_property
def title_set(self):
    """Set of ASCII-transliterated title words, for fuzzy comparison."""
    return set(unidecode.unidecode(word) for word in self.safe_title.split())
########################################################
# Fetch all the bibtex
########################################################

@memoized_property
def bibtex(self):
    """Header plus MSN, zbMATH and arXiv entries, newline-joined.

    When the filename never parsed, the three "not found" placeholders
    are emitted instead of querying the services.
    """
    if not self.is_good:
        bib = [self.bibtex_head,
               self.msn_not_found,
               self.zbmath_not_found,
               self.arxiv_not_found]
    else:
        bib = [self.bibtex_head,
               self.msn_bib,
               self.zbmath_bib,
               self.arxiv_bib]
    return "\n".join(bib)

@memoized_property
def bibtex_head(self):
    """Comment line identifying the source file."""
    return "% {}\n".format(self.filename)

@property
def bib_status(self):
    """Four-char status string: MSN, zbMATH, arXiv, personal.

    NOTE(review): `per_status` is never assigned in this class --
    presumably set externally (UI side); verify before relying on it.
    """
    return (self.msn_status
            + self.zbmath_status
            + self.arxiv_status
            + self.per_status)
########################################################
# Fetch MathSciNet bibtex
########################################################

# Placeholder entry / status character when MathSciNet has no match.
msn_not_found = "% No MathSciNet entry found.\n"
msn_status = " "

@memoized_property
def msn_bib(self):
    """BibTeX + amsrefs from MathSciNet, or the not-found placeholder.

    Queries first with the year constraint, then without it, and
    normalises the formatting of whatever comes back.
    """
    bibs = self.msn_bib_bibtex(True)
    if bibs:
        bibs += self.msn_bib_amsrefs(True)
    elif self.year is not None:
        # Retry without the year restriction.
        bibs = self.msn_bib_bibtex(False)
        if bibs:
            bibs += self.msn_bib_amsrefs(False)
    if bibs:
        self.msn_status = "M"
        bib = "\n".join(bibs)
        # "@article {MR..." -> "@article{MR..." and lowercase the
        # field names at the start of each line.
        bib = re.sub(r"@(\w+) {MR", r"@\1{MR", bib)
        bib = re.sub(
            r"^\s*(\w+)\s+=\s+",
            lambda m: m.group(0).lower(),
            bib, flags=re.M)
        return bib
    else:
        self.msn_status = "-"
        return self.msn_not_found

# A BibTeX entry: from "@..." to the next closing brace at column 0.
msn_rgx_bibtex = re.compile(r"^@.*?^}", re.M | re.S)
# An amsrefs entry: from "\bib..." to the next closing brace at column 0.
msn_rgx_amsrefs = re.compile(r"^\\bib.*?^}", re.M | re.S)

def msn_bib_bibtex(self, use_year):
    return self.msn_bib_aux("bibtex", self.msn_rgx_bibtex, use_year)

def msn_bib_amsrefs(self, use_year):
    return self.msn_bib_aux("amsrefs", self.msn_rgx_amsrefs, use_year)

def msn_bib_aux(self, fmt, regex, use_year):
    """Fetch the MSN search page and pull out every entry matching `regex`."""
    url = self.msn_url(fmt, use_year)
    html = self.get_html(url, use_proxy=True)
    if not html:
        return []
    bibs = re.findall(regex, html)
    if bibs:
        return [ bib+"\n" for bib in bibs ]
    else:
        return []

msn_root = "http://www.ams.org/mathscinet/search/publications.html?fmt="

def msn_url(self, fmt, use_year=True):
    """Search URL for format `fmt`, with or without the year term."""
    if use_year:
        return self.msn_root + fmt + self.msn_query_year
    else:
        return self.msn_root + fmt + self.msn_query

@memoized_property
def msn_query(self):
    return self.msn_query_aux[0]

@memoized_property
def msn_query_year(self):
    return self.msn_query_aux[1]

@memoized_property
def msn_query_aux(self):
    """Build both query strings (without and with the year term)."""
    query = ""
    tmpl = "&pg{num}={key}&s{num}={val}".format
    title = urllib.parse.quote(self.short_title)
    query += tmpl(num=1, key='TI', val=title)
    # Author terms start at slot 3; slot 2 is reserved for the year.
    for n, author in enumerate(self.authors, 3):
        author = urllib.parse.quote(author)
        query += tmpl(num=n, key='AUCN', val=author)
    year_query = query
    if self.year is not None:
        year_query += tmpl(num=2, key='YR', val=self.year)
    return query, year_query
########################################################
# Fetch zbMATH bibtex
########################################################

# Placeholder entry / status character when zbMATH has no match.
zbmath_not_found = "% No zbMATH entry found.\n"
zbmath_status = " "

@memoized_property
def zbmath_bib(self):
    """BibTeX from zbMATH, or the not-found placeholder.

    Searches first with the year constraint, then without; the result
    page links to a per-entry .bib file which is fetched separately.
    """
    html = self.get_html(self.zbmath_url_year)
    if not html:
        self.zbmath_status = "-"
        return self.zbmath_not_found
    m = re.search(r"bibtex/(\d|\.)+\.bib", html)
    if not m:
        # Retry without the year restriction.
        html = self.get_html(self.zbmath_url)
        if not html:
            self.zbmath_status = "-"
            return self.zbmath_not_found
        m = re.search(r"bibtex/(\d|\.)+\.bib", html)
    if not m:
        self.zbmath_status = "-"
        return self.zbmath_not_found
    else:
        bib_url = "https://zbmath.org/" + m.group(0)
        bib = self.get_html(bib_url)
        if bib:
            self.zbmath_status = "Z"
            return bib + "\n"
        else:
            self.zbmath_status = "-"
            return self.zbmath_not_found

@memoized_property
def zbmath_url_year(self):
    """Search URL constraining title, year (when known) and authors."""
    url = []
    tmpl = "{key}: {val}".format
    title = urllib.parse.quote(self.safe_title)
    url.append(tmpl(key='ti', val=title))
    if self.year is not None:
        url.append(tmpl(key='py', val=self.year))
    url.append(tmpl(key='au', val=self.zbmath_author_aux))
    # %26 is an escaped '&', the conjunction in zbMATH query syntax.
    url = " %26 ".join(url)
    url = "https://zbmath.org/?q=" + url
    return url

@memoized_property
def zbmath_url(self):
    """Search URL constraining title and authors only."""
    url = []
    tmpl = "{key}: {val}".format
    title = urllib.parse.quote(self.safe_title)
    url.append(tmpl(key='ti', val=title))
    url.append(tmpl(key='au', val=self.zbmath_author_aux))
    url = " %26 ".join(url)
    url = "https://zbmath.org/?q=" + url
    return url

@memoized_property
def zbmath_author_aux(self):
    """Author query fragment; names containing spaces match either spelling."""
    authors = []
    for au in self.authors:
        au2 = au.replace(" ","")
        if au == au2:
            authors.append(au)
        else:
            authors.append("({}|{})".format(au, au2))
    authors = [urllib.parse.quote(author) for author in authors]
    return " ".join(authors)
########################################################
# Fetch arXiv bibtex
########################################################

# Placeholder entry / status character when arXiv has no match.
arxiv_not_found = "% No arXiv entry found.\n"
arxiv_status = " "

@memoized_property
def arxiv_bib(self):
    """BibTeX built from arXiv metadata (placeholder when no id found)."""
    if self.arxiv_id is None:
        self.arxiv_status = "-"
    else:
        self.arxiv_status = "A"
    return self.arxiv_bib_aux

@memoized_property
def arxiv_id(self):
    """arXiv identifier from the PDF itself, else from a web search."""
    arxiv_id = self.arxiv_id_from_pdf
    if arxiv_id is None:
        arxiv_id = self.get_arxiv_id_from_web()
    return arxiv_id

@memoized_property
def arxiv_id_from_pdf(self):
    """arXiv id embedded in the PDF as an abs-page URI, or None.

    The file is read as latin-1 text (a lossless byte-to-char mapping)
    so the raw PDF stream can be regex-searched without decode errors.
    """
    with open(self.path, "r", encoding="latin-1") as pdffile:
        pdfdata = pdffile.read()
    m = re.search(r"/URI\(http://ar[Xx]iv.org/abs/(.+)\)", pdfdata)
    if not m:
        return None
    else:
        return m.group(1)
# Matches the <id>http://arxiv.org/abs/XXXX</id> and <title>...</title>
# elements of the Atom feed, capturing either the bare id (trailing
# version suffix dropped) or the title text.
arxiv_rgx = re.compile(r"""
    < (?P<type> id|title ) >
    (?:
        (?:
            http://arxiv\.org/abs/
            (?P<arxiv_id> .*? )
            (?: v\d+ )?
        )
        |
        (?P<title> .*? )
    )
    </ (?P=type) >
    """, re.X)

def title_match(self, other_title):
    """True when `other_title` shares enough words with our title.

    Titles are compared as sets of ASCII-transliterated words; a match
    needs at least six common words, or all of the shorter title.
    """
    ots = other_title.split()
    ots = set(unidecode.unidecode(t) for t in ots)
    inter = ots.intersection(self.title_set)
    goal = min(6, len(ots), len(self.title_set))
    return len(inter) >= goal

def get_arxiv_id_from_web(self):
    """Search the arXiv Atom API and return the id whose title matches.

    Feed entries arrive as an <id> followed by its <title>: remember
    the last id seen and return it when the following title matches.
    Returns None when nothing matches or the feed is unreachable.
    """
    atom = self.get_html(self.arxiv_atom_url)
    if not atom:
        return None
    arxiv_id = ""
    for m in self.arxiv_rgx.finditer(atom):
        md = m.groupdict()
        if md['type'] == 'id':
            arxiv_id = md['arxiv_id']
        elif arxiv_id and self.title_match(md['title']):
            return arxiv_id

@memoized_property
def arxiv_atom_url(self):
    """Atom API query URL built from title and author terms."""
    queries = []
    tmpl = "{key}:{val}".format
    title = unidecode.unidecode(self.safe_title)
    title = urllib.parse.quote(title)
    queries.append(tmpl(key='ti', val=title))
    for author in self.authors:
        author = unidecode.unidecode(author)
        author = urllib.parse.quote(author)
        queries.append(tmpl(key='au', val=author))
    root = "http://export.arxiv.org/api/query?search_query="
    query = "+AND+".join(queries)
    return root + query
@memoized_property
def arxiv_data(self):
    """Citation metadata scraped from the arXiv abstract page.

    Parses the <meta name="citation_*"> tags into a dict, joining
    multiple authors with " and " and deriving the abs-page URL from
    the PDF URL. Empty dict when there is no arXiv id.

    NOTE(review): assumes the abstract-page fetch succeeded; get_html
    may return None, which would crash on .split().
    """
    if self.arxiv_id is None:
        return {}
    url = "http://arxiv.org/abs/" + self.arxiv_id
    html = self.get_html(url)
    regex = r"<meta\s*name=\"citation_(%s)\"\s*content=\"(.*)\"\s*/>"
    regex = regex % "title|author|date|arxiv_id|pdf_url|doi"
    data = {}
    for line in html.split("\n"):
        m = re.match(regex, line)
        if m:
            key = m.group(1)
            content = m.group(2)
            if key == "author" or key == "title":
                content = unescape(content)
            if key == "author" and "author" in data:
                # Additional authors are appended in BibTeX style.
                data["author"] += " and " + content
            elif key == "arxiv_id":
                data["archivePrefix"] = "arXiv"
                data["eprint"] = content
            elif key == "pdf_url":
                data["pdf_url"] = content
                # The abstract URL is the PDF URL with pdf -> abs.
                content = content.replace("pdf", "abs", 1)
                data["url"] = content
            else:
                data[key] = content
    return data
@memoized_property
def arxiv_bib_aux(self):
    """Format self.arxiv_data as an @article BibTeX entry."""
    if self.arxiv_id is None:
        return self.arxiv_not_found
    data = self.arxiv_data.copy()
    if "eprint" in data:
        bibtex = "@article{arXiv:%s,\n" % data["eprint"]
    else:
        bibtex = "@article{arXiv:ERROR,\n"
    # Emit the well-known fields first, in a fixed order...
    for key in "author", "title", "date", "archivePrefix", "eprint":
        if key in data:
            val = data[key]
            bibtex += " %s = {%s},\n" % (key, val)
            del data[key]
    # ...then whatever else was scraped.
    for key,val in data.items():
        bibtex += " %s = {%s},\n" % (key, val)
    bibtex += "}\n"
    return bibtex

@memoized_property
def arxiv_canonical_path(self):
    """Build "Authors_Year_Title.pdf" from arXiv metadata, or None."""
    if self.arxiv_id is None:
        return None
    data = self.arxiv_data.copy()
    if "author" not in data:
        return None
    if "title" not in data:
        return None
    # Keep only surnames: "Last, First and Last2, First2" -> Last_Last2.
    authors = data["author"].split(" and ")
    authors = [ au.split(",")[0] for au in authors ]
    authors = "_".join(authors)
    if "date" in data:
        year = data["date"][:4]
    else:
        year = ""
    title = data["title"]
    cpath = authors + "_" + year + "_" + title + ".pdf"
    cpath = cpath.replace(" ", "_")
    return cpath
########################################################
# Personal bibtex entries
########################################################

@memoized_property
def personal_bib(self):
    """Contents of the user's hand-written bib file ("" when absent)."""
    if not self.personal_bib_exists:
        return ""
    with open(self.personal_bib_path, encoding="utf-8") as pfile:
        return pfile.read()

@memoized_property
def personal_bib_path(self):
    """Path of the personal entry: ~/.pulp-bib/personal/<basename>.bib."""
    home = os.path.expanduser("~")
    pdir = os.path.join(home, ".pulp-bib", "personal")
    return os.path.join(pdir, self.basename + ".bib")

@memoized_property
def personal_bib_exists(self):
    return os.path.exists(self.personal_bib_path)

def save_personal_bib(self, pbib):
    """Write `pbib` to the personal bib file and refresh the caches."""
    pdir = os.path.dirname(self.personal_bib_path)
    if not os.path.exists(pdir):
        os.makedirs(pdir)
    with open(self.personal_bib_path, "w", encoding="utf-8") as pfile:
        pfile.write(pbib)
    # Update the memoized_property backing slots directly so later
    # reads see the new contents without re-hitting the disk.
    self._personal_bib_exists = True
    self._personal_bib = pbib
########################################################
# Cache of bibtex entries
########################################################

@memoized_property
def cache_bib(self):
    """Contents of the cached bib file ("" when absent)."""
    if not self.cache_bib_exists:
        return ""
    with open(self.cache_bib_path, encoding="utf-8") as cfile:
        return cfile.read()

@memoized_property
def cache_bib_path(self):
    """Path of the cached entry: ~/.pulp-bib/cache/<basename>.bib."""
    home = os.path.expanduser("~")
    cdir = os.path.join(home, ".pulp-bib", "cache")
    return os.path.join(cdir, self.basename + ".bib")

@memoized_property
def cache_bib_exists(self):
    return os.path.exists(self.cache_bib_path)
def save_cache_bib(self, bib=None):
    """Write `bib` (default: the assembled bibtex) to the cache file.

    Also refreshes the memoized_property backing slots so later reads
    return the freshly written text without re-reading the disk.
    """
    cdir = os.path.dirname(self.cache_bib_path)
    if not os.path.exists(cdir):
        os.makedirs(cdir)
    # FIX: identity comparison for the None sentinel (was `== None`).
    if bib is None:
        bib = self.bibtex
    with open(self.cache_bib_path, "w", encoding="utf-8") as cfile:
        cfile.write(bib)
    self._cache_bib_exists = True
    self._cache_bib = bib
########################################################
# Url opener with proxy
########################################################

def get_html(self, url, use_proxy=False):
    """Fetch `url` and return the decoded body, or None on failure.

    Retries up to 10 times before giving up. Spaces in the URL are
    percent-escaped because the query builders join terms with spaces.
    """
    url = url.replace(' ', '%20')
    req = urllib.request.Request(url)
    if use_proxy:
        self.add_proxy(req)
    handle = None
    for try_num in range(10):
        try:
            handle = urllib.request.urlopen(req)
        # FIX: catch Exception rather than a bare `except:`, which also
        # swallowed KeyboardInterrupt and SystemExit.
        except Exception:
            handle = None
            continue
        else:
            break
    if handle is None:
        return
    # FIX: fall back to UTF-8 when the server declares no charset;
    # decode(None) used to raise TypeError.
    enc = handle.headers.get_content_charset() or "utf-8"
    html = handle.read()
    html = html.decode(enc)
    return html
def add_proxy(self, req):
    """Route `req` through the hard-wired institutional proxy."""
    req.set_proxy("proxy.csic.es:3128", req.type)
    # SECURITY NOTE(review): hard-coded Basic-auth credentials in
    # source control; these should live in a config file or keyring.
    authheader = "Basic MzQ5OTAzNTVIOkY1ZzhyNGUz"
    req.add_header("Proxy-Authorization", authheader)
############################################################
class ThreadedBibFetcher:
    """Runs a BibFetcher in a background thread, reporting via GLib.

    Callbacks are queued until the worker has results and are then
    scheduled on the GLib main loop: `first_callback`s fire as soon as
    the cached and personal entries are available, plain callbacks fire
    once the full (network-fetched) bibtex is ready.
    """

    def __init__(self, path):
        self.path = path
        self.bib_fetcher = None       # created lazily by check_fetcher()
        self.worker_done = False      # full bibtex is available
        self.worker_running = False   # background thread already started
        self.first_callbacks = []     # queued [cb, args] for cached results
        self.callbacks = []           # queued [cb, args] for final results

    def check_fetcher(self):
        """Create the underlying BibFetcher on first use."""
        if self.bib_fetcher is None:
            self.bib_fetcher = BibFetcher(self.path)

    def run_thread(self):
        """Start the background worker (at most once).

        NOTE(review): the flag is checked and set without a lock --
        presumably only ever called from the GLib main thread; confirm.
        """
        if self.worker_running:
            return
        self.worker_running = True
        thread = threading.Thread(
            target = self.thread_worker,
            args = (self.bib_fetcher,),
            daemon = True)
        thread.start()

    def thread_worker(self, fetcher):
        """Background job: warm the caches, then fetch the full bibtex."""
        # Touching the properties forces their (possibly slow) first
        # computation here, off the UI thread; the callbacks re-read
        # the now-memoized values from the fetcher.
        cache_bib = fetcher.cache_bib
        personal_bib = fetcher.personal_bib
        GLib.idle_add(self.worker_first_callback)
        bibtex = fetcher.bibtex
        fetcher.save_cache_bib()
        GLib.idle_add(self.worker_first_callback)
        GLib.idle_add(self.worker_callback)

    def worker_first_callback(self):
        """Drain the queued first-callbacks with cached/personal entries."""
        while self.first_callbacks:
            cb, args = self.first_callbacks.pop(0)
            GLib.idle_add(cb,
                          self.bib_fetcher.cache_bib,
                          self.bib_fetcher.personal_bib,
                          *args)

    def worker_callback(self):
        """Mark completion and drain the queued final callbacks."""
        self.worker_done = True
        while self.callbacks:
            cb, args = self.callbacks.pop(0)
            GLib.idle_add(cb, self.bib_fetcher.bibtex, *args)

    def async_get_bibtex(self, first_callback, callback, *args):
        """Request the bibtex; both callbacks run on the GLib main loop.

        If the worker already finished, the callbacks are scheduled
        immediately; otherwise they are queued and the worker started.
        """
        self.check_fetcher()
        if self.worker_done:
            GLib.idle_add(first_callback,
                          self.bib_fetcher.cache_bib,
                          self.bib_fetcher.personal_bib,
                          *args)
            GLib.idle_add(callback,
                          self.bib_fetcher.bibtex,
                          *args)
        else:
            self.first_callbacks.append([first_callback, args])
            self.callbacks.append([callback, args])
            self.run_thread()

    def save_personal_bib(self, pbib):
        """Persist `pbib` through the underlying fetcher."""
        self.check_fetcher()
        self.bib_fetcher.save_personal_bib(pbib)
|
|
# -*- coding: ISO-8859-1 -*-
# Copyright (c) 2006-2013, Alexis Royer, http://alexis.royer.free.fr/CLI
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CLI library project nor the names of its contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" CLI common lib. """
import modlog
import codecs
import sys
class Cache:
    """ Pre-computations.

    Walks the CLI XML document once and caches node, menu, command-line
    and tag information on the execution context for the later
    generation passes. """

    def __init__(self):
        """ Constructor. """
        # All nodes
        self.xml_nodes = None
        # Main information
        self.cli_name = None # Name of the CLI
        self.page_title = None # Page title
        # Menus
        self.menus = [] # Array of cli:cli or cli:menu XML nodes.
@staticmethod
def execute(ctx):
    """ Realizes computations.
    @param ctx (Cli2xxx) Execution context. """
    # Temporary work lists, dropped again once the computations below
    # have consumed them.
    ctx.cache.menu_refs = [] # Array of cli:menu[@ref] XML nodes.
    ctx.cache.endls = [] # Array of cli:endl XML nodes.
    ctx.cache.tag_ids = [] # Array of cli:tag[@id] XML nodes.
    ctx.cache.tag_refs = [] # Array of cli:tag[@ref] XML nodes.
    ctx.cache.read_all_nodes(ctx)
    ctx.cache.compute_main_infos(ctx)
    ctx.cache.compute_menus(ctx)
    ctx.cache.compute_command_lines(ctx)
    ctx.cache.compute_tags(ctx)
    del ctx.cache.menu_refs
    del ctx.cache.endls
    del ctx.cache.tag_ids
    del ctx.cache.tag_refs
@staticmethod
def read_all_nodes(ctx, xml_node = None):
    """ Read all XML nodes and cache everything is worth.
    @param ctx (Cli2xxx) Execution context.
    @param xml_node (XML node) Current XML node. None for the top call. """
    if (xml_node == None):
        # First of all, determine whether xml_nodes cache should be set
        setattr(ctx.xml.root_node(), "cli_Cli2xxx_cache_test", True)
        if (getattr(ctx.xml.root_node(), "cli_Cli2xxx_cache_test", None) == None):
            # The XML framework does not return the same native instances for a single node.
            # Caching needs to be done for every node so that we rely on constant instances for caching.
            ctx.cache.xml_nodes = {}
        # Walk the whole document from the root, numbering cli nodes.
        ctx.cache.global_node_index = 0
        ctx.cache.read_all_nodes(ctx, ctx.xml.root_node())
        ctx.cache.node(ctx.xml.root_node()).cli_Cli2xxx_child_index = 0
        del ctx.cache.global_node_index
    else:
        if (ctx.xml.is_node(xml_node, "cli:*")):
            if (ctx.cache.xml_nodes != None):
                # Remember a unique instance for each node
                ctx.cache.xml_nodes[xml_node] = xml_node
            # Compute the global node identifier
            xml_node.cli_Cli2xxx_global_index = ctx.cache.global_node_index
            ctx.cache.global_node_index += 1
            # Memorize menu nodes
            if (ctx.xml.is_node(xml_node, "cli:cli") or ctx.xml.is_node_with_attr(xml_node, "cli:menu", "@name")):
                ctx.cache.menus.append(xml_node)
            # Reserve cli:menu[@ref] nodes
            elif (ctx.xml.is_node_with_attr(xml_node, "cli:menu", "@ref")):
                ctx.cache.menu_refs.append(xml_node)
            # Reserve cli:endl nodes
            elif (ctx.xml.is_node(xml_node, "cli:endl")):
                ctx.cache.endls.append(xml_node)
            # Reserve cli:tag nodes
            elif (ctx.xml.is_node_with_attr(xml_node, "cli:tag", "@id")):
                ctx.cache.tag_ids.append(xml_node)
            elif (ctx.xml.is_node_with_attr(xml_node, "cli:tag", "@ref")):
                ctx.cache.tag_refs.append(xml_node)
        # Recursive call
        # Child indexes count cli:* siblings only; recursion descends
        # into every child node.
        _i_child_node_index = 0
        for _xml_child in ctx.xml.children(xml_node):
            if (ctx.xml.is_node(_xml_child, "cli:*")):
                _xml_child.cli_Cli2xxx_child_index = _i_child_node_index
                _i_child_node_index += 1
            ctx.cache.read_all_nodes(ctx, _xml_child)
@staticmethod
def compute_main_infos(ctx):
    """ Cache main information.
    @param ctx (Cli2xxx) Execution context. """
    # Only the help generator needs the CLI name and page title.
    if (ctx.__class__.__name__ == "Cli2Help"):
        modlog.log(ctx.MODLOG_FILTER, modlog.TRACE, "Caching main infos...")
        ctx.cache.cli_name = ctx.xml.attr_value(ctx.xml.root_node(), "@name")
        ctx.cache.page_title = ("%s - %s" % (ctx.cache.cli_name, ctx.Utils.translate(ctx, "Command Line Interface documentation")))
@staticmethod
def compute_menus(ctx):
    """ Cache cli:cli or cli:menu list, and resolve cli:menu[@ref] references.
    @param ctx (Cli2xxx) Execution context.
    Aborts the execution when a reference cannot be resolved. """
    modlog.log(ctx.MODLOG_FILTER, modlog.TRACE, "Caching menu list...")
    # ctx.cache.menus already computed. See read_all_nodes().
    #ctx.cache.menus.extend(ctx.xml.xpath_set(None, "/cli:cli"))
    #ctx.cache.menus.extend(ctx.xml.xpath_set(None, "//cli:menu[@name]"))
    # For each cli:menu[@ref], find out the respective menu.
    for _xml_menu_ref in ctx.cache.menu_refs:
        _menu_ref_name = ctx.xml.attr_value(_xml_menu_ref, "@ref")
        if (_menu_ref_name == None):
            # If _xml_menu_ref is in menu_refs, it must be because it has an @ref attribute!
            ctx.Utils.abort(ctx, _xml_menu_ref, "Internal error")
        for _xml_menu in ctx.cache.menus:
            _menu_name = ctx.xml.attr_value(_xml_menu, "@name")
            if (_menu_name == None):
                ctx.Utils.abort(ctx, _xml_menu, "Missing '@name' attribute for menu")
            if (_menu_name == _menu_ref_name):
                # Menu reference found: remember the target menu on the reference node.
                _xml_menu_ref.cli_Cli2xxx_menu = _xml_menu
        # Eventually check the menu reference has been resolved
        if (getattr(_xml_menu_ref, "cli_Cli2xxx_menu", None) == None):
            ctx.Utils.abort(ctx, _xml_menu_ref, "No such menu '%s'" % _menu_ref_name)
@staticmethod
def compute_command_lines(ctx):
    """ Cache command line informations. One cli:endl by command line.
    Command lines are attached to their respective menu.
    @param ctx (Cli2xxx) Execution context. """
    modlog.log(ctx.MODLOG_FILTER, modlog.TRACE, "Caching command lines...")
    # For each menu, create a command list.
    for _xml_menu in ctx.cache.menus:
        _xml_menu.cli_Cli2xxx_commands = []
    # For each cli:endl, find out the respective menu and attach the command line to it.
    for _xml_endl in ctx.cache.endls:
        _xml_menu = ctx.cache.owner_menu(ctx, _xml_endl)
        _xml_menu.cli_Cli2xxx_commands.append(_xml_endl)
@staticmethod
def compute_tags(ctx):
    """ Cache tag informations.
    Binds every cli:tag[@ref] to its cli:tag[@id] counterpart within the owner menu,
    then computes, for each tag identifier, the latest common parent node of all its
    references, plus a forward/backward flag.
    @param ctx (Cli2xxx) Execution context.
    Aborts the execution when a tag reference cannot be resolved. """
    modlog.log(ctx.MODLOG_FILTER, modlog.TRACE, "Caching tag informations...")
    # For each menu, create tag caches.
    for _xml_menu in ctx.cache.menus:
        _xml_menu.cli_Cli2xxx_tag_ids = {}
        _xml_menu.cli_Cli2xxx_tag_refs = []
    # For each cli:tag[@id] node, find out the respective menu.
    for _xml_tag in ctx.cache.tag_ids:
        _xml_menu = ctx.cache.owner_menu(ctx, _xml_tag)
        _tag_id = ctx.xml.attr_value(_xml_tag, "@id")
        _xml_menu.cli_Cli2xxx_tag_ids[_tag_id] = _xml_tag
        _xml_tag.cli_Cli2xxx_tag_refs = []
        if (ctx.xml.attr_value(_xml_tag, "@hollow") != "yes"):
            # Self reference anticipation: a non-hollow tag counts as a reference to itself.
            _xml_tag.cli_Cli2xxx_tag_refs.append(_xml_tag)
    # For each cli:tag[@ref], find out the respective menu and cli:tag[@id].
    for _xml_tag in ctx.cache.tag_refs:
        _xml_menu = ctx.cache.owner_menu(ctx, _xml_tag)
        _xml_menu.cli_Cli2xxx_tag_refs.append(_xml_tag)
        _tag_id = ctx.xml.attr_value(_xml_tag, "@ref")
        # BUG FIX: dict.has_key() was removed in Python 3; the 'in' operator is the
        # equivalent test and works on both Python 2 and 3.
        if (_tag_id in _xml_menu.cli_Cli2xxx_tag_ids):
            _xml_menu.cli_Cli2xxx_tag_ids[_tag_id].cli_Cli2xxx_tag_refs.append(_xml_tag)
            _xml_tag.cli_Cli2xxx_tag_id = _xml_menu.cli_Cli2xxx_tag_ids[_tag_id]
        else:
            ctx.Utils.abort(ctx, _xml_tag, "No such tag identifier '%s'" % _tag_id)
    # For each tag identifier, find out the latest common node.
    for _xml_menu in ctx.cache.menus:
        for _tag_id in _xml_menu.cli_Cli2xxx_tag_ids.keys():
            _xml_common_parents = None
            for _xml_tag_ref in _xml_menu.cli_Cli2xxx_tag_ids[_tag_id].cli_Cli2xxx_tag_refs:
                # Compute the list of parents, from the owner menu down to the reference.
                _xml_parents = []
                _xml_parent = _xml_tag_ref
                while (_xml_parent != None):
                    _xml_parents.append(_xml_parent)
                    if (_xml_parent != _xml_menu):
                        _xml_parent = ctx.xml.parent_node(_xml_parent)
                    else:
                        # Stop climbing at the owner menu.
                        _xml_parent = None
                _xml_parents.reverse()
                # Compute the intersection with common parents computed until then.
                if (_xml_common_parents == None):
                    _xml_common_parents = _xml_parents
                else:
                    _xml_common_parents2 = []
                    for _xml_parent in _xml_common_parents:
                        if (_xml_parent in _xml_parents):
                            _xml_common_parents2.append(_xml_parent)
                        else:
                            # The parent paths diverge from here on.
                            break
                    _xml_common_parents = _xml_common_parents2
            # Store the latest common parent computed.
            _xml_common_parent = None
            if (_xml_common_parents == None):
                # Happens for hollow tags that have no reference at all.
                modlog.log(ctx.MODLOG_FILTER, modlog.WARNING, "No tag reference for tag '%s'" % _tag_id)
            elif (len(_xml_common_parents) > 0):
                _xml_common_parent = ctx.cache.node(_xml_common_parents[-1])
            _xml_tag_id = _xml_menu.cli_Cli2xxx_tag_ids[_tag_id]
            _xml_tag_id.cli_Cli2xxx_common_parent = _xml_common_parent
            _xml_tag_id.cli_Cli2xxx_is_backward = ctx.xml.is_ancestor_or_self(_xml_tag_id, _xml_common_parent)
@staticmethod
def owner_menu(ctx, xml_node):
    """ Computes the cli:cli or cli:menu owner menu that contains the given node.
    Returns the instance used for cached data.
    @param ctx (Cli2xxx) Execution context.
    @param xml_node (XML node) Node to compute the owner menu for.
    @return (XML node) cli:cli or cli:menu owner menu. Abortion if anything wrong occured. """
    # Walk up the ancestor chain until a menu node is reached.
    _xml_ancestor = ctx.xml.parent_node(xml_node)
    while (_xml_ancestor != None):
        _is_menu = (ctx.xml.is_node(_xml_ancestor, "cli:cli")
                    or ctx.xml.is_node_with_attr(_xml_ancestor, "cli:menu", "@name"))
        if _is_menu:
            # Return the canonical cached instance for that menu node.
            return ctx.cache.node(_xml_ancestor)
        _xml_ancestor = ctx.xml.parent_node(_xml_ancestor)
    ctx.Utils.abort(ctx, xml_node, "No parent menu found for node.")
def node(self, xml_node):
    """ Returns the XML node instance that stores cached data.
    Some XML parsers (libxml2 for instance) do not always return the same python
    instance for a same XML node, hence this indirection through self.xml_nodes.
    @param xml_node (XML node) XML node.
    @return (XML node) XML node that stores cached data for that XML node. """
    if (self.xml_nodes != None):
        # Cache available: return the unique instance remembered for this node.
        return self.xml_nodes[xml_node]
    # No cache: use the given instance directly.
    return xml_node
class Output:
    """ Generic transformation output class.

    Subclasses override _open(), _close() and _put() to direct the transformation
    result somewhere (console, file...). An optional codec can be set through
    set_encoding() to encode the text before it is written out. """
    # modlog filter name for this class.
    MODLOG_FILTER = "clicommon.Output"
    def __init__(self):
        """ Constructor. """
        #modlog.engine().set_filter(self.MODLOG_FILTER, modlog.DEBUG)
        # Codec used to encode output text; None means raw pass-through.
        self._codec = None
    def set_encoding(self, encoding):
        """ Automatic encoding setter.
        @param encoding (str) Encoding name. Unknown encodings are ignored with a warning. """
        try:
            self._codec = codecs.lookup(encoding)
        except LookupError:
            modlog.log(self.MODLOG_FILTER, modlog.WARNING, "Unknown encoding '%s'" % encoding)
    def open(self):
        """ Open the output stream.
        @return (boolean) True for success, False otherwise. """
        if (not(self._open())):
            return False
        else:
            return True
    def _open(self):
        """ Overridable output stream opening method.
        @return (boolean) True for success, False otherwise. """
        # Equivalent to True, but prevents pylint warnings:
        # - R0201:157,4:Output._open: Method could be a function
        return (self != None)
    def close(self):
        """ Close the output stream.
        @return (boolean) True for success, False otherwise. """
        if (not(self._close())):
            return False
        else:
            return True
    def _close(self):
        """ Overridable output stream closure method.
        @return (boolean) True for success, False otherwise. """
        # BUG FIX: this method used to call self._close() on itself, which recursed
        # infinitely and made close() raise RecursionError. It now mirrors _open():
        # equivalent to True, but prevents pylint warnings.
        return (self != None)
    def put(self, text):
        """ Output a line.
        @param text (str) Output text.
        @return (Output) Self (fluent interface). """
        #modlog.log(self.MODLOG_FILTER, modlog.DEBUG, "put(text = '%s')" % text)
        if (self._codec != None):
            # codecs encode() returns an (encoded, length) tuple; keep the data only.
            text = self._codec.encode(text)[0]
        if (self._put(text)):
            pass
        return self
    def _put(self, text):
        """ Overridable line output method.
        @param text (str) Output text.
        @return (boolean) True for success, False otherwise. """
        # Replacement for True, prevents pylint warnings:
        # - W0613:183,19:Output._put: Unused argument 'text'
        # - R0201:183,4:Output._put: Method could be a function)
        return ((self != None) and (text != None))
    def endl(self):
        """ Output end of line.
        @return (Output) Self. """
        return self.put("\n")
class Console (Output):
    """ Transformation output directed to the console (standard output). """
    def __init__(self):
        """ Constructor. """
        Output.__init__(self)
    def _put(self, text):
        """ Write the given text on the standard output.
        @param text (str) Output text.
        @return (boolean) Always True. """
        sys.stdout.write(text)
        return True
class CtxUtils:
    """ Common utility routines shared by the transformation contexts. """
    def __init__(self):
        """ Constructor. """
        # Static class, nothing to be done
        pass
    @staticmethod
    def version():
        """ Version accessor.
        @return (str) Program version. """
        _infos = (CtxUtils.short_version(), CtxUtils.author_name(), CtxUtils.url())
        return ("CLI library %s (%s, %s)" % _infos)
    @staticmethod
    def short_version():
        """ Short version accessor.
        @return (str) Short program version. """
        return "2.8"
    @staticmethod
    def author_name():
        """ Author name accessor.
        @return (str) Author name. """
        return "Alexis Royer"
    @staticmethod
    def url():
        """ URL accessor.
        @return (str) URL. """
        return "http://alexis.royer.free.fr/CLI/"
    @staticmethod
    def node_location(ctx, xml_node):
        """ Computes a human readable location for the given node.
        @param ctx (Cli2xxx) Execution context.
        @param xml_node (XML node) Node to compute a location for.
        @return (str) Node location. """
        if (xml_node == None):
            return "(root)"
        if (ctx.xml.is_node(xml_node, "cli:cli")):
            return "cli:cli"
        # Try each known (element, discriminating attribute) pair in turn; first match wins.
        for (_node_name, _attr_name) in (
                ("cli:menu", "@name"), ("cli:menu", "@ref"),
                ("cli:keyword", "@string"),
                ("cli:param", "@id"), ("cli:param", "@ref"),
                ("cli:tag", "@id"), ("cli:tag", "@ref")):
            if (ctx.xml.is_node_with_attr(xml_node, _node_name, _attr_name)):
                return ("%s[%s='%s']" % (_node_name, _attr_name, ctx.xml.attr_value(xml_node, _attr_name)))
        # Unidentified node: fall back on the raw node name.
        return ctx.xml.node_name(xml_node)
    class ExecutionStoppedException (Exception):
        """ Exception raised when the transformation execution has stopped because of an error. """
    @staticmethod
    def abort(ctx, xml_node, message):
        """ Abort current execution.
        @param ctx (Cli2xxx) Execution context.
        @param xml_node (XML node) Context node.
        @param message (str) Error message.
        @throw (ExecutionStoppedException) Always raised. """
        modlog.log(ctx.MODLOG_FILTER, modlog.ERROR, "%s: %s" % (ctx.Utils.node_location(ctx, xml_node), message))
        raise ctx.Utils.ExecutionStoppedException()
    @staticmethod
    def warn(ctx, xml_node, message):
        """ Display warning.
        @param ctx (Cli2xxx) Execution context.
        @param xml_node (XML node) Context node.
        @param message (str) Warning message. """
        modlog.log(ctx.MODLOG_FILTER, modlog.WARNING, "%s: %s" % (ctx.Utils.node_location(ctx, xml_node), message))
|
|
"""
Copyright 2015 Basho Technologies, Inc.
Copyright 2010 Rusty Klophaus <rusty@basho.com>
Copyright 2010 Justin Sheehy <justin@basho.com>
Copyright 2009 Jay Baird <jay@mochimedia.com>
This file is provided to you under the Apache License,
Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import riak_pb
from riak import RiakError
from riak.transports.transport import RiakTransport
from riak.riak_object import VClock
from riak.util import decode_index_value, str_to_bytes, bytes_to_str
from riak.transports.pbc.connection import RiakPbcConnection
from riak.transports.pbc.stream import (RiakPbcKeyStream,
RiakPbcMapredStream,
RiakPbcBucketStream,
RiakPbcIndexStream)
from riak.transports.pbc.codec import RiakPbcCodec
from six import PY2, PY3
from riak_pb.messages import (
MSG_CODE_PING_REQ,
MSG_CODE_PING_RESP,
MSG_CODE_GET_CLIENT_ID_REQ,
MSG_CODE_GET_CLIENT_ID_RESP,
MSG_CODE_SET_CLIENT_ID_REQ,
MSG_CODE_SET_CLIENT_ID_RESP,
MSG_CODE_GET_SERVER_INFO_REQ,
MSG_CODE_GET_SERVER_INFO_RESP,
MSG_CODE_GET_REQ,
MSG_CODE_GET_RESP,
MSG_CODE_PUT_REQ,
MSG_CODE_PUT_RESP,
MSG_CODE_DEL_REQ,
MSG_CODE_DEL_RESP,
MSG_CODE_LIST_BUCKETS_REQ,
MSG_CODE_LIST_BUCKETS_RESP,
MSG_CODE_LIST_KEYS_REQ,
MSG_CODE_GET_BUCKET_REQ,
MSG_CODE_GET_BUCKET_RESP,
MSG_CODE_SET_BUCKET_REQ,
MSG_CODE_SET_BUCKET_RESP,
MSG_CODE_GET_BUCKET_TYPE_REQ,
MSG_CODE_SET_BUCKET_TYPE_REQ,
MSG_CODE_MAP_RED_REQ,
MSG_CODE_INDEX_REQ,
MSG_CODE_INDEX_RESP,
MSG_CODE_SEARCH_QUERY_REQ,
MSG_CODE_SEARCH_QUERY_RESP,
MSG_CODE_RESET_BUCKET_REQ,
MSG_CODE_RESET_BUCKET_RESP,
MSG_CODE_COUNTER_UPDATE_REQ,
MSG_CODE_COUNTER_UPDATE_RESP,
MSG_CODE_COUNTER_GET_REQ,
MSG_CODE_COUNTER_GET_RESP,
MSG_CODE_YOKOZUNA_INDEX_GET_REQ,
MSG_CODE_YOKOZUNA_INDEX_GET_RESP,
MSG_CODE_YOKOZUNA_INDEX_PUT_REQ,
MSG_CODE_YOKOZUNA_INDEX_DELETE_REQ,
MSG_CODE_YOKOZUNA_SCHEMA_GET_REQ,
MSG_CODE_YOKOZUNA_SCHEMA_GET_RESP,
MSG_CODE_YOKOZUNA_SCHEMA_PUT_REQ,
MSG_CODE_DT_FETCH_REQ,
MSG_CODE_DT_FETCH_RESP,
MSG_CODE_DT_UPDATE_REQ,
MSG_CODE_DT_UPDATE_RESP
)
class RiakPbcTransport(RiakTransport, RiakPbcConnection, RiakPbcCodec):
    """
    The RiakPbcTransport object holds a connection to the protocol
    buffers interface on the riak server.
    """
    def __init__(self,
                 node=None,
                 client=None,
                 timeout=None,
                 *unused_options):
        """
        Construct a new RiakPbcTransport object.

        :param node: the node this transport connects to (provides host/pb_port)
        :param client: the owning RiakClient
        :param timeout: socket timeout in seconds, or None
        """
        super(RiakPbcTransport, self).__init__()
        self._client = client
        self._node = node
        self._address = (node.host, node.pb_port)
        self._timeout = timeout
        self._socket = None
    # FeatureDetection API
    def _server_version(self):
        """ Return the server version string used by feature detection. """
        return bytes_to_str(self.get_server_info()['server_version'])
    def ping(self):
        """
        Ping the remote server
        :rtype: boolean - True if the server answered the ping
        """
        msg_code, msg = self._request(MSG_CODE_PING_REQ)
        if msg_code == MSG_CODE_PING_RESP:
            return True
        else:
            return False
    def get_server_info(self):
        """
        Get information about the server
        :rtype: dict with 'node' and 'server_version' string entries
        """
        msg_code, resp = self._request(MSG_CODE_GET_SERVER_INFO_REQ,
                                       expect=MSG_CODE_GET_SERVER_INFO_RESP)
        return {'node': bytes_to_str(resp.node),
                'server_version': bytes_to_str(resp.server_version)}
    def _get_client_id(self):
        """ Fetch the client ID from the server (client_id property getter). """
        msg_code, resp = self._request(MSG_CODE_GET_CLIENT_ID_REQ,
                                       expect=MSG_CODE_GET_CLIENT_ID_RESP)
        return bytes_to_str(resp.client_id)
    def _set_client_id(self, client_id):
        """ Send the client ID to the server and remember it locally
        (client_id property setter). """
        req = riak_pb.RpbSetClientIdReq()
        req.client_id = str_to_bytes(client_id)
        msg_code, resp = self._request(MSG_CODE_SET_CLIENT_ID_REQ, req,
                                       MSG_CODE_SET_CLIENT_ID_RESP)
        self._client_id = client_id
    client_id = property(_get_client_id, _set_client_id,
                         doc="""the client ID for this connection""")
    def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
            notfound_ok=None):
        """
        Serialize get request and deserialize response.
        Mutates and returns *robj*; quorum/timeout options are only sent
        when the server supports them (feature detection).
        """
        bucket = robj.bucket
        req = riak_pb.RpbGetReq()
        if r:
            req.r = self._encode_quorum(r)
        if self.quorum_controls():
            if pr:
                req.pr = self._encode_quorum(pr)
            if basic_quorum is not None:
                req.basic_quorum = basic_quorum
            if notfound_ok is not None:
                req.notfound_ok = notfound_ok
        if self.client_timeouts() and timeout:
            req.timeout = timeout
        if self.tombstone_vclocks():
            req.deletedvclock = True
        req.bucket = str_to_bytes(bucket.name)
        self._add_bucket_type(req, bucket.bucket_type)
        req.key = str_to_bytes(robj.key)
        msg_code, resp = self._request(MSG_CODE_GET_REQ, req,
                                       MSG_CODE_GET_RESP)
        if resp is not None:
            if resp.HasField('vclock'):
                robj.vclock = VClock(resp.vclock, 'binary')
            # We should do this even if there are no contents, i.e.
            # the object is tombstoned
            self._decode_contents(resp.content, robj)
        else:
            # "not found" returns an empty message,
            # so let's make sure to clear the siblings
            robj.siblings = []
        return robj
    def put(self, robj, w=None, dw=None, pw=None, return_body=True,
            if_none_match=False, timeout=None):
        """
        Serialize put request and deserialize response.
        Mutates and returns *robj* (key/vclock/contents from the response).
        Raises RiakError when no key was supplied and none came back.
        """
        bucket = robj.bucket
        req = riak_pb.RpbPutReq()
        if w:
            req.w = self._encode_quorum(w)
        if dw:
            req.dw = self._encode_quorum(dw)
        if self.quorum_controls() and pw:
            req.pw = self._encode_quorum(pw)
        if return_body:
            req.return_body = 1
        if if_none_match:
            req.if_none_match = 1
        if self.client_timeouts() and timeout:
            req.timeout = timeout
        req.bucket = str_to_bytes(bucket.name)
        self._add_bucket_type(req, bucket.bucket_type)
        if robj.key:
            # Key is optional: the server generates one when absent.
            req.key = str_to_bytes(robj.key)
        if robj.vclock:
            req.vclock = robj.vclock.encode('binary')
        self._encode_content(robj, req.content)
        msg_code, resp = self._request(MSG_CODE_PUT_REQ, req,
                                       MSG_CODE_PUT_RESP)
        if resp is not None:
            if resp.HasField('key'):
                robj.key = bytes_to_str(resp.key)
            if resp.HasField("vclock"):
                robj.vclock = VClock(resp.vclock, 'binary')
            if resp.content:
                self._decode_contents(resp.content, robj)
        elif not robj.key:
            raise RiakError("missing response object")
        return robj
    def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None,
               timeout=None):
        """
        Serialize delete request and deserialize response.
        Sends the object's vclock when tombstone vclocks are supported.
        Returns self.
        """
        req = riak_pb.RpbDelReq()
        if rw:
            req.rw = self._encode_quorum(rw)
        if r:
            req.r = self._encode_quorum(r)
        if w:
            req.w = self._encode_quorum(w)
        if dw:
            req.dw = self._encode_quorum(dw)
        if self.quorum_controls():
            if pr:
                req.pr = self._encode_quorum(pr)
            if pw:
                req.pw = self._encode_quorum(pw)
        if self.client_timeouts() and timeout:
            req.timeout = timeout
        use_vclocks = (self.tombstone_vclocks() and
                       hasattr(robj, 'vclock') and robj.vclock)
        if use_vclocks:
            req.vclock = robj.vclock.encode('binary')
        bucket = robj.bucket
        req.bucket = str_to_bytes(bucket.name)
        self._add_bucket_type(req, bucket.bucket_type)
        req.key = str_to_bytes(robj.key)
        msg_code, resp = self._request(MSG_CODE_DEL_REQ, req,
                                       MSG_CODE_DEL_RESP)
        return self
    def get_keys(self, bucket, timeout=None):
        """
        Lists all keys within a bucket.
        Collects the streaming response into a single list of str keys.
        """
        keys = []
        for keylist in self.stream_keys(bucket, timeout=timeout):
            for key in keylist:
                keys.append(bytes_to_str(key))
        return keys
    def stream_keys(self, bucket, timeout=None):
        """
        Streams keys from a bucket, returning an iterator that yields
        lists of keys.
        """
        req = riak_pb.RpbListKeysReq()
        req.bucket = str_to_bytes(bucket.name)
        self._add_bucket_type(req, bucket.bucket_type)
        if self.client_timeouts() and timeout:
            req.timeout = timeout
        self._send_msg(MSG_CODE_LIST_KEYS_REQ, req)
        return RiakPbcKeyStream(self)
    def get_buckets(self, bucket_type=None, timeout=None):
        """
        Serialize bucket listing request and deserialize response
        """
        req = riak_pb.RpbListBucketsReq()
        self._add_bucket_type(req, bucket_type)
        if self.client_timeouts() and timeout:
            req.timeout = timeout
        msg_code, resp = self._request(MSG_CODE_LIST_BUCKETS_REQ, req,
                                       MSG_CODE_LIST_BUCKETS_RESP)
        return resp.buckets
    def stream_buckets(self, bucket_type=None, timeout=None):
        """
        Stream list of buckets through an iterator
        """
        if not self.bucket_stream():
            raise NotImplementedError('Streaming list-buckets is not '
                                      'supported')
        req = riak_pb.RpbListBucketsReq()
        req.stream = True
        self._add_bucket_type(req, bucket_type)
        # Bucket streaming landed in the same release as timeouts, so
        # we don't need to check the capability.
        if timeout:
            req.timeout = timeout
        self._send_msg(MSG_CODE_LIST_BUCKETS_REQ, req)
        return RiakPbcBucketStream(self)
    def get_bucket_props(self, bucket):
        """
        Serialize bucket property request and deserialize response
        """
        req = riak_pb.RpbGetBucketReq()
        req.bucket = str_to_bytes(bucket.name)
        self._add_bucket_type(req, bucket.bucket_type)
        msg_code, resp = self._request(MSG_CODE_GET_BUCKET_REQ, req,
                                       MSG_CODE_GET_BUCKET_RESP)
        return self._decode_bucket_props(resp.props)
    def set_bucket_props(self, bucket, props):
        """
        Serialize set bucket property request and deserialize response
        """
        req = riak_pb.RpbSetBucketReq()
        req.bucket = str_to_bytes(bucket.name)
        self._add_bucket_type(req, bucket.bucket_type)
        if not self.pb_all_bucket_props():
            # Old servers only understand the two basic properties.
            for key in props:
                if key not in ('n_val', 'allow_mult'):
                    raise NotImplementedError('Server only supports n_val and '
                                              'allow_mult properties over PBC')
        self._encode_bucket_props(props, req)
        msg_code, resp = self._request(MSG_CODE_SET_BUCKET_REQ, req,
                                       MSG_CODE_SET_BUCKET_RESP)
        return True
    def clear_bucket_props(self, bucket):
        """
        Clear bucket properties, resetting them to their defaults
        """
        if not self.pb_clear_bucket_props():
            return False
        req = riak_pb.RpbResetBucketReq()
        req.bucket = str_to_bytes(bucket.name)
        self._add_bucket_type(req, bucket.bucket_type)
        self._request(MSG_CODE_RESET_BUCKET_REQ, req,
                      MSG_CODE_RESET_BUCKET_RESP)
        return True
    def get_bucket_type_props(self, bucket_type):
        """
        Fetch bucket-type properties
        """
        self._check_bucket_types(bucket_type)
        req = riak_pb.RpbGetBucketTypeReq()
        req.type = str_to_bytes(bucket_type.name)
        msg_code, resp = self._request(MSG_CODE_GET_BUCKET_TYPE_REQ, req,
                                       MSG_CODE_GET_BUCKET_RESP)
        return self._decode_bucket_props(resp.props)
    def set_bucket_type_props(self, bucket_type, props):
        """
        Set bucket-type properties
        """
        self._check_bucket_types(bucket_type)
        req = riak_pb.RpbSetBucketTypeReq()
        req.type = str_to_bytes(bucket_type.name)
        self._encode_bucket_props(props, req)
        msg_code, resp = self._request(MSG_CODE_SET_BUCKET_TYPE_REQ, req,
                                       MSG_CODE_SET_BUCKET_RESP)
        return True
    def mapred(self, inputs, query, timeout=None):
        """ Run a MapReduce job and collect the streamed phase results.
        Returns None for no results, the single phase's result when only one
        phase produced output, or a dict of {phase: results} otherwise. """
        # dictionary of phase results - each content should be an encoded array
        # which is appended to the result for that phase.
        result = {}
        for phase, content in self.stream_mapred(inputs, query, timeout):
            if phase in result:
                result[phase] += content
            else:
                result[phase] = content
        # If a single result - return the same as the HTTP interface does
        # otherwise return all the phase information
        if not len(result):
            return None
        elif len(result) == 1:
            return result[max(result.keys())]
        else:
            return result
    def stream_mapred(self, inputs, query, timeout=None):
        """ Start a MapReduce job and return an iterator over
        (phase, content) results. """
        # Construct the job, optionally set the timeout...
        content = self._construct_mapred_json(inputs, query, timeout)
        req = riak_pb.RpbMapRedReq()
        req.request = str_to_bytes(content)
        req.content_type = str_to_bytes("application/json")
        self._send_msg(MSG_CODE_MAP_RED_REQ, req)
        return RiakPbcMapredStream(self)
    def get_index(self, bucket, index, startkey, endkey=None,
                  return_terms=None, max_results=None, continuation=None,
                  timeout=None, term_regex=None):
        """ Run a secondary-index query.
        Returns a (results, continuation) pair; continuation is None unless
        pagination (max_results) is in use. Falls back to a MapReduce
        emulation on servers without native 2i support. """
        if not self.pb_indexes():
            return self._get_index_mapred_emu(bucket, index, startkey, endkey)
        if term_regex and not self.index_term_regex():
            raise NotImplementedError("Secondary index term_regex is not "
                                      "supported")
        req = self._encode_index_req(bucket, index, startkey, endkey,
                                     return_terms, max_results, continuation,
                                     timeout, term_regex)
        msg_code, resp = self._request(MSG_CODE_INDEX_REQ, req,
                                       MSG_CODE_INDEX_RESP)
        if return_terms and resp.results:
            results = [(decode_index_value(index, pair.key),
                        bytes_to_str(pair.value))
                       for pair in resp.results]
        else:
            # NOTE(review): on PY3 the copy below is immediately replaced by the
            # decoded list — the slice-copy only matters on PY2.
            results = resp.keys[:]
            if PY3:
                results = [bytes_to_str(key) for key in resp.keys]
        if max_results is not None and resp.HasField('continuation'):
            return (results, bytes_to_str(resp.continuation))
        else:
            return (results, None)
    def stream_index(self, bucket, index, startkey, endkey=None,
                     return_terms=None, max_results=None, continuation=None,
                     timeout=None, term_regex=None):
        """ Run a secondary-index query in streaming mode; returns an
        iterator over result chunks. """
        if not self.stream_indexes():
            raise NotImplementedError("Secondary index streaming is not "
                                      "supported")
        if term_regex and not self.index_term_regex():
            raise NotImplementedError("Secondary index term_regex is not "
                                      "supported")
        req = self._encode_index_req(bucket, index, startkey, endkey,
                                     return_terms, max_results, continuation,
                                     timeout, term_regex)
        req.stream = True
        self._send_msg(MSG_CODE_INDEX_REQ, req)
        return RiakPbcIndexStream(self, index, return_terms)
    def create_search_index(self, index, schema=None, n_val=None):
        """ Create a Yokozuna (Search 2.0) index; returns True on success. """
        if not self.pb_search_admin():
            raise NotImplementedError("Search 2.0 administration is not "
                                      "supported for this version")
        index = str_to_bytes(index)
        idx = riak_pb.RpbYokozunaIndex(name=index)
        if schema:
            idx.schema = str_to_bytes(schema)
        if n_val:
            idx.n_val = n_val
        req = riak_pb.RpbYokozunaIndexPutReq(index=idx)
        self._request(MSG_CODE_YOKOZUNA_INDEX_PUT_REQ, req,
                      MSG_CODE_PUT_RESP)
        return True
    def get_search_index(self, index):
        """ Fetch a Yokozuna index by name; raises RiakError('notfound')
        when it does not exist. """
        if not self.pb_search_admin():
            raise NotImplementedError("Search 2.0 administration is not "
                                      "supported for this version")
        req = riak_pb.RpbYokozunaIndexGetReq(name=str_to_bytes(index))
        msg_code, resp = self._request(MSG_CODE_YOKOZUNA_INDEX_GET_REQ, req,
                                       MSG_CODE_YOKOZUNA_INDEX_GET_RESP)
        if len(resp.index) > 0:
            return self._decode_search_index(resp.index[0])
        else:
            raise RiakError('notfound')
    def list_search_indexes(self):
        """ List all Yokozuna indexes as decoded dicts. """
        if not self.pb_search_admin():
            raise NotImplementedError("Search 2.0 administration is not "
                                      "supported for this version")
        req = riak_pb.RpbYokozunaIndexGetReq()
        msg_code, resp = self._request(MSG_CODE_YOKOZUNA_INDEX_GET_REQ, req,
                                       MSG_CODE_YOKOZUNA_INDEX_GET_RESP)
        return [self._decode_search_index(index) for index in resp.index]
    def delete_search_index(self, index):
        """ Delete a Yokozuna index by name; returns True on success. """
        if not self.pb_search_admin():
            raise NotImplementedError("Search 2.0 administration is not "
                                      "supported for this version")
        req = riak_pb.RpbYokozunaIndexDeleteReq(name=str_to_bytes(index))
        self._request(MSG_CODE_YOKOZUNA_INDEX_DELETE_REQ, req,
                      MSG_CODE_DEL_RESP)
        return True
    def create_search_schema(self, schema, content):
        """ Upload a Yokozuna schema; returns True on success. """
        if not self.pb_search_admin():
            raise NotImplementedError("Search 2.0 administration is not "
                                      "supported for this version")
        scma = riak_pb.RpbYokozunaSchema(name=str_to_bytes(schema),
                                         content=str_to_bytes(content))
        req = riak_pb.RpbYokozunaSchemaPutReq(schema=scma)
        self._request(MSG_CODE_YOKOZUNA_SCHEMA_PUT_REQ, req,
                      MSG_CODE_PUT_RESP)
        return True
    def get_search_schema(self, schema):
        """ Fetch a Yokozuna schema; returns a dict with 'name' and 'content'. """
        if not self.pb_search_admin():
            raise NotImplementedError("Search 2.0 administration is not "
                                      "supported for this version")
        req = riak_pb.RpbYokozunaSchemaGetReq(name=str_to_bytes(schema))
        msg_code, resp = self._request(MSG_CODE_YOKOZUNA_SCHEMA_GET_REQ, req,
                                       MSG_CODE_YOKOZUNA_SCHEMA_GET_RESP)
        result = {}
        result['name'] = bytes_to_str(resp.schema.name)
        result['content'] = bytes_to_str(resp.schema.content)
        return result
    def search(self, index, query, **params):
        """ Run a search query; returns a dict with 'docs' and, when present,
        'max_score' and 'num_found'. Falls back to MapReduce emulation on
        servers without protobuf search. """
        if not self.pb_search():
            return self._search_mapred_emu(index, query)
        if PY2 and isinstance(query, unicode):
            # PY2-only: protobufs need encoded bytes, not a unicode object.
            query = query.encode('utf8')
        req = riak_pb.RpbSearchQueryReq(index=str_to_bytes(index),
                                        q=str_to_bytes(query))
        self._encode_search_query(req, params)
        msg_code, resp = self._request(MSG_CODE_SEARCH_QUERY_REQ, req,
                                       MSG_CODE_SEARCH_QUERY_RESP)
        result = {}
        if resp.HasField('max_score'):
            result['max_score'] = resp.max_score
        if resp.HasField('num_found'):
            result['num_found'] = resp.num_found
        result['docs'] = [self._decode_search_doc(doc) for doc in resp.docs]
        return result
    def get_counter(self, bucket, key, **params):
        """ Fetch a legacy (1.4-style) counter value, or None when absent.
        Only valid in the default bucket-type. """
        if not bucket.bucket_type.is_default():
            raise NotImplementedError("Counters are not "
                                      "supported with bucket-types, "
                                      "use datatypes instead.")
        if not self.counters():
            raise NotImplementedError("Counters are not supported")
        req = riak_pb.RpbCounterGetReq()
        req.bucket = str_to_bytes(bucket.name)
        req.key = str_to_bytes(key)
        if params.get('r') is not None:
            req.r = self._encode_quorum(params['r'])
        if params.get('pr') is not None:
            req.pr = self._encode_quorum(params['pr'])
        if params.get('basic_quorum') is not None:
            req.basic_quorum = params['basic_quorum']
        if params.get('notfound_ok') is not None:
            req.notfound_ok = params['notfound_ok']
        msg_code, resp = self._request(MSG_CODE_COUNTER_GET_REQ, req,
                                       MSG_CODE_COUNTER_GET_RESP)
        if resp.HasField('value'):
            return resp.value
        else:
            return None
    def update_counter(self, bucket, key, value, **params):
        """ Increment a legacy counter by *value*; returns the new value when
        the server echoes it back, True otherwise. Only valid in the
        default bucket-type. """
        if not bucket.bucket_type.is_default():
            raise NotImplementedError("Counters are not "
                                      "supported with bucket-types, "
                                      "use datatypes instead.")
        if not self.counters():
            raise NotImplementedError("Counters are not supported")
        req = riak_pb.RpbCounterUpdateReq()
        req.bucket = str_to_bytes(bucket.name)
        req.key = str_to_bytes(key)
        req.amount = value
        if params.get('w') is not None:
            req.w = self._encode_quorum(params['w'])
        if params.get('dw') is not None:
            req.dw = self._encode_quorum(params['dw'])
        if params.get('pw') is not None:
            req.pw = self._encode_quorum(params['pw'])
        if params.get('returnvalue') is not None:
            req.returnvalue = params['returnvalue']
        msg_code, resp = self._request(MSG_CODE_COUNTER_UPDATE_REQ, req,
                                       MSG_CODE_COUNTER_UPDATE_RESP)
        if resp.HasField('value'):
            return resp.value
        else:
            return True
    def fetch_datatype(self, bucket, key, **options):
        """ Fetch a CRDT datatype; requires a non-default bucket-type and
        datatype support on the server. """
        if bucket.bucket_type.is_default():
            raise NotImplementedError("Datatypes cannot be used in the default"
                                      " bucket-type.")
        if not self.datatypes():
            raise NotImplementedError("Datatypes are not supported.")
        req = riak_pb.DtFetchReq()
        req.type = str_to_bytes(bucket.bucket_type.name)
        req.bucket = str_to_bytes(bucket.name)
        req.key = str_to_bytes(key)
        self._encode_dt_options(req, options)
        msg_code, resp = self._request(MSG_CODE_DT_FETCH_REQ, req,
                                       MSG_CODE_DT_FETCH_RESP)
        return self._decode_dt_fetch(resp)
    def update_datatype(self, datatype, **options):
        """ Send the datatype's pending operation to the server; mutates the
        datatype (key, context, optionally value) and returns True.
        Raises ValueError when the datatype has no pending operation. """
        if datatype.bucket.bucket_type.is_default():
            raise NotImplementedError("Datatypes cannot be used in the default"
                                      " bucket-type.")
        if not self.datatypes():
            raise NotImplementedError("Datatypes are not supported.")
        op = datatype.to_op()
        type_name = datatype.type_name
        if not op:
            raise ValueError("No operation to send on datatype {!r}".
                             format(datatype))
        req = riak_pb.DtUpdateReq()
        req.bucket = str_to_bytes(datatype.bucket.name)
        req.type = str_to_bytes(datatype.bucket.bucket_type.name)
        if datatype.key:
            req.key = str_to_bytes(datatype.key)
        if datatype._context:
            req.context = datatype._context
        self._encode_dt_options(req, options)
        self._encode_dt_op(type_name, req, op)
        msg_code, resp = self._request(MSG_CODE_DT_UPDATE_REQ, req,
                                       MSG_CODE_DT_UPDATE_RESP)
        if resp.HasField('key'):
            # NOTE(review): unlike put(), this keeps the server-generated key as
            # bytes on PY3 (no bytes_to_str) — confirm whether that is intended.
            datatype.key = resp.key[:]
        if resp.HasField('context'):
            datatype._context = resp.context[:]
        if options.get('return_body'):
            datatype._set_value(self._decode_dt_value(type_name, resp))
        return True
|
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#!/usr/bin/env python
'''
Little example on how to use the Network class to create a model and perform
a basic classification of the MNIST dataset
'''
#from NumPyNet.layers.input_layer import Input_layer
from NumPyNet.layers.connected_layer import Connected_layer
from NumPyNet.layers.convolutional_layer import Convolutional_layer
from NumPyNet.layers.maxpool_layer import Maxpool_layer
from NumPyNet.layers.softmax_layer import Softmax_layer
# from NumPyNet.layers.dropout_layer import Dropout_layer
# from NumPyNet.layers.cost_layer import Cost_layer
# from NumPyNet.layers.cost_layer import cost_type
from NumPyNet.layers.batchnorm_layer import BatchNorm_layer
from NumPyNet.network import Network
from NumPyNet.optimizer import Adam
# from NumPyNet.optimizer import Adam, SGD, Momentum
from NumPyNet.utils import to_categorical
from NumPyNet.utils import from_categorical
from NumPyNet.metrics import mean_accuracy_score
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__author__ = ['Mattia Ceccarelli', 'Nico Curti']
__email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
def accuracy (y_true, y_pred):
    '''
    Temporary metrics to overcome "from_categorical" missing in standard metrics
    '''
    # Decode both one-hot arrays back to label vectors, then score them.
    return mean_accuracy_score(from_categorical(y_true),
                               from_categorical(y_pred))
# In[32]:
np.random.seed(123)
digits = datasets.load_digits()
X, y = digits.images, digits.target
# del digits
# add channels to images
X = np.asarray([np.dstack((x, x, x)) for x in X])
X = X.transpose(0, 2, 3, 1)
print("X shape",X.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=.33,
random_state=42)
batch = 128
num_classes = len(set(y))
# del X, y
# normalization to [0, 1]
X_train *= 1. / 255.
X_test *= 1. / 255.
# reduce the size of the data set for testing
############################################
train_size = 512
test_size = 300
X_train = X_train[:train_size, ...]
y_train = y_train[:train_size]
X_test = X_test[ :test_size, ...]
y_test = y_test[ :test_size]
############################################
n_train = X_train.shape[0]
n_test = X_test.shape[0]
# transform y to array of dimension 10 and in 4 dimension
y_train = to_categorical(y_train).reshape(n_train, 1, 1, -1)
y_test = to_categorical(y_test).reshape(n_test, 1, 1, -1)
# Create the model and training
model = Network(batch=batch, input_shape=X_train.shape[1:])
model.add(Convolutional_layer(size=3, filters=32, stride=1, pad=True, activation='Relu'))
model.add(BatchNorm_layer())
model.add(Maxpool_layer(size=2, stride=1, padding=True))
model.add(Connected_layer(outputs=100, activation='Relu'))
model.add(BatchNorm_layer())
model.add(Connected_layer(outputs=num_classes, activation='Linear'))
model.add(Softmax_layer(spatial=True, groups=1, temperature=1.))
# model.add(Cost_layer(cost_type=cost_type.mse))
# model.compile(optimizer=SGD(lr=0.01, decay=0., lr_min=0., lr_max=np.inf))
model.compile(optimizer=Adam(), metrics=[accuracy])
print('*************************************')
print('\n Total input dimension: {}'.format(X_train.shape), '\n')
print('**************MODEL SUMMARY***********')
model.summary()
print('\n***********START TRAINING***********\n')
# Fit the model on the training set
model.fit(X=X_train, y=y_train, max_iter=10, verbose=True)
print('\n***********START TESTING**************\n')
# Test the prediction with timing
loss, out = model.evaluate(X=X_test, truth=y_test, verbose=True)
truth = from_categorical(y_test)
predicted = from_categorical(out)
accuracy2 = mean_accuracy_score(truth, predicted)
print('\nLoss Score: {:.3f}'.format(loss))
print('Accuracy Score: {:.3f}'.format(accuracy2))
# SGD : best score I could obtain was 94% with 10 epochs, lr = 0.01 %
# Momentum : best score I could obtain was 93% with 10 epochs
# Adam : best score I could obtain was 95% with 10 epochs
# In[33]:
X.dtype
# In[34]:
X.shape
# In[ ]:
# In[3]:
# Load the scikit-learn handwritten digits dataset (8x8 grayscale images).
digits = datasets.load_digits()
X, y = digits.images, digits.target
# In[4]:
X.shape
# In[17]:
# `classy` is a project-local image/dataset helper library.
import classy
images=classy.image.load_images('data/digits')
# In[18]:
images.keys()
# In[19]:
len(images.data)
# In[20]:
# Color image set used by the training cells further down.
images_color=classy.image.load_images('data/all_pieces')
# In[21]:
images_color.data[0].shape
# In[22]:
# Replicate the single grayscale channel three times, then reorder axes so the
# channel dimension comes last.
X2 = np.asarray([np.dstack((x, x, x)) for x in X])
X2 = X2.transpose(0, 2, 3, 1)
X2.shape
# In[23]:
# Normalise the image container to a 4-D array: replicate the channel for
# grayscale input, pass color input through, then put channels last.
im = images_color
if len(im.data[0].shape) == 2:  # grayscale
    X = np.array(im.data)
    X2 = np.asarray([np.dstack((g, g, g)) for g in X])
    X2 = X2.transpose(0, 2, 3, 1)
else:
    X = np.array(im.data)
    X2 = X[:]
    X2 = X2.transpose(0, 2, 3, 1)
X.shape, X2.shape
# In[42]:
# Split the color image set into train/test partitions and print summaries.
images=images_color
images_train,images_test=classy.image.split(images,verbose=False)
classy.summary(images_train)
classy.summary(images_test)
# In[51]:
# Prepare the training tensors: force a channels-last 4-D layout (replicating
# the channel for grayscale data), scale pixels into [0, 1], and one-hot
# encode the targets into the (N, 1, 1, C) shape the network expects.
im = images_train
n_train = len(im.data)
num_classes = len(im.target_names)
if len(im.data[0].shape) == 2:  # grayscale -> replicate the channel 3x
    X = np.array(im.data)
    X = np.asarray([np.dstack((g, g, g)) for g in X])
    X = X.transpose(0, 2, 3, 1)
else:
    X = np.array(im.data)
    X = X.transpose(0, 2, 3, 1)
# normalization to [0, 1]
X = X / X.max()
y = to_categorical(im.targets).reshape(n_train, 1, 1, -1)
# In[52]:
# Create the model and train it on the data prepared in the previous cell.
model = Network(batch=n_train, input_shape=X.shape[1:])
model.add(Convolutional_layer(size=3, filters=32, stride=1, pad=True, activation='Relu'))
model.add(BatchNorm_layer())
model.add(Maxpool_layer(size=2, stride=1, padding=True))
model.add(Connected_layer(outputs=100, activation='Relu'))
model.add(BatchNorm_layer())
model.add(Connected_layer(outputs=num_classes, activation='Linear'))
model.add(Softmax_layer(spatial=True, groups=1, temperature=1.))
# model.add(Cost_layer(cost_type=cost_type.mse))
# model.compile(optimizer=SGD(lr=0.01, decay=0., lr_min=0., lr_max=np.inf))
model.compile(optimizer=Adam(), metrics=[accuracy])
print('*************************************')
# BUG FIX: this cell trains on X (prepared above from images_train), not on
# X_train from the earlier MNIST section; printing X_train.shape here reported
# a stale, unrelated shape (or raised NameError in a fresh kernel).
print('\n Total input dimension: {}'.format(X.shape), '\n')
print('**************MODEL SUMMARY***********')
model.summary()
print('\n***********START TRAINING***********\n')
# Fit the model on the training set
model.fit(X=X, y=y, max_iter=10, verbose=True)
# In[53]:
# Prepare the test tensors exactly like the training tensors above:
# channels-last layout, [0, 1] scaling, one-hot targets of shape (N, 1, 1, C).
im = images_test
n_test = len(im.data)
num_classes = len(im.target_names)
if len(im.data[0].shape) == 2:  # grayscale -> replicate the channel 3x
    X = np.array(im.data)
    X = np.asarray([np.dstack((g, g, g)) for g in X])
    X = X.transpose(0, 2, 3, 1)
else:
    X = np.array(im.data)
    X = X.transpose(0, 2, 3, 1)
# normalization to [0, 1]
X = X / X.max()
y = to_categorical(im.targets).reshape(n_test, 1, 1, -1)
# In[54]:
# Test the prediction with timing
# Resize the network's batch to the test-set size before evaluating.
model.batch=n_test
loss, out = model.evaluate(X=X, truth=y, verbose=True)
# Convert one-hot vectors back to class indices for scoring.
truth = from_categorical(y)
predicted = from_categorical(out)
accuracy2 = mean_accuracy_score(truth, predicted)
print('\nLoss Score: {:.3f}'.format(loss))
print('Accuracy Score: {:.3f}'.format(accuracy2))
# In[47]:
n_test
# In[41]:
X.shape
# In[ ]:
|
|
# Copyright (c) 2015 William Lees
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# A class to manage and interpret CDR definitions specified against an Alignment.
__author__ = 'William Lees'
__docformat__ = "restructuredtext en"
import os
import sys
from Alignment import Alignment
class AnalyseCDR():
    """
    Manage and interpret CDR definitions specified against an Alignment.

    Create the class against the specified alignment. Cdr positions are specified either as a list of ranges, or
    in a file. If in a file, the file should contain a single line consisting of six positions, separated by commas.

    >>> msa = Alignment("testfiles/test_cdr.fasta")
    >>> pos = msa.read_position_numbers(file_name="testfiles/test_cdr_seqnum.txt")
    >>> acdr = AnalyseCDR(msa, file_name="testfiles/test_cdr_range.txt")
    >>> print acdr.get_cdrs(msa[0])
    ['QEEEEK', 'QGGGGK', 'QLLLLK']
    """

    def __init__(self, alignment, cdrs=None, file_name=None):
        # cdrs: three [start, end] position pairs; file_name, if given,
        # overrides cdrs with the six positions read from the file.
        self.cdrs = cdrs
        if file_name:
            self.__read_cdrs(file_name)
        self.alignment = alignment
        self.__adjust_cdrs()

    def __adjust_cdrs(self):
        # Clip the configured CDR bounds to the alignment and precompute, for
        # each CDR, the range of alignment indexes it covers (an empty range
        # if the CDR lies wholly outside the alignment).
        if self.cdrs and self.alignment:
            # Adjust cdr3 upper bound, to allow for the possibility that in the
            # file it might be set higher than the end of the alignment we have
            if self.cdrs[2][1] != "0" and not self.alignment.within_positions(self.cdrs[2][1]):
                self.cdrs[2][1] = self.alignment.max_position()
            # Check all lower bounds, and adjust if the lower bound is outside
            # the alignment, but the upper bound is inside
            for i in range(3):
                if self.alignment.within_positions(self.cdrs[i][1]) and not self.alignment.within_positions(self.cdrs[i][0]):
                    self.cdrs[i][0] = self.alignment.min_position()
            self.range_cdr = []
            for i in range(3):
                if self.alignment.within_positions(self.cdrs[i][0]) and self.alignment.within_positions(self.cdrs[i][1]):
                    self.range_cdr.append(range(self.alignment.index_of(str(self.cdrs[i][0])), self.alignment.index_of(str(self.cdrs[i][1])) + 1))
                else:
                    self.range_cdr.append(range(-1, -1))

    def analyse(self):
        """
        Return an html table showing which CDR positions in the alignment set are common to germline (the first record),
        common across all sequences except the germline, and which vary between sequences

        >>> msa = Alignment("testfiles/test_cdr.fasta")
        >>> pos = msa.read_position_numbers(file_name="testfiles/test_cdr_seqnum.txt")
        >>> acdr = AnalyseCDR(msa, file_name="testfiles/test_cdr_range.txt")
        >>> print acdr.analyse()
        <table class='table' border=1>
        <tr><th></th><th>Conserved to Germline</th><th>Common to All</th><th>Variations Across Samples</th></tr>
        <tr><td>CDR1</td><td>20Q,23E,24E,25K</td><td>21H,22H</td><td></td></tr>
        <tr><td>CDR2</td><td>30Q,32G,33G,34G,35K</td><td></td><td>31:AS</td></tr>
        <tr><td>CDR3</td><td>40Q,41L,42L,43L,44L,45K</td><td></td><td></td></tr>
        </table>
        <BLANKLINE>
        """
        # Column-by-column: is the residue identical across every record that
        # is not an inferred internal node ("node #" in the id)?
        common_to_germline = []
        for i in range(0, len(self.alignment[0])):
            value = None
            common = True
            for rec in self.alignment:
                if not "node #" in rec.id:
                    if value is None:
                        value = rec.seq[i]
                    elif rec.seq[i] != value:
                        common = False
                        break
            common_to_germline.append(common)
        # Same test, but excluding the germline (record 0).
        common_to_descendents = []
        for i in range(0, len(self.alignment[0])):
            value = None
            common = True
            for (ind, rec) in enumerate(self.alignment):
                if ind != 0 and not "node #" in rec.id:
                    if value is None:
                        value = rec.seq[i]
                    elif rec.seq[i] != value:
                        common = False
                        break
            common_to_descendents.append(common)
        ctg = [[], [], []]      # positions conserved to germline, per CDR
        ctd = [[], [], []]      # positions common to all descendents, per CDR
        leaves = [[], [], []]   # variable positions with their variant sets
        for i in range(3):
            for j in range(0, len(self.alignment[0])):
                if j in self.range_cdr[i]:
                    if common_to_germline[j]:
                        ctg[i].append(self.alignment.position_of(j) + str(self.alignment[0].seq[j]))
                    elif common_to_descendents[j]:
                        ctd[i].append(self.alignment.position_of(j) + str(self.alignment[1].seq[j]))
                    else:
                        variants = []
                        for (ind, rec) in enumerate(self.alignment):
                            if ind != 0 and not "node #" in rec.id:
                                if rec.seq[j] not in variants:
                                    variants.append(rec.seq[j])
                        variants.sort()
                        leaves[i].append(self.alignment.position_of(j) + ":" + "".join(variants))
        output = "<table class='table' border=1>\n<tr><th></th><th>Conserved to Germline</th><th>Common to All</th><th>Variations Across Samples</th></tr>\n"
        for i in range(3):
            output += "<tr><td>CDR" + str(i+1) + "</td><td>" + ",".join(ctg[i]) + "</td><td>" + ",".join(ctd[i]) + "</td><td>" + ",".join(leaves[i]) + "</td></tr>\n"
        output += "</table>\n"
        return output

    def __read_cdrs(self, cdrfile):
        # Read six comma-separated positions from the first line of cdrfile
        # into self.cdrs as three [start, end] pairs of strings.
        # BUG FIX: the file handle was previously never closed.
        with open(cdrfile, "r") as fi:
            line = fi.readline().strip().replace(" ", "")
        if len(line) == 0:
            return
        self.cdrs = [[], [], []]
        incdrs = line.split(",")
        if len(incdrs) != 6:
            raise AttributeError("CDR file must contain exactly 6 positions on one line, separated by commas")
        for i in range(3):
            a = str(incdrs[i*2])
            b = str(incdrs[i*2 + 1])
            self.cdrs[i].append(a)
            self.cdrs[i].append(b)
        return

    # Return a vector containing the CDRs in the specified record of the alignment, given the cdr position numbers
    def get_cdrs(self, rec):
        # Be flexible about the end point of CDR3
        if self.cdrs[2][1] != "0" and not self.alignment.within_positions(self.cdrs[2][1]):
            self.cdrs[2][1] = self.alignment.max_position()
        ret = []
        for i in range(3):
            if self.alignment.within_positions(self.cdrs[i][0]) and self.alignment.within_positions(self.cdrs[i][1]):
                a = self.alignment.index_of(self.cdrs[i][0])
                b = self.alignment.index_of(self.cdrs[i][1])
                ret.append(str(rec.seq)[a:b+1])
            else:
                ret.append("")
        return ret

    def categorize_index(self, index):
        """
        Categorise an index into the alignment
        returns:
        1..3 if the position is in CDR1..3
        11..14 if it is in FR1..4

        >>> msa = Alignment("testfiles/test_cdr.fasta")
        >>> pos = msa.read_position_numbers(file_name="testfiles/test_cdr_seqnum.txt")
        >>> acdr = AnalyseCDR(msa, file_name="testfiles/test_cdr_range.txt")
        >>> print acdr.categorize_index(0)
        11
        >>> print acdr.categorize_index(10)
        1
        >>> print acdr.categorize_index(35)
        3
        >>> print acdr.categorize_index(36)
        14
        """
        for i in range(3):
            if index in self.range_cdr[i]:
                return i+1
        # Not in any CDR: it belongs to the framework region that precedes the
        # first CDR starting after it, or to FR4 if it follows CDR3.
        for i in range(3):
            if len(self.range_cdr[i]) > 0 and index < self.range_cdr[i][0]:
                return i+11
        return 14

    def category_diff(self, id1, id0=0):
        """
        Express the difference between sequence id1 and the baseline sequence id0 as a text string showing the
        count of differences in each CDR and FR, formatted FR1<CDR1>FR2<CDR2>FR3<CDR3>FR4.
        If id0 is not specified, the first sequence in the set is used.

        >>> msa = Alignment("testfiles/test_cdr.fasta")
        >>> pos = msa.read_position_numbers(file_name="testfiles/test_cdr_seqnum.txt")
        >>> acdr = AnalyseCDR(msa, file_name="testfiles/test_cdr_range.txt")
        >>> print acdr.category_diff("1", "2")
        1<2>0<1>0<0>0
        >>> print acdr.category_diff("1", "1")
        <BLANKLINE>
        """
        totals = {}
        seq1 = self.alignment[0].seq if not id0 else self.alignment.get_record(id0).seq
        seq2 = self.alignment.get_record(id1).seq
        for i in range(len(seq1)):
            if seq1[i] != seq2[i]:
                cat = self.categorize_index(i)
                totals[cat] = totals.get(cat, 0) + 1
        if len(totals) > 0:
            # BUG FIX: the doctest previously showed 1(2)0(1)0(0)0, although
            # this format string has always emitted angle brackets.
            return "%d<%d>%d<%d>%d<%d>%d" % (totals.get(11,0), totals.get(1,0), totals.get(12,0),
                                             totals.get(2,0), totals.get(13,0),
                                             totals.get(3,0), totals.get(14,0))
        else:
            return ""
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import HTMLParser
import smtplib
from frappe import msgprint, throw, _
from frappe.email.smtp import SMTPServer, get_outgoing_email_account
from frappe.email.email_body import get_email, get_formatted_html
from frappe.utils.verified_command import get_signed_params, verify_request
from html2text import html2text
from frappe.utils import get_url, nowdate, encode, now_datetime, add_days
class BulkLimitCrossedError(frappe.ValidationError): pass
def send(recipients=None, sender=None, subject=None, message=None, reference_doctype=None,
		reference_name=None, unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
		attachments=None, reply_to=None, cc=(), message_id=None, send_after=None):
	"""Add email to sending queue (Bulk Email)

	:param recipients: List of recipients.
	:param sender: Email sender.
	:param subject: Email subject.
	:param message: Email message.
	:param reference_doctype: Reference DocType of caller document.
	:param reference_name: Reference name of caller document.
	:param unsubscribe_method: URL method for unsubscribe. Default is `/api/method/frappe.email.bulk.unsubscribe`.
	:param unsubscribe_params: additional params for unsubscribed links. default are name, doctype, email
	:param attachments: Attachments to be sent.
	:param reply_to: Reply to be captured here (default inbox)
	:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
	:param send_after: Send this email after the given datetime. If value is in integer, then `send_after` will be the automatically set to no of days from current date.
	"""
	if not recipients:
		return
	if not unsubscribe_method:
		unsubscribe_method = "/api/method/frappe.email.bulk.unsubscribe"
	if isinstance(recipients, basestring):
		recipients = recipients.split(",")
	if isinstance(send_after, int):
		send_after = add_days(nowdate(), send_after)
	if not sender or sender == "Administrator":
		email_account = get_outgoing_email_account()
		sender = email_account.get("sender") or email_account.email_id
	check_bulk_limit(recipients)
	formatted = get_formatted_html(subject, message)
	try:
		text_content = html2text(formatted)
	except HTMLParser.HTMLParseError:
		# plain-text part could not be derived; point readers at the html part
		text_content = "See html attachment"
	# collect addresses that opted out for this reference, plus global opt-outs
	unsubscribed = []
	if reference_doctype and reference_name:
		unsubscribed = [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
			{"reference_doctype": reference_doctype, "reference_name": reference_name})]
		unsubscribed += [d.email for d in frappe.db.get_all("Email Unsubscribe", "email",
			{"global_unsubscribe": 1})]
	# de-duplicate and drop empty entries before queueing
	for email in filter(None, list(set(recipients))):
		if email in unsubscribed:
			continue
		email_content = formatted
		email_text_context = text_content
		if reference_doctype:
			unsubscribe_url = get_unsubcribed_url(reference_doctype, reference_name, email,
				unsubscribe_method, unsubscribe_params)
			# add to queue
			email_content = add_unsubscribe_link(email_content, email, reference_doctype,
				reference_name, unsubscribe_url, unsubscribe_message)
			email_text_context += "\n" + _("This email was sent to {0}. To unsubscribe click on this link: {1}").format(email, unsubscribe_url)
		add(email, sender, subject, email_content, email_text_context, reference_doctype,
			reference_name, attachments, reply_to, cc, message_id, send_after)
def add(email, sender, subject, formatted, text_content=None,
		reference_doctype=None, reference_name=None, attachments=None, reply_to=None,
		cc=(), message_id=None, send_after=None):
	"""Append one outgoing message to the Bulk Email queue."""
	bulk = frappe.new_doc('Bulk Email')
	bulk.sender = sender
	bulk.recipient = email
	try:
		mail = get_email(email, sender=bulk.sender, formatted=formatted, subject=subject,
			text_content=text_content, attachments=attachments, reply_to=reply_to, cc=cc)
		if message_id:
			mail.set_message_id(message_id)
		bulk.message = mail.as_string()
	except frappe.InvalidEmailAddressError:
		# bad email id - don't add to queue
		return
	bulk.reference_doctype = reference_doctype
	bulk.reference_name = reference_name
	bulk.send_after = send_after
	bulk.insert(ignore_permissions=True)
def check_bulk_limit(recipients):
	"""Raise BulkLimitCrossedError if queueing mail for `recipients` would push
	this month's sent total past the configured monthly bulk mail limit."""
	# get count of mails sent this month
	this_month = frappe.db.sql("""select count(*) from `tabBulk Email` where
		status='Sent' and MONTH(creation)=MONTH(CURDATE())""")[0][0]
	# The limit only applies when the account comes from site_config.json (or
	# in tests); email settings the user configured themselves are unlimited.
	smtp_server = SMTPServer()
	uses_site_config = (smtp_server.email_account
		and getattr(smtp_server.email_account, "from_site_config", False))
	if uses_site_config or frappe.flags.in_test:
		monthly_bulk_mail_limit = frappe.conf.get('monthly_bulk_mail_limit') or 500
		if (this_month + len(recipients)) > monthly_bulk_mail_limit:
			throw(_("Email limit {0} crossed").format(monthly_bulk_mail_limit),
				BulkLimitCrossedError)
def add_unsubscribe_link(message, email, reference_doctype, reference_name, unsubscribe_url, unsubscribe_message):
	"""Replace the <!--unsubscribe link here--> placeholder in `message` with
	a footer that links to `unsubscribe_url`.

	BUG FIX: the anchor's style attribute was missing its closing quote, so
	target="_blank" was swallowed into the style value and every outgoing
	bulk mail carried broken HTML.
	"""
	unsubscribe_link = """<div style="padding: 7px; text-align: center; color: #8D99A6;">
				{email}. <a href="{unsubscribe_url}" style="color: #8D99A6; text-decoration: underline;"
					target="_blank">{unsubscribe_message}.
				</a>
			</div>""".format(unsubscribe_url = unsubscribe_url,
		email= _("This email was sent to {0}").format(email),
		unsubscribe_message = unsubscribe_message or _("Unsubscribe from this list"))
	message = message.replace("<!--unsubscribe link here-->", unsubscribe_link)
	return message
def get_unsubcribed_url(reference_doctype, reference_name, email, unsubscribe_method, unsubscribe_params):
	"""Build the signed unsubscribe URL for `email` against the given
	reference document. (The misspelled name is kept: callers use it.)
	"""
	params = {"email": email.encode("utf-8"),
		"doctype": reference_doctype.encode("utf-8"),
		"name": reference_name.encode("utf-8")}
	if unsubscribe_params:
		params.update(unsubscribe_params)
	# BUG FIX: sign once and reuse. get_signed_params was previously called a
	# second time for the URL, so the query string stashed in flags (used by
	# tests) could differ from the one actually returned, besides doing the
	# signing work twice.
	query_string = get_signed_params(params)
	# for test
	frappe.local.flags.signed_query_string = query_string
	return get_url(unsubscribe_method + "?" + query_string)
@frappe.whitelist(allow_guest=True)
def unsubscribe(doctype, name, email):
	"""Guest-accessible endpoint: record an Email Unsubscribe for `email`
	against the given document, then render the confirmation page."""
	if not verify_request():
		return
	try:
		frappe.get_doc({
			"doctype": "Email Unsubscribe",
			"email": email,
			"reference_doctype": doctype,
			"reference_name": name
		}).insert(ignore_permissions=True)
	except frappe.DuplicateEntryError:
		# already unsubscribed - nothing new to record
		frappe.db.rollback()
	else:
		frappe.db.commit()
	return_unsubscribed_page(email, doctype, name)
def return_unsubscribed_page(email, doctype, name):
	"""Render the 'Unsubscribed' confirmation web page."""
	frappe.respond_as_web_page(
		_("Unsubscribed"),
		_("{0} has left the conversation in {1} {2}").format(email, _(doctype), name))
def flush(from_test=False):
	"""Flush the email queue; called from the scheduler on every tick."""
	smtpserver = SMTPServer()
	auto_commit = not from_test
	# additional check
	check_bulk_limit([])
	if frappe.flags.mute_emails or frappe.conf.get("mute_emails") or False:
		msgprint(_("Emails are muted"))
		from_test = True
	# expire anything that has been sitting unsent for more than 3 days
	frappe.db.sql("""update `tabBulk Email` set status='Expired'
		where datediff(curdate(), creation) > 3""", auto_commit=auto_commit)
	for attempt in xrange(500):
		# pick up (and row-lock) the oldest due, unsent message
		email = frappe.db.sql("""select * from `tabBulk Email` where
			status='Not Sent' and ifnull(send_after, "2000-01-01 00:00:00") < %s
			order by creation asc limit 1 for update""", now_datetime(), as_dict=1)
		if not email:
			break
		email = email[0]
		frappe.db.sql("""update `tabBulk Email` set status='Sending' where name=%s""",
			(email["name"],), auto_commit=auto_commit)
		try:
			if not from_test:
				smtpserver.setup_email_account(email.reference_doctype)
				smtpserver.sess.sendmail(email["sender"], email["recipient"], encode(email["message"]))
			frappe.db.sql("""update `tabBulk Email` set status='Sent' where name=%s""",
				(email["name"],), auto_commit=auto_commit)
		except (smtplib.SMTPServerDisconnected,
				smtplib.SMTPConnectError,
				smtplib.SMTPHeloError,
				smtplib.SMTPAuthenticationError):
			# bad connection, retry later
			frappe.db.sql("""update `tabBulk Email` set status='Not Sent' where name=%s""",
				(email["name"],), auto_commit=auto_commit)
			# no need to attempt further
			return
		except Exception as e:
			frappe.db.sql("""update `tabBulk Email` set status='Error', error=%s
				where name=%s""", (unicode(e), email["name"]), auto_commit=auto_commit)
def clear_outbox():
	"""Remove mails older than 31 days in Outbox. Called daily via scheduler."""
	frappe.db.sql("""delete from `tabBulk Email` where
		datediff(now(), creation) > 31""")
|
|
"""
:copyright: (c) 2014 Building Energy Inc
:license: see LICENSE for more details.
"""
# system imports
import json
import datetime
# django imports
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
# vendor imports
from annoying.decorators import ajax_request
from dateutil import parser
# BE imports
from seed.tasks import (
add_buildings,
remove_buildings,
)
from superperms.orgs.decorators import has_perm
from seed.models import (
Compliance,
Project,
ProjectBuilding,
StatusLabel,
)
from seed.utils.api import api_endpoint
from ..utils import projects as utils
from ..utils.time import convert_to_js_timestamp
# Extra columns included by default in project/building listings.
DEFAULT_CUSTOM_COLUMNS = [
'project_id',
'project_building_snapshots__status_label__name'
]
@api_endpoint
@ajax_request
@login_required
@has_perm('requires_viewer')
def get_projects(request):
    """Retrieve all projects for a given organization.

    :GET: Expects organization_id in query string.

    Returns::

        {'status': 'success',
         'projects': [
            {'name': ..., 'slug': ..., 'status': 'active',
             'number_of_buildings': ..., 'last_modified': JS timestamp,
             'last_modified_by': {'first_name': ..., 'last_name': ..., 'email': ...},
             'is_compliance': ..., 'compliance_type': ...,
             'deadline_date': ..., 'end_date': ...},
            ...
         ]}
    """
    organization_id = request.GET.get('organization_id', '')
    projects = []
    for project in Project.objects.filter(
            super_organization_id=organization_id).distinct():
        modifier = project.last_modified_by
        project_json = {
            'name': project.name,
            'slug': project.slug,
            'status': 'active',
            'number_of_buildings': project.project_building_snapshots.count(),
            # convert to JS timestamp (milliseconds)
            # NOTE(review): strftime("%s") is a non-portable platform
            # extension -- convert_to_js_timestamp may be preferable; confirm.
            'last_modified': int(project.modified.strftime("%s")) * 1000,
            'last_modified_by': {
                'first_name': modifier.first_name if modifier else None,
                'last_name': modifier.last_name if modifier else None,
                'email': modifier.email if modifier else None,
            },
            'is_compliance': project.has_compliance,
        }
        if project.has_compliance:
            compliance = project.get_compliance()
            project_json['end_date'] = convert_to_js_timestamp(
                compliance.end_date)
            project_json['deadline_date'] = convert_to_js_timestamp(
                compliance.deadline_date)
            project_json['compliance_type'] = compliance.compliance_type
        projects.append(project_json)
    return {'status': 'success', 'projects': projects}
@api_endpoint
@ajax_request
@login_required
@has_perm('requires_viewer')
def get_project(request):
    """Retrieve details about a single project.

    :GET: Expects project_slug and organization_id in the query string.

    Returns the project's fields plus is_compliance and, for compliance
    projects, end_date / deadline_date (JS timestamps) and compliance_type.
    """
    project_slug = request.GET.get('project_slug', '')
    organization_id = request.GET.get('organization_id', '')
    project = Project.objects.get(slug=project_slug)
    if project.super_organization_id != int(organization_id):
        return {'status': 'error', 'message': 'Permission denied'}
    project_dict = project.__dict__
    project_dict['is_compliance'] = project.has_compliance
    if project.has_compliance:
        compliance = project.get_compliance()
        project_dict['end_date'] = convert_to_js_timestamp(compliance.end_date)
        project_dict['deadline_date'] = convert_to_js_timestamp(
            compliance.deadline_date)
        project_dict['compliance_type'] = compliance.compliance_type
    # strip internal / non-serializable fields before returning
    for key in ('_state', 'modified', 'created'):
        del project_dict[key]
    return {'status': 'success', 'project': project_dict}
@api_endpoint
@ajax_request
@login_required
@has_perm('requires_member')
def delete_project(request):
    """Delete a project.

    Payload::

        {'project_slug': identifier (slug) for the project,
         'organization_id': ID of the org the project belongs to}

    Returns ``{'status': 'success' or 'error', 'message': error, if any}``.
    """
    body = json.loads(request.body)
    project = Project.objects.get(slug=body.get('project_slug', ''))
    if project.super_organization_id != int(body.get('organization_id')):
        return {'status': 'error', 'message': 'Permission denied'}
    project.delete()
    return {'status': 'success'}
@api_endpoint
@ajax_request
@login_required
@has_perm('requires_member')
def create_project(request):
    """Create a new project, optionally with compliance information.

    Payload::

        {'organization_id': ID of org to associate new project with,
         'project': {'name': ..., 'compliance_type': ..., 'description': ...,
                     'end_date': ..., 'deadline_date': ...}}

    Returns ``{'status': 'success' or 'error', 'message': error if any,
    'project_slug': slug of the new project on success}``.
    """
    body = json.loads(request.body)
    project_json = body.get('project')
    duplicate = Project.objects.filter(
        name=project_json['name'],
        super_organization_id=body['organization_id']
    ).exists()
    if duplicate:
        return {
            'status': 'error',
            'message': 'project already exists for user'
        }
    project, created = Project.objects.get_or_create(
        name=project_json['name'],
        owner=request.user,
        super_organization_id=body['organization_id'],
    )
    if not created:
        return {
            'status': 'error',
            'message': 'project already exists for the organization'
        }
    project.last_modified_by = request.user
    project.description = project_json.get('description')
    project.save()
    compliance_type = project_json.get('compliance_type', None)
    end_date = project_json.get('end_date', None)
    deadline_date = project_json.get('deadline_date', None)
    # only attach compliance info when all three pieces were supplied
    if (compliance_type is not None
            and end_date is not None
            and deadline_date is not None):
        compliance = Compliance(project=project)
        compliance.compliance_type = compliance_type
        compliance.end_date = parser.parse(project_json['end_date'])
        compliance.deadline_date = parser.parse(project_json['deadline_date'])
        compliance.save()
    return {'status': 'success', 'project_slug': project.slug}
@api_endpoint
@ajax_request
@login_required
@has_perm('requires_member')
def add_buildings_to_project(request):
    """Start a background task that adds buildings to a project.

    Payload::

        {'project': {'project_slug': ..., 'selected_buildings': [...]}}

    Returns ``{'status': 'success', 'project_loading_cache_key': key}`` where
    the key identifies the background job for progress polling.
    """
    project_json = json.loads(request.body).get('project')
    project = Project.objects.get(slug=project_json['project_slug'])
    add_buildings.delay(
        project_slug=project.slug, project_dict=project_json,
        user_pk=request.user.pk)
    return {
        'status': 'success',
        'project_loading_cache_key':
            project.adding_buildings_status_percentage_cache_key,
    }
@api_endpoint
@ajax_request
@login_required
@has_perm('requires_member')
def remove_buildings_from_project(request):
    """Start a background task that removes buildings from a project.

    Payload::

        {'project': {'project_slug': ..., 'selected_buildings': [...]}}

    Returns ``{'status': 'success', 'project_removing_cache_key': key}`` where
    the key identifies the background job for progress polling.
    """
    project_json = json.loads(request.body).get('project')
    # NOTE(review): unlike add_buildings_to_project, this endpoint reads the
    # slug from project_json['slug'] -- preserved as-is for callers.
    project = Project.objects.get(slug=project_json['slug'])
    remove_buildings.delay(
        project_slug=project.slug, project_dict=project_json,
        user_pk=request.user.pk)
    return {
        'status': 'success',
        'project_removing_cache_key':
            project.removing_buildings_status_percentage_cache_key,
    }
@api_endpoint
@ajax_request
@login_required
@has_perm('requires_member')
def update_project(request):
    """Update an existing project's details and compliance info.

    Payload::

        {'project': {'slug': ..., 'name': ..., 'is_compliance': bool,
                     'compliance_type': ..., 'end_date': ...,
                     'deadline_date': ...}}

    Returns ``{'status': 'success' or 'error', 'message': ...}``.
    """
    project_json = json.loads(request.body).get('project')
    project = Project.objects.get(slug=project_json['slug'])
    project.name = project_json['name']
    project.last_modified_by = request.user
    project.save()
    if project_json['is_compliance']:
        # reuse the existing compliance record if there is one
        if project.has_compliance:
            compliance = project.get_compliance()
        else:
            compliance = Compliance.objects.create(
                project=project,
            )
        compliance.end_date = parser.parse(project_json['end_date'])
        compliance.deadline_date = parser.parse(project_json['deadline_date'])
        compliance.compliance_type = project_json['compliance_type']
        compliance.save()
    elif project.has_compliance:
        # compliance flag cleared: delete the existing compliance record
        project.get_compliance().delete()
    return {
        'status': 'success',
        'message': 'project %s updated' % project.name
    }
@api_endpoint
@ajax_request
@login_required
def get_adding_buildings_to_project_status_percentage(request):
    """Report progress of the background 'add buildings to project' task.

    Payload::

        {'project_loading_cache_key': job key from add_buildings_to_project}

    Returns ``{'status': 'success', 'progress_object': cached progress}``.
    """
    cache_key = json.loads(request.body).get('project_loading_cache_key')
    return {
        'status': 'success',
        'progress_object': cache.get(cache_key)
    }
@api_endpoint
@ajax_request
@login_required
@has_perm('requires_viewer')
def get_projects_count(request):
    """Return the number of projects in the given organization.

    :GET: Expects organization_id in the query string.

    Returns ``{'status': 'success', 'projects_count': count}``.
    """
    org_id = request.GET.get('organization_id', '')
    count = Project.objects.filter(
        super_organization_id=org_id
    ).distinct().count()
    return {'status': 'success', 'projects_count': count}
@api_endpoint
@ajax_request
@login_required
def update_project_building(request):
    """Update building/project relationship metadata: stamp the approval time
    and approver, and apply the given status label.

    Payload::

        {'project_slug': ..., 'building_id': ..., 'label': {'id': ...}}

    Returns ``{'status': 'success', 'approved_date': 'mm/dd/yyyy',
    'approver': approver's email}``.
    """
    body = json.loads(request.body)
    project_building = ProjectBuilding.objects.get(
        project__slug=body['project_slug'],
        building_snapshot__pk=body['building_id'])
    project_building.approved_date = datetime.datetime.now()
    project_building.approver = request.user
    project_building.status_label = StatusLabel.objects.get(
        pk=body['label']['id'])
    project_building.save()
    return {
        'status': 'success',
        'approved_date': project_building.approved_date.strftime("%m/%d/%Y"),
        'approver': project_building.approver.email,
    }
@api_endpoint
@ajax_request
@login_required
def move_buildings(request):
    """Move or copy buildings between two projects.

    Payload::

        {'buildings': [...], 'copy': true to copy / false to move,
         'search_params': {...}, 'select_all_checkbox': bool,
         'source_project_slug': ..., 'target_project_slug': ...}

    Returns ``{'status': 'success'}``.
    """
    body = json.loads(request.body)
    utils.transfer_buildings(
        source_project_slug=body['source_project_slug'],
        target_project_slug=body['target_project_slug'],
        buildings=body['buildings'],
        select_all=body['select_all_checkbox'],
        search_params=body['search_params'],
        user=request.user,
        copy_flag=body['copy'],
    )
    return {'status': 'success'}
@api_endpoint
@ajax_request
@login_required
def get_labels(request):
    """Return all labels for any organization the user has access to.

    Returns ``{'status': 'success', 'labels': [{'name': ..., 'color': ...,
    'id': ...}, ...]}``.
    """
    return {'status': 'success', 'labels': utils.get_labels(request.user)}
@api_endpoint
@ajax_request
@login_required
def add_label(request):
    """
    Creates a new label.

    Payload::

        {
            'label':
            {
                "color": "red",
                "name": "non compliant"
            }
        }

    Returns::

        {
            'status': 'success',
            'label_id': The ID of the new label.
        }
    """
    new_label = json.loads(request.body)['label']
    # need a better way to get this, maybe one per org
    org = request.user.orgs.all()[0]
    status_label, _created = StatusLabel.objects.get_or_create(
        super_organization=org,
        name=new_label['name'],
        color=new_label['color'],
    )
    return {'status': 'success', 'label_id': status_label.pk}
@api_endpoint
@ajax_request
@login_required
def update_label(request):
    """
    Updates a label's name and color.

    Payload::

        {
            "label": {
                "color": Label's new color,
                "id": ID of label to change,
                "name": Label's new name,
            }
        }

    Returns::

        {'status': 'success'}
    """
    data = json.loads(request.body)['label']
    status_label = StatusLabel.objects.get(pk=data['id'])
    status_label.color = data['color']
    status_label.name = data['name']
    status_label.save()
    return {'status': 'success'}
@api_endpoint
@ajax_request
@login_required
def delete_label(request):
    """
    Deletes a label.

    Payload::

        {'label':
            {'id': ID of label to delete}
        }

    Returns::

        {'status': 'success'}
    """
    data = json.loads(request.body)['label']
    doomed = StatusLabel.objects.get(pk=data['id'])
    # Detach the label from any buildings referencing it before deleting,
    # so no ProjectBuilding row is left pointing at a missing label.
    ProjectBuilding.objects.filter(
        status_label=doomed
    ).update(status_label=None)
    doomed.delete()
    return {'status': 'success'}
@api_endpoint
@ajax_request
@login_required
def apply_label(request):
    """
    Applies a label to buildings (within a project).

    Payload::

        {
            "buildings": [
                "00010811",
                "00010809"
            ],
            "label": {"id": 1 },
            "project_slug": "proj-1",
            "search_params": {
                "filter_params": {
                    "project__slug": "proj-1"
                },
                "project_slug": 34,
                "q": ""
            },
            "select_all_checkbox": false
        }

    Returns::

        {'status': 'success'}
    """
    payload = json.loads(request.body)
    # Bulk application (including select-all semantics) lives in utils.
    utils.apply_label(
        project_slug=payload['project_slug'],
        buildings=payload['buildings'],
        select_all=payload['select_all_checkbox'],
        label=payload['label'],
        search_params=payload['search_params'],
        user=request.user,
    )
    return {'status': 'success'}
@api_endpoint
@ajax_request
@login_required
def remove_label(request):
    """
    Removes the status label from a single building within a project.

    Payload (as actually read by the code below)::

        {
            'project': {'id': ID of the project},
            'building': {'id': ID of the building snapshot}
        }

    Returns::

        {'status': 'success'}

    NOTE(review): the original docstring documented a bulk payload
    ('buildings', 'search_params', 'select_all_checkbox', 'project_slug')
    like apply_label's, but the code reads only body['project']['id'] and
    body['building']['id'] -- confirm which contract callers actually send.
    """
    body = json.loads(request.body)
    # Clearing status_label detaches the label; the label itself survives.
    ProjectBuilding.objects.filter(
        project__pk=body['project']['id'],
        building_snapshot__pk=body['building']['id']
    ).update(
        status_label=None
    )
    return {'status': 'success'}
|
|
"""
Table of command codes is in Section 4.1, page 58.
"""
import re
from decimal import Decimal
def parse_command(s):
    """
    Dispatch a raw Gerber command string to the matching Command subclass.

    Extended commands start with '%' and are identified by the two
    characters following it; normal commands are identified by the code
    just before the trailing '*', falling back to SetApertureCommand for
    bare aperture-select ('Dnnn*') commands.
    """
    if s.startswith('%'):
        # Extended command: code is the two chars after the '%' delimiter.
        return extended_commands[s[1:3]].from_string(s)
    # Normal command: code is the three chars before the final '*'.
    handler = normal_commands.get(s[-4:-1])
    if handler is None:
        # Not in the map, so it must be an aperture-select command.
        return SetApertureCommand.from_string(s)
    return handler.from_string(s)
class Command(object):
    """
    Common base class for Gerber commands.

    Subclasses override from_string()/to_string()/execute(); commands that
    the spec has deprecated set the `deprecated` class flag.
    """
    deprecated = False

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.__dict__)

    @classmethod
    def from_string(cls, s):
        # Default parser for argument-less commands: the text is ignored.
        return cls()
class UnitCommand(Command):
    """
    Command Code MO - Extended
    Section 4.10, p98

    Selects the measurement unit ('IN' or 'MM') for coordinate data.
    """

    def __init__(self, unit):
        self.unit = unit

    @classmethod
    def from_string(cls, s):
        # The unit code occupies chars 3-4: %MOIN*% or %MOMM*%
        value = s[3:5]
        assert value in ('IN', 'MM'), "invalid unit %r" % value
        return cls(unit=value)

    def to_string(self):
        return '%MO' + self.unit + '*%'

    def execute(self, state, plane):
        state.set_unit(self.unit)
class CoordinateFormatCommand(Command):
    """
    Command Code FS - Extended
    Section 4.9, p96

    Declares how many integer and fractional digits coordinate values
    carry; the X and Y formats must be identical.
    """

    def __init__(self, integer_digits, fractional_digits):
        self.integer_digits = integer_digits
        self.fractional_digits = fractional_digits

    @classmethod
    def from_string(cls, s):
        assert s.startswith('%FSLAX')
        # Layout: %FSLAX<if>Y<if>*% -- two digit-pairs that must agree.
        xformat, yformat = s[6:8], s[9:11]
        assert xformat == yformat
        return cls(integer_digits=int(xformat[0]),
                   fractional_digits=int(xformat[1]))

    def to_string(self):
        digits = '%d%d' % (self.integer_digits, self.fractional_digits)
        return '%FSLAX' + digits + 'Y' + digits + '*%'

    def execute(self, state, plane):
        state.set_coordinate_format(integer_digits=self.integer_digits,
                                    fractional_digits=self.fractional_digits)
class OffsetCommand(Command):
    """
    Command Code OF - Extended, Deprecated
    Section 7.1.7, p163
    Syntax is like %OFA1.2B-1.0*%
    """
    deprecated = True

    def __init__(self, offset_a, offset_b):
        self.offset_a = offset_a
        self.offset_b = offset_b

    @classmethod
    def from_string(cls, s):
        assert s.startswith('%OFA')
        # Strip the '%OFA' prefix and '*%' suffix, leaving '<a>B<b>'.
        raw_a, raw_b = s[4:-2].split('B')
        return cls(offset_a=Decimal(raw_a), offset_b=Decimal(raw_b))

    def to_string(self):
        return '%OFA' + str(self.offset_a) + 'B' + str(self.offset_b) + '*%'

    def execute(self, state, plane):
        # XXX offsets are not applied by the interpreter yet
        pass
class ImagePolarityCommand(Command):
    """
    Command Code IP - Extended, Deprecated
    Section 7.1.3, p160

    Image polarity is either 'POS' or 'NEG'.
    """
    deprecated = True

    def __init__(self, polarity):
        self.polarity = polarity

    @classmethod
    def from_string(cls, s):
        # Everything between '%IP' and the trailing '*%'.
        value = s[3:-2]
        assert value in ('POS', 'NEG')
        return cls(polarity=value)

    def to_string(self):
        return '%IP' + self.polarity + '*%'

    def execute(self, state, plane):
        # XXX image polarity is not applied by the interpreter yet
        pass
class LevelPolarityCommand(Command):
    """
    Command Code LP - Extended
    Section 4.15.1, p132
    Syntax is like %LPD*% or %LPC*%
    C = clear
    D = dark
    """

    def __init__(self, polarity):
        self.polarity = polarity

    @classmethod
    def from_string(cls, s):
        # Single polarity character directly after '%LP'.
        value = s[3]
        assert value in ('D', 'C')
        return cls(polarity=value)

    def to_string(self):
        return '%LP' + self.polarity + '*%'

    def execute(self, state, plane):
        state.set_level_polarity('dark' if self.polarity == 'D' else 'clear')
class MacroApertureCommand(Command):
    """
    Command Code AM - Extended
    Section 4.13.1 - p106

    Only the macro's template name is parsed; the raw text is preserved
    verbatim for round-tripping.
    """
    # XXX This is missing a lot of stuff

    def __init__(self, template_name, s):
        self.template_name = template_name
        self.s = s

    @classmethod
    def from_string(cls, s):
        assert s.startswith('%AM')
        # The name runs from after '%AM' up to the first '*'.
        head = s.split('*', 1)[0]
        return cls(template_name=head[3:], s=s)

    def to_string(self):
        # Emit the original text unchanged.
        return self.s

    def execute(self, state, plane):
        # XXX macro definitions are not interpreted yet
        pass
class ApertureDefinitionCommand(Command):
    """
    Command Code AD - Extended
    Section 4.11.1 p p99

    Aperture definitions can either include relevant information directly,
    or can reference a named aperture macro created by a
    MacroApertureCommand. Only the aperture number and template name are
    parsed; the raw text is preserved for round-tripping.
    """
    # XXX This is missing a lot of stuff

    def __init__(self, aperture_number, template_name, s):
        self.aperture_number = aperture_number
        self.template_name = template_name
        self.s = s

    @classmethod
    def from_string(cls, s):
        assert s.startswith('%ADD')
        # Strip '%ADD' and the trailing '*%', then split number/template.
        body = s[4:-2]
        m = re.match(r'^(\d+)([a-zA-Z_.]+)', body)
        return cls(aperture_number=int(m.group(1)),
                   template_name=m.group(2),
                   s=s)

    def to_string(self):
        # Emit the original text unchanged.
        return self.s

    def execute(self, state, plane):
        # XXX aperture definitions are not interpreted yet
        pass
class SetApertureCommand(Command):
    """
    Command Code Dnnnn
    Section 4.3.1, p64
    Syntax is like Dnnn*

    Aperture numbers below 10 are reserved for function codes.
    """

    def __init__(self, aperture_number):
        assert aperture_number >= 10
        self.aperture_number = aperture_number

    @classmethod
    def from_string(cls, s):
        # Strip the leading 'D' and trailing '*'.
        return cls(aperture_number=int(s[1:-1]))

    def to_string(self):
        return 'D' + str(self.aperture_number) + '*'

    def execute(self, state, plane):
        state.set_current_aperture(self.aperture_number)
class InterpolateCommand(Command):
    """
    Command Code D01
    Section 4.2.2, p61
    Syntax is like XnnnYnnnInnnJnnnD01* in circular interpolation modes
    Syntax is like XnnnYnnnD01* in linear interpolation mode
    XnnnYnnn indicates the end point
    InnnJnnn indicates the center point offsets in circular modes

    Coordinate data in Gerber files may carry an explicit '+'/'-' sign,
    so the parser accepts an optional sign before each digit run (the
    previous pattern rejected negative coordinates).
    """

    def __init__(self, x_string, y_string, i_string=None, j_string=None):
        self.x_string = x_string
        self.y_string = y_string
        self.i_string = i_string
        self.j_string = j_string

    @classmethod
    def from_string(cls, s):
        if 'I' in s:
            # Circular mode: end point plus center offsets.
            m = re.match(r'X([+-]?\d+)Y([+-]?\d+)I([+-]?\d+)J([+-]?\d+)', s)
            return cls(x_string=m.group(1), y_string=m.group(2),
                       i_string=m.group(3), j_string=m.group(4))
        else:
            # Linear mode: end point only.
            m = re.match(r'X([+-]?\d+)Y([+-]?\d+)', s)
            return cls(x_string=m.group(1), y_string=m.group(2))

    def to_string(self):
        if self.i_string:
            return 'X%sY%sI%sJ%sD01*' % (self.x_string, self.y_string,
                                         self.i_string, self.j_string)
        else:
            return 'X%sY%sD01*' % (self.x_string, self.y_string)

    def execute(self, state, plane):
        # XXX drawing is not implemented yet
        pass
class MoveCommand(Command):
    """
    Command Code D02
    Section 4.2.3, p62
    Syntax is like XnnnYnnnD02*
    """

    def __init__(self, x_string, y_string):
        self.x_string = x_string
        self.y_string = y_string

    @classmethod
    def from_string(cls, s):
        # Drop the leading 'X' and trailing 'D02*', then split at the 'Y'.
        xs, ys = s[1:-4].split('Y')
        return cls(x_string=xs, y_string=ys)

    def to_string(self):
        return 'X' + self.x_string + 'Y' + self.y_string + 'D02*'

    def execute(self, state, plane):
        # XXX movement is not implemented yet
        pass
class FlashCommand(Command):
    """
    Command Code D03
    Section 4.2.4, p62
    Syntax is like XnnnYnnnD03*
    """

    def __init__(self, x_string, y_string):
        self.x_string = x_string
        self.y_string = y_string

    @classmethod
    def from_string(cls, s):
        # Drop the leading 'X' and trailing 'D03*', then split at the 'Y'.
        xs, ys = s[1:-4].split('Y')
        return cls(x_string=xs, y_string=ys)

    def to_string(self):
        return 'X' + self.x_string + 'Y' + self.y_string + 'D03*'

    def execute(self, state, plane):
        # XXX flashing is not implemented yet
        pass
class LinearInterpolationModeCommand(Command):
    """
    Command Code G01
    Section 4.4.1, p65
    No args; switches interpolation to straight lines.
    """

    def to_string(self):
        return 'G01*'

    def execute(self, state, plane):
        state.set_interpolation_mode('linear')
class CWCircularInterpolationModeCommand(Command):
    """
    Command Code G02
    Section 4.5.3, p68
    No args; switches interpolation to clockwise arcs.
    """

    def to_string(self):
        return 'G02*'

    def execute(self, state, plane):
        # Fixed typo: was 'clockwie-circular', which did not parallel the
        # CCW command's 'counterclockwise-circular'.
        # NOTE(review): if any state implementation matched the misspelled
        # literal, update it to match.
        state.set_interpolation_mode('clockwise-circular')
class CCWCircularInterpolationModeCommand(Command):
    """
    Command Code G03
    Section 4.5.4, p68
    No args; switches interpolation to counterclockwise arcs.
    """

    def to_string(self):
        return 'G03*'

    def execute(self, state, plane):
        state.set_interpolation_mode('counterclockwise-circular')
class SingleQuadrantCommand(Command):
    """
    Command Code G74
    Section 4.5.5, p68
    No args; arcs are limited to a single quadrant.
    """

    def to_string(self):
        return 'G74*'

    def execute(self, state, plane):
        state.set_quadrant_mode('single')
class MultiQuadrantCommand(Command):
    """
    Command Code G75
    Section 4.5.6, p68
    No args; arcs may span multiple quadrants.
    """

    def to_string(self):
        return 'G75*'

    def execute(self, state, plane):
        state.set_quadrant_mode('multi')
class EnableRegionModeCommand(Command):
    """
    Command Code G36
    Section 4.6.2, p76
    No args; starts region (filled contour) mode.
    """

    def to_string(self):
        return 'G36*'

    def execute(self, state, plane):
        state.set_region_mode('on')
class DisableRegionModeCommand(Command):
    """
    Command Code G37
    Section 4.6.3, p76
    No args; ends region (filled contour) mode.
    """

    def to_string(self):
        return 'G37*'

    def execute(self, state, plane):
        state.set_region_mode('off')
class CommentCommand(Command):
    """
    Command Code G04
    Section 4.7, p94

    Free-text comment; has no effect on the image.
    """

    def __init__(self, comment):
        self.comment = comment

    @classmethod
    def from_string(cls, s):
        # Everything between 'G04' and the trailing '*' is comment text.
        return cls(comment=s[3:-1])

    def to_string(self):
        return 'G04' + self.comment + '*'

    def execute(self, state, plane):
        # Comments are ignored by the interpreter.
        pass
class EOFCommand(Command):
    """
    Command Code M02

    Marks the end of the Gerber file; no args.
    """

    def to_string(self):
        return 'M02*'

    def execute(self, state, plane):
        # End-of-file carries no drawing semantics.
        pass
# Dispatch table for extended commands, keyed by the two-character code
# following the '%' delimiter (see parse_command).
extended_commands = {
    'MO': UnitCommand,
    'FS': CoordinateFormatCommand,
    'OF': OffsetCommand,
    'IP': ImagePolarityCommand,
    'LP': LevelPolarityCommand,
    'AM': MacroApertureCommand,
    'AD': ApertureDefinitionCommand,
}

# Dispatch table for normal commands, keyed by the function code just
# before the trailing '*' (see parse_command); anything not listed here
# is treated as an aperture-select (Dnnn) command.
normal_commands = {
    'D01': InterpolateCommand,
    'D02': MoveCommand,
    'D03': FlashCommand,
    'G01': LinearInterpolationModeCommand,
    'G02': CWCircularInterpolationModeCommand,
    'G03': CCWCircularInterpolationModeCommand,
    'G74': SingleQuadrantCommand,
    'G75': MultiQuadrantCommand,
    'G36': EnableRegionModeCommand,
    'G37': DisableRegionModeCommand,
    'G04': CommentCommand,
    'M02': EOFCommand,
}
|
|
#!/usr/bin/env python
# graphviz -- Python interface to the GraphViz graphing utility.
#
# Copyright 2002 by Kevin Quick <quick@null.net>. All rights reserved.
import string
# # ### ### # ### ### # # ### ### # # ###
# # # # # # # # # # # # ## # #
#### # # # # # # # # ### # #### ## ####
# # # # # # # # # # # # ## #
## # ### #### ### # # # # # # # ###
# Separator used when merging multiple labels onto one graph element.
mlabelsep = '|'
def dictstr(label, the_dict, sep=',', pfx='', sfx=''):
    "Generates 'key=value;\n...' list from input dict; no = if value is array"
    # NOTE: Python 2 module -- relies on string.split/string.join.
    astr = []
    if label and len(label):
        if 'shape' in the_dict.keys() and the_dict['shape'] == 'record':
            # Record labels are emitted verbatim: '|' is record syntax there.
            astr.append('label="%s"'%label)
        else:
            lbls = string.split(label, mlabelsep)
            if len(lbls) == 1:
                astr.append('label="%s"'%label)
            else:
                # Re-wrap a multi-part label ('a|b|c') onto several output
                # lines, starting a new line once a chunk exceeds 8 chars.
                bstr = ''
                cstr = []
                for dstr in lbls:
                    if len(bstr) <= 8:
                        if len(bstr) == 0:
                            bstr = dstr
                        else:
                            bstr = '%s%s%s'%(bstr, mlabelsep, dstr)
                    else:
                        cstr.append(bstr)
                        bstr = dstr
                cstr.append(bstr)
                astr.append('label="%s"'%string.join(cstr,'%s\\n'%mlabelsep))
    for key in the_dict.keys():
        if the_dict[key][0] == '[':
            # Bracketed values (attribute lists) are emitted without '='.
            astr.append('%s %s'%(key, the_dict[key]))
        else:
            astr.append('%s=%s'%(key, the_dict[key]))
    if len(astr):
        return '%s%s%s'%(pfx, string.join(astr, sep), sfx)
    else:
        return ''
def attrstr(label, attrdict):
    "Generates bracketed 'key=value,...' list from input dictionary"
    # Inline attribute form appended to a node/edge statement: [ ... ].
    return dictstr(label, attrdict, ',', '[ ', ' ]')
def stmtstr(label, stmtdict):
    "Generates 'key=value;\n...' list from input dict; no = if value is array"
    # Statement form used inside graph/subgraph bodies: one per line.
    return dictstr(label, stmtdict, ';\n', '', ';\n')
def dictadd(dict1, dict2):
    """Return a new dict containing the keys of both inputs.

    dict2's values win on key clashes. The previous implementation
    aliased dict1 (``rdict = dict1``) and therefore mutated the caller's
    first argument in place; copying first gives the "adds two
    dictionaries" behavior the docstring promises, without side effects.
    """
    rdict = dict(dict1)
    rdict.update(dict2)
    return rdict
def is_safechar(c):
    # True when c is an ASCII letter, i.e. safe for a generated dot id.
    # (Substring semantics of 'in' are relied on only for 1-char inputs.)
    return c in string.ascii_letters
def label_elem_id(elemname, elemnum, pfx=''):
    # Derive a dot-safe port id from a record element name: keep only
    # ASCII letters; if none survive, synthesize '<pfx><elemnum>'.
    if not elemname or not len(elemname): return ''
    id = string.join(filter(is_safechar, elemname),'')
    if not len(id): return '%s%d'%(pfx, elemnum)
    return id
def record_port(record_ast, recordname, elemname, pfx='L'):
    "For a record (AST), return the portname edge target for elemname"
    # Depth-first walk of the (possibly nested) record structure; each
    # nesting level appends 'L' to pfx so generated port ids stay unique.
    r = []
    print 'record_port(..., %s, %s)'%(elemname, pfx)
    for e in record_ast:
        if type(e) == type((1,2)) or type(e) == type([1,2]):
            # Sub-record: recurse; any non-default return means "found".
            p = record_port(e, recordname, elemname, '%sL'%pfx)
            if p != recordname: return p
            r.append('NOTINTHISSUBELEM')
            continue
        elif type(e) == type('string'):
            s = e
        else:
            s = str(e)
        print ' s: %s'%s
        if s == elemname:
            # Found: return the 'record:port' form usable as an edge target.
            return '%s:%s'%(recordname, label_elem_id(s, len(r), pfx))
        r.append(s)
    return recordname # No port, just point to record in general
def seq_to_recordlabel(seq, pfx='L'):
    # Render a nested Python sequence as a dot record label: nested
    # sequences become '{...}' (alternating orientation), elements are
    # '|'-joined, and each gets a '<port>' id where one can be derived.
    r = []
    for e in seq:
        if type(e) == type((1,2)) or type(e) == type([1,2]):
            r.append('{%s}'%seq_to_recordlabel(e,'%sL'%pfx))
            continue
        elif type(e) == type('string'):
            s = e
        else:
            s = str(e)
        l = label_elem_id(s,len(r),pfx)
        if len(l):
            r.append('<%s>%s'%(l,s))
        else:
            r.append(s)
    return string.join(r,'|')
### ## ### #### ### # ## ### ###
# # # # # # # # # # # #
#### ### #### ## ### # # #### ## ## ####
# # # # # # # # # # # #
### # # ### #### ### #### # # ### ###
class gv_base:
    """Common state and behavior for graphviz nodes, edges and clusters."""

    def __init__(self, name, type):
        # _name keeps only identifier-safe characters for use in dot
        # statements; _label retains the original text for display.
        namec = filter(lambda c: c in string.ascii_letters + string.digits + '_', name)
        self._name = ''.join(namec)
        self._type = type
        self._label = name
        self._attrs = {}

    def __str__(self):
        return '%s %s "%s" <%s> %s'%(self._type, self._name,
                                     self._dotcmd(),
                                     self._label, self._attrs)

    def __repr__(self): return '%s(%s)'%(self._type, self._name)

    def name(self): return self._name

    def type(self): return self._type

    def label(self, newlabel=None):
        # Combined getter/setter: with an argument, set the label (list or
        # tuple inputs become record labels and force shape=record); always
        # return the current label, falling back to the name when unset.
        if newlabel:
            if type(newlabel) == type('string'):
                self._label = newlabel
            elif type(newlabel) == type([0,1]) or \
                 type(newlabel) == type((0,1)) :
                self._label = seq_to_recordlabel(newlabel)
                self.attr('shape','record')
            else:
                self._label = str(newlabel)
        if not self._label: return self._name
        return self._label

    def color(self, newcolorname=None):
        # Getter/setter; reading before any color is set raises KeyError.
        if newcolorname: self._attrs['color'] = newcolorname
        return self._attrs['color']

    def attr(self, attrname, attrvalue=''):
        # Getter/setter for arbitrary dot attributes; returns None if unset.
        if attrvalue: self._attrs[attrname] = attrvalue
        try:
            return self._attrs[attrname]
        except KeyError: return None

    def dot(self):
        # One dot statement: '<cmd> [ attrs ];\n'.
        return '%s %s;\n'%(self._dotcmd(),
                           attrstr(self._label, self._attrs))

    def _dotcmd(self): return self._name
#### # #### # # #### # # ### ###
# # # ## ## # ## # # #
#### #### ### # ### # # # ### #### # ## #### ####
# # # # # # # ## # #
#### #### #### # # #### # # # ###
class gv_node (gv_base):
    """A graph node; its dot statement is just the sanitized node name."""
    def __init__(self, name):
        gv_base.__init__(self, name, 'NODE')
#kwqkwqKWQkwqKWQkwqKWQ: if node name is an array (or nest of arrays) node type is record and arrays describe vert/hor/vert/hor nesting of record points. Do anything for ease of port specification on vectors?
#kwqKWQkwqKWQ: nesting of clusters?
class gv_edge (gv_base):
    """A directed edge; endpoints may be gv_node objects or bare names."""
    def __init__(self, from_node, to_node,label=''):
        # Accept gv_node instances by collapsing them to their names.
        if isinstance(from_node, gv_node): from_node = from_node.name()
        if isinstance(to_node, gv_node): to_node = to_node.name()
        gv_base.__init__(self, '%s->%s'%(from_node, to_node), 'EDGE')
        self._fnode = from_node
        self._tnode = to_node
        # A single-space label means "no label" but keeps output well-formed.
        if label: self.label(label)
        else: self.label(' ')
    def _dotcmd(self): return '%s -> %s'%(self._fnode, self._tnode)
class gv_cluster (gv_base):
    """A subgraph cluster with its own statements and contained elements."""

    # Palette cycled through by color('auto').
    StdColors = [ 'grey90', 'yellow', 'cyan', 'orchid', 'lawngreen',
                  'orange', 'tan', 'lavender', 'gray80' ]

    def __init__(self, cname):
        gv_base.__init__(self, cname, 'CLUS')
        self._statements = {}
        self._subelements = {}      # Key is name, element is gv_node
        self._visible = 1

    def invisible(self): self._visible = 0
    def visible(self): self._visible = 1

    def name(self):
        # dot only draws a bounding box for subgraphs named 'cluster_*'.
        if self._visible: return 'cluster_%s'%self._name
        return self._name

    def color(self, colorname=''):
        if colorname:
            if colorname == 'auto':
                # Pick a palette entry deterministically from this object.
                colorname = self.StdColors[hash(self)%len(self.StdColors)]
            self._statements['color'] = colorname
            self._statements['style'] = 'filled'
            self._statements['node'] = '[style=filled,color=white]'
        return self._statements['color']

    def dot(self):
        return 'subgraph %s {\n%s%s}\n'%(
            self.name(),
            stmtstr(self._label, self._statements),
            string.join(map(lambda x: x.dot(),
                            self._subelements.values()), ''))

    def __str__(self):
        return '%s {\n%s%s}\n'%(gv_base.__str__(self),
                                self._statements,
                                map(str, self._subelements))

    def add(self, node):
        self._subelements[node.name()] = node

    def as_node(self):
        # Build a proxy node so edges can target the cluster as a whole.
        node = gv_node('CL_%s'%self.name())
        node.label('%s'%self.label())
        if 'color' in self._statements.keys():
            node.color(self._statements['color'])
            node.attr('style', 'filled')
        return node
### ### ## ### # # ### # ## ### ###
# # # # # # # # # # # # # # #
#### # ## ### #### ### #### # # #### ## ## ####
# # # # # # # # # # # # # # #
### # # # # # # # ### #### # # ### ###
class graphviz:
    """Top-level digraph: collects elements and emits dot notation."""

    def __init__(self, name):
        self._elements = {}   # Key is name, element is gv_{node|edge|cluster}
        self._statements = {}
        self._name = name

    # Specify various attributes controlling the graph's parameters
    _std_fullpage = { 'ratio':'fill', 'center':'1', 'shape':'ellipse',
                      'style':'solid',
                      'arrowhead':'normal', 'arrowtail':'none'
                      }
    _landscape = { 'size':'"10,7"', 'page':'"8.5,11"', 'rotate':'90' }
    _portrait = { 'size':'"7,10"', 'page':'"11,8.5"', 'rotate':'0' }

    def fullpage(self):
        self._statements = dictadd(self._statements, self._std_fullpage)

    def landscape(self):
        self._statements = dictadd(self._statements, self._landscape)

    def portrait(self):
        self._statements = dictadd(self._statements, self._portrait)

    def attr(self, attrname, attrvalue=''):
        # Getter/setter; reading an unset attribute raises KeyError.
        if attrvalue: self._statements[attrname] = attrvalue
        return self._statements[attrname]

    # Input Handling
    def add(self, element):
        "Add a node, edge, or cluster to this graph"
        # Re-adding an existing name merges: the new label is appended
        # ('|'-separated) unless already present, and attrs are merged in.
        name = element.name()
        if name not in self._elements.keys():
            self._elements[name] = element
            return element
        if element.label() in string.split(self._elements[name].label(),
                                           mlabelsep):
            return self._elements[name]
        self._elements[name].label('%s%s%s'%(
            self._elements[name].label(), mlabelsep, element.label()))
        for akey in element._attrs.keys():
            self._elements[name].attr(akey, element._attrs[akey])
        return element

    # Return only elements of this graph of the specified type
    def _of_type(self, of_type):
        return filter(lambda x,of_type=of_type: x.type() == of_type,
                      self._elements.values())

    def nodes(self): return self._of_type('NODE')
    def edges(self): return self._of_type('EDGE')
    def clusters(self): return self._of_type('CLUS')

    # Output Preparation
    def __str__(self):
        return 'GRPH {%s} <%s>'%(stmtstr('', self._statements),
                                 string.join(map(str, self._elements.values()),'\n'))

    def dot(self):
        "Return the GraphViz dot notation for this graph"
        # Elements are emitted clusters first, then nodes, then edges.
        return 'digraph %s {%s%s}\n'%(
            self._name,
            stmtstr('', self._statements),
            string.join(
                reduce(lambda x,y: x+y,
                       map(lambda type:
                           map(lambda x: x.dot(), self._of_type(type)),
                           [ 'CLUS', 'NODE', 'EDGE' ])),
                '')
            )

    # Manipulation and Massaging
    def autocluster(self, min_nodes=3):
        "Automatically make a cluster/clusternode to eliminate similar edges"
        # Group edges by (target node, label); any group with at least
        # min_nodes distinct sources is collapsed into one cluster plus a
        # single representative edge to the target.
        pcl = {}
        for n in self.nodes():
            # find all edges terminating on the test node
            srcs = filter(lambda e,n=n: e._tnode == n.name(), self.edges())
            # make a dictionary: key is edge label, values are all
            # above edges with this label
            lsrcs = {}
            for e in srcs:
                key = e.label()
                if e.attr('style') == 'bold': key = "%s bOlD"%key
                if key in lsrcs.keys():
                    lsrcs[key].append(e)
                else:
                    lsrcs[key] = [e]
            # Now remember all arrays of edges greater than the minimum
            clsrcs = filter(lambda lsrc:len(lsrc) >= min_nodes, lsrcs.values())
            # Store each array in our possible cluster dictionary, key
            # is len of array
            for clsrc in clsrcs:
                if len(clsrc) in pcl.keys():
                    pcl[len(clsrc)].append( (n,clsrc) )
                else:
                    pcl[len(clsrc)] = [ (n,clsrc) ]
        # Now process each possible cluster, from the most entries to the least
        pcllens = pcl.keys()
        pcllens.sort()
        pcllens.reverse()
        clnum = 1
        usednodes = []
        for pcllen in pcllens:
            for tgtnode,tgtedges in pcl[pcllen]:
                # Get all source nodes for this potential cluster
                srcnodes = map(lambda edge: edge._fnode, tgtedges)
                # If any nodes are already used in a cluster, skip
                # this one since a node can only be a part of one
                # cluster at a time
                if len(filter(lambda n, usednodes=usednodes: n in usednodes,
                              srcnodes)):
                    continue
                # Make a cluster!
                cl = gv_cluster('%d'%clnum)
                cl.label('Cluster %d'%clnum)
                cl.color('auto')
                self.add(cl)
                clnum = clnum + 1
                # Add all source nodes for this collection to the cluster,
                # removing those source nodes from the top-level list of
                # nodes and also removing all associated edges
                for srcnode in srcnodes:
                    try:
                        cl.add(self._elements[srcnode])
                        del self._elements[srcnode]
                    except KeyError:
                        print 'NO KEY "%s" IN'%srcnode, str(self._elements)
                        print '-'*40
                for tgtedge in tgtedges:
                    del self._elements[tgtedge.name()]
                # Now create the node representing the cluster and an edge
                # linking that new node to the target node
                clnd = cl.as_node()
                self.add(clnd)
                e = gv_edge(clnd.name(), tgtnode.name())
                e.label(tgtedges[0].label())
                self.add(e)
                usednodes = usednodes + srcnodes
# # ## ### # # ### #### ### ###
## ## # # # ## # # # # # #
#### #### # # # #### # #### # # ### ## # #### ####
# # # # # # ## # # # # #
# # # # ### # # # #### ### #
# Self-test driver: writes each test graph's dot output to a file named
# after the test key.
if __name__ == "__main__":
    # Generate test graphs
    testgraphs = { 'testgr.1' : [gv_node('A'), gv_node('B'), gv_edge('A','B')],
                   'testgr.2' : [gv_node('A'), gv_node('B'),
                                 gv_node('C'), gv_node('D'),
                                 gv_edge('A','B'),
                                 gv_edge('B','C'),
                                 gv_edge('C','D'),
                                 gv_edge('D','A')],
                   'testgr.3' : [ gv_node('Terminating'),
                                  gv_node('Uninitialized') ],
                   'testgr.4' : [ gv_node('Terminating'),
                                  gv_node('Uninitialized'),
                                  gv_node('Initing'),
                                  gv_node('Disconnected'),
                                  gv_node('Serving'),
                                  gv_node('Ready'),
                                  gv_edge('Initing','Initing','A|C|F|G'),
                                  gv_edge('Ready','Disconnected','M'),
                                  gv_edge('Serving','Serving','A|I|C|K|L|F|G'),
                                  gv_edge('Initing', 'Uninitialized','B'),
                                  gv_edge('Uninitialized', 'Uninitialized',
                                          'I|C|K|L|F|G|R'),
                                  gv_edge('Terminating', 'Terminating',
                                          'A|I|C|K|L|P|G|H'),
                                  gv_edge('Initing', 'Terminating', 'H'),
                                  gv_edge('Disconnected', 'Terminating', 'H'),
                                  gv_edge('Serving', 'Terminating', 'H'),
                                  gv_edge('Ready', 'Terminating', 'H'),
                                  ],
                   }
    # Identity translation table; used only to drop string.punctuation
    # from the test name when forming the graph name below.
    fntrans = string.maketrans(string.letters+string.digits,
                               string.letters+string.digits)
    for testg in testgraphs.keys():
        f = open(testg,'w')
        g = graphviz(string.translate(testg,fntrans,string.punctuation))
        g.fullpage()
        map(g.add, testgraphs[testg])
        if testg == 'testgr.3':
            # Hand-built cluster plus edges, exercising gv_cluster directly.
            c = gv_cluster('1')
            c.label('Cluster 1')
            c.color('auto')
            n = gv_node('Initing')
            c.add(n)
            n = gv_node('Disconnected')
            c.add(n)
            n = gv_node('Serving')
            c.add(n)
            n = gv_node('Ready')
            c.add(n)
            g.add(c)
            e = gv_edge('Initing','Initing')
            e.label('A| C| F| G')
            g.add(e)
            e = gv_edge('Initing', 'Disconnected')
            e.label('E')
            e.attr('style','bold')
            g.add(e)
            e = gv_edge('Disconnected', 'Ready')
            e.label('O')
            e.attr('style','bold')
            g.add(e)
            e = gv_edge('Ready', 'Disconnected')
            e.label('M')
            g.add(e)
            e = gv_edge('Ready', 'Ready')
            e.label('A| I| J| K|L|G')
            e.attr('style','bold')
            g.add(e)
            e = gv_edge('Initing', 'Serving')
            e.label('D')
            e.attr('style','bold')
            g.add(e)
            e = gv_edge('Serving', 'Serving')
            e.label('A|I|C|K|L|F|G')
            g.add(e)
            e = gv_edge('Initing', 'Uninitialized')
            e.label('B')
            g.add(e)
            e = gv_edge('Uninitialized', 'Initing')
            e.label('Q')
            e.attr('style','bold')
            g.add(e)
            e = gv_edge('Uninitialized', 'Uninitialized')
            e.label('I|C|K|L|F|G|R')
            g.add(e)
            e = gv_edge('Terminating', 'Terminating')
            e.label('A|I|C|K|L|P|G|H')
            g.add(e)
            cn = c.as_node()
            g.add(cn)
            e = gv_edge(cn.name(), 'Terminating')
            e.label('H')
            g.add(e)
        if testg == 'testgr.4':
            # Extra bold edges, then autocluster() does the grouping.
            e = gv_edge('Initing', 'Disconnected','E')
            e.attr('style','bold')
            g.add(e)
            e = gv_edge('Disconnected', 'Ready','O')
            e.attr('style','bold')
            g.add(e)
            e = gv_edge('Ready', 'Ready','A| I| J| K|L|G')
            e.attr('style','bold')
            g.add(e)
            e = gv_edge('Initing', 'Serving','D')
            e.attr('style','bold')
            g.add(e)
            e = gv_edge('Uninitialized', 'Initing', 'Q')
            e.attr('style','bold')
            g.add(e)
            g.autocluster()
        f.write(g.dot())
        f.close()
        print 'Wrote Test:',testg
    print 'End Tests'
#TODO:
# Records
# Subgraph extraction
# Output order should be edges, clusters, nodes?
|
|
"""
A minimalist REST API for Salt
==============================
This ``rest_wsgi`` module provides a no-frills REST interface for sending
commands to the Salt master. There are no dependencies.
Extra care must be taken when deploying this module into production. Please
read this documentation in entirety.
All authentication is done through Salt's :ref:`external auth <acl-eauth>`
system.
Usage
=====
* All requests must be sent to the root URL (``/``).
* All requests must be sent as a POST request with JSON content in the request
body.
* All responses are in JSON.
.. seealso:: :py:mod:`rest_cherrypy <salt.netapi.rest_cherrypy.app>`
The :py:mod:`rest_cherrypy <salt.netapi.rest_cherrypy.app>` module is
more full-featured, production-ready, and has builtin security features.
Deployment
==========
The ``rest_wsgi`` netapi module is a standard Python WSGI app. It can be
deployed one of two ways.
Using a WSGI-compliant web server
---------------------------------
This module may be run via any WSGI-compliant production server such as Apache
with mod_wsgi or Nginx with FastCGI.
It is strongly recommended that this app be used with a server that supports
HTTPS encryption since raw Salt authentication credentials must be sent with
every request. Any apps that access Salt through this interface will need to
manually manage authentication credentials (either username and password or a
Salt token). Tread carefully.
:program:`salt-api` using a development-only server
---------------------------------------------------
If run directly via the salt-api daemon it uses the `wsgiref.simple_server()`__
that ships in the Python standard library. This is a single-threaded server
that is intended for testing and development. **This server does not use
encryption**; please note that raw Salt authentication credentials must be sent
with every HTTP request.
**Running this module via salt-api is not recommended!**
In order to start this module via the ``salt-api`` daemon the following must be
put into the Salt master config::
rest_wsgi:
port: 8001
.. __: http://docs.python.org/2/library/wsgiref.html#module-wsgiref.simple_server
Usage examples
==============
.. http:post:: /
**Example request** for a basic ``test.ping``::
% curl -sS -i \\
-H 'Content-Type: application/json' \\
-d '[{"eauth":"pam","username":"saltdev","password":"saltdev","client":"local","tgt":"*","fun":"test.ping"}]' localhost:8001
**Example response**:
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 89
Content-Type: application/json
{"return": [{"ms--4": true, "ms--3": true, "ms--2": true, "ms--1": true, "ms--0": true}]}
**Example request** for an asynchronous ``test.ping``::
% curl -sS -i \\
-H 'Content-Type: application/json' \\
-d '[{"eauth":"pam","username":"saltdev","password":"saltdev","client":"local_async","tgt":"*","fun":"test.ping"}]' localhost:8001
**Example response**:
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 103
Content-Type: application/json
{"return": [{"jid": "20130412192112593739", "minions": ["ms--4", "ms--3", "ms--2", "ms--1", "ms--0"]}]}
**Example request** for looking up a job ID::
% curl -sS -i \\
-H 'Content-Type: application/json' \\
-d '[{"eauth":"pam","username":"saltdev","password":"saltdev","client":"runner","fun":"jobs.lookup_jid","jid":"20130412192112593739"}]' localhost:8001
**Example response**:
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 89
Content-Type: application/json
{"return": [{"ms--4": true, "ms--3": true, "ms--2": true, "ms--1": true, "ms--0": true}]}
:form lowstate: A list of lowstate data appropriate for the
:ref:`client <client-apis>` interface you are calling.
:status 200: success
:status 401: authentication required
"""
import errno
import logging
import os
import salt
import salt.netapi
import salt.utils.json
# HTTP response codes to response headers map
H = {
    200: "200 OK",
    400: "400 BAD REQUEST",
    401: "401 UNAUTHORIZED",
    404: "404 NOT FOUND",
    405: "405 METHOD NOT ALLOWED",
    406: "406 NOT ACCEPTABLE",
    500: "500 INTERNAL SERVER ERROR",
}

# Loader name for this netapi module; also the master-config section key.
__virtualname__ = "rest_wsgi"

logger = logging.getLogger(__virtualname__)
def __virtual__():
    """
    Load this netapi module only when a 'port' is configured under the
    module's section of the master config.
    """
    mod_opts = __opts__.get(__virtualname__, {})
    return __virtualname__ if "port" in mod_opts else False
class HTTPError(Exception):
    """
    A custom exception carrying the HTTP status code to respond with.
    """

    def __init__(self, code, message):
        self.code = code
        super(HTTPError, self).__init__("{}: {}".format(code, message))
def mkdir_p(path):
    """
    mkdir -p

    Create *path* and any missing parents; an already-existing directory
    is not an error (a pre-existing non-directory still raises OSError).
    http://stackoverflow.com/a/600612/127816
    """
    # exist_ok (Python 3.2+) replaces the old catch-EEXIST-and-isdir dance.
    os.makedirs(path, exist_ok=True)
def read_body(environ):
    """
    Pull the body from the request and return it.

    An absent or empty CONTENT_LENGTH is treated as a zero-length body.
    """
    raw_length = environ.get("CONTENT_LENGTH", "0")
    nbytes = int(raw_length) if raw_length != "" else 0
    return environ["wsgi.input"].read(nbytes)
def get_json(environ):
    """
    Return the request body parsed as JSON.

    Raises HTTPError 406 when the content type is not application/json,
    and HTTPError 400 when the body is not valid JSON.
    """
    if environ.get("CONTENT_TYPE", "") != "application/json":
        raise HTTPError(406, "JSON required")
    body = read_body(environ)
    try:
        return salt.utils.json.loads(body)
    except ValueError as exc:
        raise HTTPError(400, exc)
def get_headers(data, extra_headers=None):
    """
    Build the response header list for start_response().

    Content-Length is derived from *data*; any *extra_headers* mapping is
    merged on top of it.
    """
    headers = {"Content-Length": str(len(data))}
    headers.update(extra_headers or {})
    return list(headers.items())
def run_chunk(environ, lowstate):
    """
    Execute each lowstate chunk through the APIClient stored in the WSGI
    environ, yielding each result in submission order.
    """
    api_client = environ["SALT_APIClient"]
    for low_chunk in lowstate:
        yield api_client.run(low_chunk)
def dispatch(environ):
    """
    Route the request by HTTP method and return a JSON-serializable
    result: GET returns a static banner, POST executes the JSON lowstate
    body, anything else raises HTTPError 405.
    """
    method = environ["REQUEST_METHOD"].upper()
    if method == "POST":
        return run_chunk(environ, get_json(environ))
    if method == "GET":
        return "They found me. I don't know how, but they found me. Run for it, Marty!"
    raise HTTPError(405, "Method Not Allowed")
def saltenviron(environ):
    """
    Make Salt's opts dict and the APIClient available in the WSGI environ.

    When running under salt-api, ``__opts__`` is injected into this module's
    globals and is reused; otherwise the master config is loaded from
    ``$SALT_MASTER_CONFIG`` (default /etc/salt/master).
    """
    # BUG FIX: the old check was ``"__opts__" not in locals()``, which is
    # always true inside a function body, so the injected __opts__ was
    # ignored and the master config re-read on every request.
    if "__opts__" in globals():
        opts = globals()["__opts__"]
    else:
        import salt.config

        opts = salt.config.client_config(
            os.environ.get("SALT_MASTER_CONFIG", "/etc/salt/master")
        )

    environ["SALT_OPTS"] = opts
    environ["SALT_APIClient"] = salt.netapi.NetapiClient(opts)
def application(environ, start_response):
    """
    WSGI entry point: dispatch the request and emit a JSON response.

    Errors are mapped to an HTTP status code and their message becomes the
    JSON payload.
    """
    # Expose Salt's opts and APIClient through the environ.
    saltenviron(environ)

    # Dispatch, folding every failure into a (code, message) pair.
    code = 200
    try:
        resp = list(dispatch(environ))
    except HTTPError as exc:
        code, resp = exc.code, str(exc)
    except salt.exceptions.EauthAuthenticationError as exc:
        code, resp = 401, str(exc)
    except Exception as exc:  # pylint: disable=broad-except
        code, resp = 500, str(exc)

    # Serialize; unserializable results become a 500 with the error text.
    try:
        ret = salt.utils.json.dumps({"return": resp})
    except TypeError as exc:
        code, ret = 500, str(exc)

    # H presumably maps int status codes to HTTP status-line strings; it is
    # defined elsewhere in this module.
    start_response(H[code], get_headers(ret, {"Content-Type": "application/json"}))
    return (ret,)
def get_opts():
    """Load and return the Salt master configuration (__opts__)."""
    import salt.config

    config_path = os.environ.get("SALT_MASTER_CONFIG", "/etc/salt/master")
    return salt.config.client_config(config_path)
def start():
    """Serve the app with wsgiref's simple_server until interrupted."""
    from wsgiref.simple_server import make_server

    # When started outside of salt-api, __opts__ has not been injected.
    if "__opts__" not in globals():
        globals()["__opts__"] = get_opts()

    if __virtual__() is False:
        raise SystemExit(1)

    mod_opts = __opts__.get(__virtualname__, {})

    # pylint: disable=C0103
    server = make_server("localhost", mod_opts["port"], application)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        raise SystemExit(0)
# Allow running this module directly (outside of salt-api) for development.
if __name__ == "__main__":
    start()
|
|
##
# The MIT License (MIT)
#
# Copyright (c) 2015 Stefan Wendler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import struct
import serial
import tempfile
import subprocess
# Frequency unit multipliers (in Hz), used for sample-clock values below.
MHz = 1000000
KHz = 1000
Hz = 1
class RegIoUartDriver(object):
    """
    Low level FPGA register access using serial line.

    Wire protocol (as implemented below): a read sends the register number
    (optionally followed by a count byte for block reads); a write sends the
    register number with bit 0x80 set, followed by the value byte.
    """

    def __init__(self, port='/dev/ttyUSB1', baudrate=921600, timeout=1):
        """
        :param port: serial device connected to the FPGA
        :param baudrate: line speed
        :param timeout: serial read timeout in seconds
        :return:
        """
        self.serial = serial.Serial(port=port, baudrate=baudrate, timeout=timeout)

    def read(self, register, count=0):
        """
        Read a single value (count == 0) or ``count`` values from ``register``.

        :param register: register number
        :param count: number of bytes for a block read, 0 for a single byte
        :return: int for a single read, list of ints for a block read
        """
        if not count:
            self.serial.write(chr(register))
            return ord(self.serial.read(1))

        self.serial.write(chr(register) + chr(0xff & count))
        block = self.serial.read(count)
        return [ord(raw) for raw in block]

    def write(self, register, value):
        """
        Write ``value`` to ``register`` (write flag 0x80 OR-ed onto address).

        :param register: register number
        :param value: byte value to store
        :return:
        """
        self.serial.write(chr(0x80 | register) + chr(0xff & value))
class LogicSnifferDriver(object):
    """
    Access the FPGA registers through Python properties.

    Each property wraps one (or a lo/hi pair of) 8-bit FPGA registers
    reached through the register I/O driver passed to the constructor.
    """

    # Fastest supported sample clock and size of the capture memory.
    MAX_SAMPLE_CLK = 48 * MHz
    MEM_SIZE = 0x4000

    # Register map of the FPGA design.
    REG_LED = 0x00
    REG_MEMADR_RD_LO = 0x01
    REG_MEMADR_RD_HI = 0x02
    REG_MEMADR_WR_LO = 0x03
    REG_MEMADR_WR_HI = 0x04
    REG_MEM = 0x05
    REG_STATUS = 0x06
    REG_TRIG_EN = 0x07
    REG_TRIG_VAL = 0x08
    REG_CLKDIV_LO = 0x09
    REG_CLKDIV_HI = 0x0A

    # Single-bit masks for the eight LEDs.
    LED_0 = 0b00000001
    LED_1 = 0b00000010
    LED_2 = 0b00000100
    LED_3 = 0b00001000
    LED_4 = 0b00010000
    LED_5 = 0b00100000
    LED_6 = 0b01000000
    LED_7 = 0b10000000

    # Single-bit masks for the eight input pins.
    PIN_0 = 0b00000001
    PIN_1 = 0b00000010
    PIN_2 = 0b00000100
    PIN_3 = 0b00001000
    PIN_4 = 0b00010000
    PIN_5 = 0b00100000
    PIN_6 = 0b01000000
    PIN_7 = 0b10000000

    def __init__(self, reg_io):
        """
        :param reg_io: low level register I/O driver (e.g. RegIoUartDriver)
        :return:
        """
        self.rio = reg_io
        # Start with all LEDs off.
        self.rio.write(LogicSnifferDriver.REG_LED, 0)

    def __del__(self):
        """
        Turn the LEDs off again when the driver goes away.

        :return:
        """
        self.rio.write(LogicSnifferDriver.REG_LED, 0)

    @property
    def status(self):
        """
        :return: content of the status register
        """
        return self.rio.read(LogicSnifferDriver.REG_STATUS)

    @status.setter
    def status(self, value):
        """
        :param value: new status byte
        :return:
        """
        self.rio.write(LogicSnifferDriver.REG_STATUS, 0xff & value)

    @property
    def led(self):
        """
        :return: current LED bitmask
        """
        return self.rio.read(LogicSnifferDriver.REG_LED)

    @led.setter
    def led(self, value):
        """
        :param value: LED bitmask to display
        :return:
        """
        self.rio.write(LogicSnifferDriver.REG_LED, 0xff & value)

    @property
    def mem_rd_address(self):
        """
        :return: 16-bit memory read address (lo | hi << 8)
        """
        return (self.rio.read(LogicSnifferDriver.REG_MEMADR_RD_LO) |
                (self.rio.read(LogicSnifferDriver.REG_MEMADR_RD_HI) << 8)) & 0xffff

    @mem_rd_address.setter
    def mem_rd_address(self, value):
        """
        :param value: 16-bit memory read address, split over lo/hi registers
        :return:
        """
        self.rio.write(LogicSnifferDriver.REG_MEMADR_RD_LO, (value >> 0) & 0xff)
        self.rio.write(LogicSnifferDriver.REG_MEMADR_RD_HI, (value >> 8) & 0xff)

    @property
    def mem_wr_address(self):
        """
        :return: 16-bit memory write address (read-only)
        """
        return (self.rio.read(LogicSnifferDriver.REG_MEMADR_WR_LO) |
                (self.rio.read(LogicSnifferDriver.REG_MEMADR_WR_HI) << 8)) & 0xffff

    @property
    def mem(self):
        """
        Read back the whole sample memory in chunks.

        While reading, the LEDs are used as a crude progress bar: one more
        LED is lit for every eighth of the memory transferred.

        :return: list of MEM_SIZE sample bytes (ints)
        """
        # set initial read address to 0
        self.mem_rd_address = 0

        samples = []
        led_state = 0x00
        led_bit = 1
        chunk_size = 0x80

        # BUG FIX: use floor division here. Under Python 3 the original
        # ``/`` produced floats, which breaks both range() below and the
        # modulo progress stepping; ``//`` is identical under Python 2.
        num_chunks = LogicSnifferDriver.MEM_SIZE // chunk_size
        step_cnt = num_chunks // 8

        self.led = led_state

        for i in range(num_chunks):
            if i % step_cnt == 0:
                led_state |= led_bit
                led_bit <<= 1
                self.led = led_state
            samples += self.rio.read(LogicSnifferDriver.REG_MEM, chunk_size)

        return samples

    @property
    def trigger_en(self):
        """
        :return: trigger enable mask register
        """
        return self.rio.read(LogicSnifferDriver.REG_TRIG_EN)

    @trigger_en.setter
    def trigger_en(self, value):
        """
        :param value: trigger enable mask
        :return:
        """
        self.rio.write(LogicSnifferDriver.REG_TRIG_EN, 0xff & value)

    @property
    def trigger_val(self):
        """
        :return: trigger value register
        """
        return self.rio.read(LogicSnifferDriver.REG_TRIG_VAL)

    @trigger_val.setter
    def trigger_val(self, value):
        """
        :param value: trigger value to match
        :return:
        """
        self.rio.write(LogicSnifferDriver.REG_TRIG_VAL, 0xff & value)

    @property
    def sample_clk_div(self):
        """
        :return: 16-bit sample clock divider (lo | hi << 8)
        """
        return (self.rio.read(LogicSnifferDriver.REG_CLKDIV_LO) |
                (self.rio.read(LogicSnifferDriver.REG_CLKDIV_HI) << 8)) & 0xffff

    @sample_clk_div.setter
    def sample_clk_div(self, value):
        """
        :param value: 16-bit divider, split over lo/hi registers
        :return:
        """
        self.rio.write(LogicSnifferDriver.REG_CLKDIV_LO, (value >> 0) & 0xff)
        self.rio.write(LogicSnifferDriver.REG_CLKDIV_HI, (value >> 8) & 0xff)
class LogicSniffer(object):
    """
    High level access to the logic sniffer functionality.
    """

    def __init__(self, ls_drv):
        """
        :param ls_drv: low level driver (LogicSnifferDriver)
        :return:
        """
        self.drv = ls_drv
        self._sample_clk = LogicSnifferDriver.MAX_SAMPLE_CLK
        self._sample_count = LogicSnifferDriver.MEM_SIZE
        self._trigger_en = 0
        self._trigger_val = 0
        self._samples = []

    def __del__(self):
        """
        :return:
        """
        pass

    @property
    def sample_clock(self):
        """
        :return: configured sample clock in Hz
        """
        return self._sample_clk

    @sample_clock.setter
    def sample_clock(self, value):
        """
        Valid clocks:

            48 * MHz    => highest freq!
            24 * MHz
            16 * MHz
            12 * MHz
             8 * MHz
             6 * MHz
             4 * MHz
             2 * MHz
             1 * MHz
           800 * KHz
           600 * KHz
           400 * KHz
           200 * KHz
           100 * KHz
            80 * KHz
            ...
            10 * KHz
             8 * KHz
            ...
             1 * KHz
           800 * Hz    => smallest freq!

        :param value: sample clock in Hz
        :return:
        """
        assert value <= LogicSnifferDriver.MAX_SAMPLE_CLK
        assert value > 799
        self._sample_clk = value

    @property
    def trigger(self):
        """
        :return: (enable mask, value) tuple
        """
        return self._trigger_en, self._trigger_val

    @trigger.setter
    def trigger(self, value):
        """
        :param value: (enable mask, value) tuple
        :return:
        """
        self._trigger_en = value[0]
        self._trigger_val = value[1]

    @property
    def samples(self):
        """
        :return: samples captured by the last :meth:`sample` call
        """
        return self._samples

    def sample(self, use_trigger=False):
        """
        Perform one acquisition: program the clock divider (and optionally
        the trigger), start sampling, busy-wait until the run bit clears,
        then read the whole sample memory back.

        :param use_trigger: arm the trigger before sampling
        :return:
        """
        self._samples = []

        # set sample clock divider
        self.drv.sample_clk_div = int(LogicSnifferDriver.MAX_SAMPLE_CLK / self._sample_clk)

        if use_trigger:
            self.drv.trigger_en = self._trigger_en
            self.drv.trigger_val = self._trigger_val
            # enable sampling and trigger
            self.drv.status = 0x05
        else:
            # enable sampling
            self.drv.status = 0x01

        # wait until sampling is done (bit 0 of the status register clears)
        while self.drv.status & 0x01:
            pass

        # read back the samples
        self._samples = self.drv.mem

    def write_raw(self, file_name):
        """Dump the captured samples as raw bytes to *file_name*."""
        with open(file_name, 'wb') as f:
            for c in self._samples:
                f.write(struct.pack('B', c))

    def write_sr(self, file_name):
        """Convert the capture to a sigrok session file via sigrok-cli."""
        temp = tempfile.NamedTemporaryFile(delete=False)
        try:
            for c in self._samples:
                temp.write(struct.pack('B', c))
        finally:
            temp.close()
        subprocess.call(['sigrok-cli',
                         '-I', 'binary:numchannels=8:samplerate=%d' % self._sample_clk,
                         '-i', temp.name,
                         '-o', file_name])

    def write_vcd(self, file_name, module='LogicSniffer'):
        """
        Write the capture as a VCD file (one wire per input pin).

        :param file_name: output file
        :param module: VCD scope/module name
        :return:
        """
        pin_map = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H'}

        # timescale = 1000000000 / self._sample_clk
        with open(file_name, 'w') as f:
            # f.write('$timescale %fns $end\n' % timescale)
            f.write('$timescale 166us $end\n')
            f.write('$scope module %s $end\n' % module)
            # BUG FIX: ``items()`` instead of the Python-2-only
            # ``iteritems()`` -- identical iteration under Python 2.
            for k, v in pin_map.items():
                f.write('$var wire 1 %s PIN%d $end\n' % (v, k))
            f.write('$upscope $end\n')
            f.write('$enddefinitions $end\n')

            t = 0
            for t in range(len(self._samples)):
                if t == 0:
                    # Initial values for every pin.
                    f.write('#%d\n' % t)
                    for j in range(8):
                        if (self._samples[t] >> j) & 1:
                            f.write('1%s\n' % pin_map[j])
                        else:
                            f.write('0%s\n' % pin_map[j])
                elif self._samples[t] != self._samples[t - 1]:
                    # Emit only the pins that changed since last sample.
                    f.write('#%d\n' % t)
                    for j in range(8):
                        if (self._samples[t] >> j) & 1 != (self._samples[t - 1] >> j) & 1:
                            if (self._samples[t] >> j) & 1:
                                f.write('1%s\n' % pin_map[j])
                            else:
                                f.write('0%s\n' % pin_map[j])
            f.write('#%d\n' % t)

    def dump_samples(self):
        """
        Hex-dump the captured samples to stdout, 32 per row.

        :return:
        """
        i = 0
        for s in self._samples:
            if i % 32 == 0:
                # Python 2 idiom: the trailing comma suppresses the newline
                # (under Python 3 this still prints, one value per line).
                print('\n%04x |' % i),
            print('%02x' % s),
            i += 1
# Demo: capture one triggered run at the maximum sample clock and export the
# results both as a VCD waveform and a sigrok session.
if __name__ == "__main__":
    rio = RegIoUartDriver()
    lsd = LogicSnifferDriver(rio)
    lsn = LogicSniffer(lsd)

    lsn.sample_clock = 48 * MHz  # LogicSnifferDriver.MAX_SAMPLE_CLK
    # Trigger pair is (enable mask 0xff, value 0x01) -- NOTE(review): exact
    # trigger semantics live in the FPGA design; confirm there.
    lsn.trigger = (0xff, 0x01)
    lsn.sample(use_trigger=True)
    lsn.dump_samples()
    lsn.write_vcd('test.vcd')
    lsn.write_sr('test.sr')
|
|
#!/usr/bin/env python
#########################################################################################
# Extract raw intracranial volume in cubic millimeters.
# Method: bias field correction (N4) followed by SIENAx or RBM
# RBM (reverse MNI brain masking) is adapted from Keihaninejad (2010)
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: William Thong
# Modified: 2014-09-01
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import os, sys, getopt, re
# Default parameters
# ==========================================================================================
class Param:
    """Default parameters for the ICV extraction pipeline (see main())."""
    def __init__(self):
        # Apply N4 bias-field correction before brain extraction.
        self.N4Correct = True #Need to be True for correct -f value in BET
        self.ImageDimension = 3
        # MNI152 templates (T1 2mm); ${FSLDIR} is expanded by the shell
        # when the commands built in main() run.
        self.path_atlas='${FSLDIR}/data/standard/MNI152_T1_2mm_brain.nii.gz'
        self.path_mask_wm='${FSLDIR}/data/standard/tissuepriors/avg152T1_white.img'
        self.path_mask_gm='${FSLDIR}/data/standard/tissuepriors/avg152T1_gray.img'
        # default parameters of the main
        self.contrast = 't1'
        self.method = 'sienax'
        self.verbose = 1
        self.debugging=False
def main():
    """Run the ICV extraction pipeline.

    Parses the command line, optionally applies N4 bias-field correction,
    then estimates raw intracranial volume with either FSL's SIENAx or the
    reverse-MNI-brain-masking (RBM) approach, and writes the volume in mm3
    to <output_path>/icv.txt.

    Relies on the module-level ``param`` instance for defaults and on the
    FSL / ANTs / c3d command-line tools being on the PATH (invoked through
    os.system with shell-built strings).
    """
    # Initialization
    input_path = ''
    contrast = param.contrast
    output_path = ''
    method = param.method
    verbose = param.verbose

    # Check input parameters
    if param.debugging:
        pass
    else:
        try:
            opts, args = getopt.getopt(sys.argv[1:],'hi:c:o:d:v:')
        except getopt.GetoptError:
            usage()
        if not opts:
            usage()
        # NOTE(review): this second "no option supplied" check is redundant;
        # the identical test above has already called usage().
        if not opts:
            # no option supplied
            usage()
        for opt, arg in opts:
            if opt == '-h':
                usage()
            elif opt in ("-i"):
                input_path = arg
                exist_image(input_path)
            elif opt in ("-o"):
                output_path = arg
            elif opt in ("-c"):
                contrast = arg
            elif opt in ("-d"):
                method = arg
            elif opt in ("-v"):
                verbose = int(arg)

    # check mandatory arguments
    if input_path == '' :
        print('\nError: Wrong input path, "'+input_path+'" not valid')
        usage()
    if contrast != 't1' and contrast != 't2':
        print('\nError: Wrong contrast, "'+contrast+'" not valid')
        usage()
    if method != 'sienax' and method != 'rbm':
        print('\nError: Wrong method, "'+method+'" not valid')
        usage()

    # Normalize paths (remove trailing slash)
    input_path = os.path.normpath(input_path)

    # Extract path, file and extension
    path_input, file_input, ext_input = extract_fname(input_path)

    # define output_path
    if output_path=='':
        output_path=path_input
    else:
        output_path = os.path.normpath(output_path)+'/'
        #check existence of directories (if exists, removes subdirectories; if not, creates directory)
        if not os.path.exists(output_path):
            os.makedirs(output_path)

    # print arguments
    if verbose:
        print 'Check input parameters...'
        print '.. Image: '+input_path
        print '.. Contrast: '+contrast
        print '.. Output directory: '+output_path
        print '.. Method: '+method

    ####################################
    # The core of the code starts here #
    ####################################

    #-----------------------------------
    # Field correction with N4
    # Note that N3 is implemented in BET, use -B parameter
    #-----------------------------------
    if param.N4Correct:
        cmd='N4BiasFieldCorrection -d '+str(param.ImageDimension)+' -i '+input_path+' -o '+output_path+file_input+'_n4'+ext_input
        print(">> "+cmd)
        os.system(cmd)
        # From here on, work on the bias-corrected image.
        file_input = file_input+'_n4'

    if method=='sienax':
        #-----------------------------------
        # Parameters for bet and use SIENAx
        #-----------------------------------
        # -f Fractional intensity threshold (smaller values give larger brain) equals 0.2 for both contrasts
        # -R Robust brain centre estimation (iterates BET several times)
        if contrast == 't2':
            frac_int = 0.2
            # note that -t2 parameter is important in SIENAx to estimate white matter volume properly
            cmd='sienax '+output_path+file_input+ext_input+' -o '+output_path+file_input+'_sienax -B "-f '+str(frac_int)+' -R" -t2'
            print(">> "+cmd)
            os.system(cmd)
        elif contrast == 't1':
            frac_int = 0.2
            cmd='sienax '+output_path+file_input+ext_input+' -o '+output_path+file_input+'_sienax -B "-f '+str(frac_int)+' -R"'
            print(">> "+cmd)
            os.system(cmd)

        #-----------------------------------
        # Read SIENAx report to extract raw brain ICV in cubic millimeters and write final value in icv.txt
        #-----------------------------------
        report = parse_report(output_path+file_input+'_sienax/report.sienax')

        print('Writting icv value in "'+output_path+'icv.txt"...')
        fo = open(output_path+"icv.txt", "wb")
        fo.write(str(report['brain']['raw']))
        fo.close()

        print('\n\n\nEstimated brain volume: '+str(report['brain']['raw'])+' mm3\n\n\n')
        print('\n\n\nDone!')

    if method=='rbm':
        # Brain extraction
        frac_int = 0.2
        file_output='tmp.brain.'+file_input
        cmd = 'bet '+output_path+file_input+ext_input+' '+output_path+file_output+ext_input+' -R -f '+str(frac_int)
        print(">> "+cmd)
        os.system(cmd)

        # Swap dimensions to correspond to the template
        swapping = get_orient(output_path+file_output+ext_input)
        file_output='tmp.brain.reorient.'+file_input
        cmd = 'fslswapdim '+output_path+'tmp.brain.'+file_input+ext_input+' '+swapping+' '+output_path+file_output+ext_input
        print(">> "+cmd)
        os.system(cmd)

        # Resample in 2mm to match template voxel size
        cmd = 'isct_c3d '+output_path+file_output+ext_input+' -resample-mm 2.0x2.0x2.0mm '+output_path+file_output+'_2mm'+ext_input
        print(">> "+cmd)
        os.system(cmd)
        file_output=file_output+'_2mm'
        # Remember the 2mm image: the inverse transform is applied back onto it below.
        source_img=file_output

        # Registration on template
        cmd = 'flirt -dof 6 -ref '+param.path_atlas+' -in '+output_path+file_output+ext_input+' -out '+output_path+file_output+'_reg'+ext_input+' -omat '+output_path+'tmp.'+file_input+'_affine_transf.mat'
        print(">> "+cmd)
        os.system(cmd)
        file_output = file_output+'_reg'

        # Histogram Normalization of gm template (note: 245 is the max intensity in the MNI152 gm template)
        cmd = 'fslmaths '+param.path_mask_gm+' -div 245 '+output_path+'tmp.brain_gm'+ext_input
        print(">> "+cmd)
        os.system(cmd)

        # Histogram Normalization of wm template (note: 253 is the max intensity in the MNI152 wm template)
        cmd = 'fslmaths '+param.path_mask_wm+' -div 253 '+output_path+'tmp.brain_wm'+ext_input
        print(">> "+cmd)
        os.system(cmd)

        # Apply binary mask for gm
        cmd = 'fslmaths '+output_path+file_output+ext_input+' -mul '+output_path+'tmp.brain_gm'+ext_input+' '+output_path+file_output+'_gm'+ext_input
        print(">> "+cmd)
        os.system(cmd)

        # Apply binary mask for wm
        cmd = 'fslmaths '+output_path+file_output+ext_input+' -mul '+output_path+'tmp.brain_wm'+ext_input+' '+output_path+file_output+'_wm'+ext_input
        print(">> "+cmd)
        os.system(cmd)

        # Merge gm and wm
        cmd = 'fslmaths '+output_path+file_output+'_gm'+ext_input+' -add '+output_path+file_output+'_wm'+ext_input+' '+output_path+file_output+'_masked'+ext_input
        print(">> "+cmd)
        os.system(cmd)

        # Threshold the previous merge by the mean intensity value of non zeros voxels
        p = os.popen('fslstats '+output_path+file_output+'_masked'+ext_input+' -M')
        s = p.readline()
        p.close()
        cmd = 'fslmaths '+output_path+file_output+'_masked'+ext_input+' -thr '+str(float(s)*6./5)+' '+output_path+file_output+'_masked_thr'+ext_input
        print(">> "+cmd)
        os.system(cmd)
        file_output = file_output+'_masked_thr'

        # invert transformation matrix
        cmd = 'convert_xfm -omat '+output_path+'tmp.'+file_input+'_affine_inverse_transf.mat -inverse '+output_path+'tmp.'+file_input+'_affine_transf.mat'
        print(">> "+cmd)
        os.system(cmd)

        # apply the inverse transformation matrix
        cmd = 'flirt -ref '+output_path+source_img+ext_input+' -in '+output_path+file_output+ext_input+' -out '+output_path+file_output+'_inv'+ext_input+' -init '+output_path+'tmp.'+file_input+'_affine_inverse_transf.mat -applyxfm'
        print(">> "+cmd)
        os.system(cmd)
        file_output = file_output+'_inv'

        # reslice in initial space
        cmd = 'isct_c3d '+output_path+file_input+ext_input+' '+output_path+file_output+ext_input+' -reslice-identity -o '+output_path+file_input+'_brain'+ext_input
        print(">> "+cmd)
        os.system(cmd)

        # write final icv in icv.txt (fslstats -V: voxel count and volume in mm3)
        print('Writting icv value in "'+output_path+'icv.txt"...')
        p = os.popen('fslstats '+output_path+file_input+'_brain'+ext_input+' -V')
        s = p.readline()
        p.close()
        fo = open(output_path+"icv.txt", "wb")
        fo.write(s.split(' ',1)[0])
        fo.close()

        print('\n\n\nEstimated brain volume: '+s.split(' ',1)[0]+' mm3\n\n\n')

        # remove all the tempopary files created
        print('\nDelete temporary files...')
        cmd = 'rm '+output_path+'tmp.*'
        print(">> "+cmd)
        os.system(cmd)

        print('\n\n\nDone!')
def usage():
    """Print the usage message and exit with status 2.

    BUG FIX: the original ended the string-continuation chain with a stray
    backslash right before ``sys.exit(2)``, gluing the call into the print
    expression and making the function a syntax error. Rewritten as a single
    print() call, which behaves identically under Python 2 and 3.
    """
    path_func, file_func, ext_func = extract_fname(sys.argv[0])
    print('\n'
          '' + os.path.basename(__file__) + '\n'
          '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
          'Part of the Spinal Cord Toolbox <https://sourceforge.net/projects/spinalcordtoolbox>\n'
          '\n'
          'DESCRIPTION\n'
          '  This program extracts raw intracranial volume in cubic millimeters.\n'
          '  Method: bias field correction (N4) followed by SIENAx or RBM. RBM (reverse MNI brain masking) is adapted from Keihaninejad (2010).'
          '\n'
          'USAGE\n'
          '  ' + file_func + ext_func + ' -i <inputvol> [options]\n\n'
          'MANDATORY ARGUMENTS\n'
          '  -i inputvol          image to extract values from\n'
          '\n'
          'OPTIONAL ARGUMENTS\n'
          '  -c contrast          image contrast. t1 or t2 (default=t1), e.g. -c t1\n'
          '  -o output            set output directory (create directory if not exists) \n'
          '  -d method            method to estimate ICV. sienax or rbm (default=sienax), e.g. -d rbm \n'
          '  -v verbose           verbose. 0 or 1. (default=1).\n'
          '\n')
    sys.exit(2)
def exist_image(fname):
    """Exit with status 2 unless *fname* (or fname.nii / fname.nii.gz) exists."""
    candidates = (fname, fname + '.nii', fname + '.nii.gz')
    if any(os.path.isfile(candidate) for candidate in candidates):
        return
    print('\nERROR: '+fname+' does not exist. Exit program.\n')
    sys.exit(2)
def extract_fname(fname):
    """Split *fname* into (path, basename-without-extension, extension).

    The path keeps a trailing slash and is '' when *fname* has no directory
    part. Double extensions ending in '.gz' (e.g. '.nii.gz') are returned as
    one extension.

    Fixes: the original stripped the directory with str.replace, which also
    mangled the basename whenever the directory string reappeared inside it,
    and it hard-coded '.nii.gz' as the extension for ANY '.gz' file.
    """
    # extract path (with trailing slash, '' when there is none)
    dir_part = os.path.dirname(fname)
    path_fname = dir_part + '/' if dir_part else ''
    # extract file and extension
    file_fname, ext_fname = os.path.splitext(os.path.basename(fname))
    # keep double extensions like .nii.gz together
    if ext_fname == '.gz':
        file_fname, inner_ext = os.path.splitext(file_fname)
        ext_fname = inner_ext + '.gz'
    return path_fname, file_fname, ext_fname
def get_orient(fname):
    """Return the fslswapdim axis arguments that reorient *fname*.

    Uses ``fslorient`` to decide between NEUROLOGICAL and RADIOLOGICAL
    storage.

    Fixes: when the orientation could not be determined the original fell
    through and crashed with an unbound ``swap`` (NameError); now it prints
    the error and exits with status 2.
    """
    print("Orientation of the image:")
    pipe = os.popen('fslorient ' + fname)
    line = pipe.readline()
    pipe.close()
    print(line)
    if 'NEUROLOGICAL' in line:
        return 'LR PA IS'
    if 'RADIOLOGICAL' in line:
        return 'RL PA IS'
    print("Error could not find the orientation with fslorient")
    sys.exit(2)
def parse_report(path):
    """ Return the volume informations contained in the SIENAX report. This
    is a dictionary with keys "grey", "white", and "brain".
    The informations for the different tissues is a dictionary with the
    normalized and raw values, in cubic millimeters.

    adapted from: http://code.google.com/p/medipy/source/browse/plugins/fsl/sienax.py
    see license: http://code.google.com/p/medipy/source/browse/LICENSE

    Fixes: the report file is now closed deterministically (with-block;
    the original leaked the handle) and the regexes are raw strings.
    """
    report = {}
    with open(path) as fd:
        for line in fd:
            # Tissue lines look like "BRAIN <normalized> <raw>".
            for tissue in ("GREY", "WHITE", "BRAIN"):
                measure = re.match(tissue + r"\s+([\d+\.]+)\s+([\d+\.]+)", line)
                if measure:
                    report[tissue.lower()] = {
                        "normalized": float(measure.group(1)),
                        "raw": float(measure.group(2)),
                    }
            vscale = re.match(r"VSCALING ([\d\.]+)", line)
            if vscale:
                report["vscale"] = float(vscale.group(1))
    return report
# START PROGRAM
# ==========================================================================================
if __name__ == "__main__":
    # Module-level ``param`` instance: main() and the helpers read it globally.
    param = Param()
    main()
|
|
import os
import mock
import redis
from scrapy import Request, Spider
from unittest import TestCase
from . import connection
from .dupefilter import RFPDupeFilter
from .queue import SpiderQueue, SpiderPriorityQueue, SpiderStack
from .scheduler import Scheduler
# allow test settings from environment
# FIX: the host lookup used the misspelled "REDIST_HOST" variable, so a
# REDIS_HOST value in the environment was silently ignored.
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = int(os.environ.get('REDIS_PORT', 6379))
class RedisTestMixin(object):
    """Shared helpers for tests that talk to a redis server."""

    @property
    def server(self):
        """Lazily created redis client, cached on the instance."""
        if not hasattr(self, '_redis'):
            self._redis = redis.Redis(REDIS_HOST, REDIS_PORT)
        return self._redis

    def clear_keys(self, prefix):
        """Delete every redis key that starts with *prefix*."""
        matched = self.server.keys(prefix + '*')
        if matched:
            self.server.delete(*matched)
class DupeFilterTest(RedisTestMixin, TestCase):
    """Exercise RFPDupeFilter against a live redis instance."""

    def setUp(self):
        self.key = 'scrapy_redis:tests:dupefilter:'
        self.df = RFPDupeFilter(self.server, self.key)

    def tearDown(self):
        self.clear_keys(self.key)

    def test_dupe_filter(self):
        request = Request('http://example.com')
        # First sighting is new; the second one must be flagged as seen.
        self.assertFalse(self.df.request_seen(request))
        self.assertTrue(self.df.request_seen(request))
        self.df.close('nothing')
class QueueTestMixin(RedisTestMixin):
    """Common setup/teardown and tests shared by the queue test cases.

    Subclasses set ``queue_cls`` to the queue implementation under test.
    """

    queue_cls = None

    def setUp(self):
        self.spider = Spider('myspider')
        self.key = 'scrapy_redis:tests:%s:queue' % self.spider.name
        # FIX: pass the spider created above instead of instantiating a
        # second, unrelated Spider('myspider') for the queue.
        self.q = self.queue_cls(self.server, self.spider, self.key)

    def tearDown(self):
        self.clear_keys(self.key)

    def test_clear(self):
        self.assertEqual(len(self.q), 0)

        for i in range(10):
            # XXX: can't use same url for all requests as SpiderPriorityQueue
            # uses redis' set implemention and we will end with only one
            # request in the set and thus failing the test. It should be noted
            # that when using SpiderPriorityQueue it acts as a request
            # duplication filter whenever the serielized requests are the same.
            # This might be unwanted on repetitive requests to the same page
            # even with dont_filter=True flag.
            req = Request('http://example.com/?page=%s' % i)
            self.q.push(req)
        self.assertEqual(len(self.q), 10)

        self.q.clear()
        self.assertEqual(len(self.q), 0)
class SpiderQueueTest(QueueTestMixin, TestCase):
    """FIFO queue: requests pop in the order they were pushed."""

    queue_cls = SpiderQueue

    def test_queue(self):
        requests = [Request('http://example.com/page1'),
                    Request('http://example.com/page2')]
        for request in requests:
            self.q.push(request)

        popped = [self.q.pop(), self.q.pop()]
        self.assertEqual([r.url for r in popped],
                         [r.url for r in requests])
class SpiderPriorityQueueTest(QueueTestMixin, TestCase):
    """Priority queue: the highest-priority request pops first."""

    queue_cls = SpiderPriorityQueue

    def test_queue(self):
        mid = Request('http://example.com/page1', priority=100)
        low = Request('http://example.com/page2', priority=50)
        high = Request('http://example.com/page2', priority=200)
        for request in (mid, low, high):
            self.q.push(request)

        # Expect strictly descending priority on the way out.
        self.assertEqual(self.q.pop().url, high.url)
        self.assertEqual(self.q.pop().url, mid.url)
        self.assertEqual(self.q.pop().url, low.url)
class SpiderStackTest(QueueTestMixin, TestCase):
    """Stack: the most recently pushed request pops first (LIFO)."""

    queue_cls = SpiderStack

    def test_queue(self):
        first = Request('http://example.com/page1')
        second = Request('http://example.com/page2')
        self.q.push(first)
        self.q.push(second)

        self.assertEqual(self.q.pop().url, second.url)
        self.assertEqual(self.q.pop().url, first.url)
class SchedulerTest(RedisTestMixin, TestCase):
    """Integration tests of the redis-backed Scheduler: enqueueing with the
    dupefilter in play, and queue persistence across open/close cycles."""

    def setUp(self):
        self.persist = False
        self.key_prefix = 'scrapy_redis:tests:'
        self.queue_key = self.key_prefix + '%(spider)s:requests'
        self.dupefilter_key = self.key_prefix + '%(spider)s:dupefilter'
        self.idle_before_close = 0
        self.scheduler = Scheduler(self.server, self.persist, self.queue_key,
                                   SpiderQueue, self.dupefilter_key,
                                   self.idle_before_close)
        self.spider = Spider('myspider')

    def tearDown(self):
        # Drop every redis key created by the tests.
        self.clear_keys(self.key_prefix)

    def test_scheduler(self):
        # default no persist
        self.assertFalse(self.scheduler.persist)

        self.scheduler.open(self.spider)
        self.assertEqual(len(self.scheduler), 0)

        req = Request('http://example.com')
        self.scheduler.enqueue_request(req)
        self.assertTrue(self.scheduler.has_pending_requests())
        self.assertEqual(len(self.scheduler), 1)

        # dupefilter in action: the same request must not be queued twice
        self.scheduler.enqueue_request(req)
        self.assertEqual(len(self.scheduler), 1)

        out = self.scheduler.next_request()
        self.assertEqual(out.url, req.url)

        self.assertFalse(self.scheduler.has_pending_requests())
        self.assertEqual(len(self.scheduler), 0)

        self.scheduler.close('finish')

    def test_scheduler_persistent(self):
        # TODO: Improve this test to avoid the need to check for log messages.
        self.spider.log = mock.Mock(spec=self.spider.log)

        self.scheduler.persist = True
        self.scheduler.open(self.spider)
        self.assertEqual(self.spider.log.call_count, 0)

        self.scheduler.enqueue_request(Request('http://example.com/page1'))
        self.scheduler.enqueue_request(Request('http://example.com/page2'))
        self.assertTrue(self.scheduler.has_pending_requests())
        self.scheduler.close('finish')

        # Reopening with persist=True must restore the two queued requests.
        self.scheduler.open(self.spider)
        self.spider.log.assert_has_calls([
            mock.call("Resuming crawl (2 requests scheduled)"),
        ])
        self.assertEqual(len(self.scheduler), 2)

        # With persistence disabled again, close() drops the queue.
        self.scheduler.persist = False
        self.scheduler.close('finish')

        self.assertEqual(len(self.scheduler), 0)
class ConnectionTest(TestCase):
    """Tests for building a redis client via connection.from_settings()."""

    @staticmethod
    def _connect_args(settings):
        # Build a server from the settings dict and return the kwargs its
        # connection pool will use.
        server = connection.from_settings(settings)
        return server.connection_pool.connection_kwargs

    def test_redis_url(self):
        # We can get a connection from just REDIS_URL.
        args = self._connect_args(dict(
            REDIS_URL = 'redis://foo:bar@localhost:9001/42'
        ))
        self.assertEqual(args['host'], 'localhost')
        self.assertEqual(args['port'], 9001)
        self.assertEqual(args['password'], 'bar')
        self.assertEqual(args['db'], 42)

    def test_redis_host_port(self):
        # We can get a connection from REDIS_HOST/REDIS_PORT.
        args = self._connect_args(dict(
            REDIS_HOST = 'localhost',
            REDIS_PORT = 9001
        ))
        self.assertEqual(args['host'], 'localhost')
        self.assertEqual(args['port'], 9001)

    def test_redis_url_precedence(self):
        # REDIS_URL takes precedence over REDIS_HOST/REDIS_PORT.
        args = self._connect_args(dict(
            REDIS_HOST = 'baz',
            REDIS_PORT = 1337,
            REDIS_URL = 'redis://foo:bar@localhost:9001/42'
        ))
        self.assertEqual(args['host'], 'localhost')
        self.assertEqual(args['port'], 9001)
        self.assertEqual(args['password'], 'bar')
        self.assertEqual(args['db'], 42)

    def test_redis_host_port_fallback(self):
        # We fallback to REDIS_HOST/REDIS_PORT if REDIS_URL is None.
        args = self._connect_args(dict(
            REDIS_HOST = 'baz',
            REDIS_PORT = 1337,
            REDIS_URL = None
        ))
        self.assertEqual(args['host'], 'baz')
        self.assertEqual(args['port'], 1337)

    def test_redis_default(self):
        # We use default values for REDIS_HOST/REDIS_PORT when unset.
        args = self._connect_args(dict())
        self.assertEqual(args['host'], 'localhost')
        self.assertEqual(args['port'], 6379)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Modifications copyright (C) 2016 cwhypt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
"""Routine for decoding the CIFAR-10 binary file format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24

# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
# Canonical CIFAR-10 split sizes: 50k training and 10k evaluation images.
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_cifar10(filename_queue):
    """Reads and parses examples from CIFAR10 data files.

    Recommendation: if you want N-way read parallelism, call this function
    N times. This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.

    Args:
      filename_queue: A queue of strings with the filenames to read from.

    Returns:
      An object representing a single example, with the following fields:
        height: number of rows in the result (32)
        width: number of columns in the result (32)
        depth: number of color channels in the result (3)
        key: a scalar string Tensor describing the filename & record number
          for this example.
        label: an int32 Tensor with the label in the range 0..9.
        uint8image: a [height, width, depth] uint8 Tensor with the image data
    """

    class CIFAR10Record(object):
        pass

    result = CIFAR10Record()

    # Fixed record layout of the CIFAR-10 binary format: one label byte
    # followed by the flattened image, stored depth-major.
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of
    # the input format.
    label_bytes = 1  # 2 for CIFAR-100
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    record_bytes = label_bytes + image_bytes

    # Read one fixed-length record per call. No header or footer in the
    # CIFAR-10 format, so header_bytes and footer_bytes keep their default 0.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, raw_record = reader.read(filename_queue)

    # Reinterpret the record string as a flat vector of uint8.
    record_as_bytes = tf.decode_raw(raw_record, tf.uint8)

    # The first byte is the label (uint8 -> int32).
    result.label = tf.cast(
        tf.strided_slice(record_as_bytes, [0], [label_bytes]), tf.int32)

    # The remaining bytes are the image: reshape the flat
    # [depth * height * width] data to [depth, height, width] ...
    depth_major = tf.reshape(
        tf.strided_slice(record_as_bytes, [label_bytes],
                         [label_bytes + image_bytes]),
        [result.depth, result.height, result.width])
    # ... then transpose to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])

    return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
    """Construct a queued batch of images and labels.

    Args:
        image: 3-D Tensor of [height, width, 3] of type.float32.
        label: 1-D Tensor of type.int32
        min_queue_examples: int32, minimum number of samples to retain
            in the queue that provides of batches of examples.
        batch_size: Number of images per batch.
        shuffle: boolean indicating whether to use a shuffling queue.

    Returns:
        images: Images. 4D tensor of [batch_size, height, width, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # 'num_preprocess_threads' worker threads fill a queue holding at least
    # 'min_queue_examples' preprocessed examples; batches are drawn from it.
    num_preprocess_threads = 16
    common_kwargs = dict(
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size)
    if shuffle:
        images, label_batch = tf.train.shuffle_batch(
            [image, label],
            min_after_dequeue=min_queue_examples,
            **common_kwargs)
    else:
        images, label_batch = tf.train.batch([image, label], **common_kwargs)

    # Display the training images in the visualizer.
    tf.summary.image('images', images)

    return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(data_dir, batch_size):
    """Construct distorted input for CIFAR training using the Reader ops.

    Args:
        data_dir: Path to the CIFAR-10 data directory.
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.

    Raises:
        ValueError: if any of the five training batch files is missing.
    """
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
                 for i in xrange(1, 6)]
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)

    # Create a queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)

    # Read examples from files in the filename queue.
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)

    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Image processing for training the network. Note the many random
    # distortions applied to the image.

    # Randomly crop a [height, width] section of the image.
    distorted_image = tf.random_crop(reshaped_image, [height, width, 3])

    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)

    # Because these operations are not commutative, consider randomizing
    # the order their operation.
    # NOTE: the pixels are float32 but still in [0, 255] at this point
    # (standardization happens below), hence max_delta=63 — about a quarter
    # of the full range.
    distorted_image = tf.image.random_brightness(distorted_image,
                                                 max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image,
                                               lower=0.2, upper=1.8)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_standardization(distorted_image)

    # Set the shapes of tensors.
    float_image.set_shape([height, width, 3])
    read_input.label.set_shape([1])

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples_in_queue)
    print('Filling queue with %d CIFAR images before starting to train. '
          'This will take a few minutes.' % min_queue_examples)

    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples, batch_size,
                                           shuffle=True)
def inputs(eval_data, data_dir, batch_size):
    """Construct input for CIFAR evaluation using the Reader ops.

    Args:
        eval_data: bool, indicating if one should use the train or eval data set.
        data_dir: Path to the CIFAR-10 data directory.
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.

    Raises:
        ValueError: if any expected data file is missing.
    """
    if not eval_data:
        filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
                     for i in xrange(1, 6)]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    else:
        filenames = [os.path.join(data_dir, 'test_batch.bin')]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)

    # Create a queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)

    # Read examples from files in the filename queue.
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)

    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Image processing for evaluation: deterministic central crop only
    # (no random distortions, unlike distorted_inputs above).
    resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                           height, width)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_standardization(resized_image)

    # Set the shapes of tensors.
    float_image.set_shape([height, width, 3])
    read_input.label.set_shape([1])

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)

    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples, batch_size,
                                           shuffle=False)
|
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
CPU backend layers
"""
import math
from functools import reduce  # Python 3 compatibility: reduce is no longer a builtin
from operator import mul
def ceil_div(x, y):
    """
    Integer ceiling of x / y, avoiding float math and the math module.
    """
    quot, rem = divmod(x, y)
    # Python floor-divides toward -inf, so bumping by one whenever a
    # remainder exists yields the ceiling for any sign combination.
    return quot + 1 if rem else quot
class ConvLayer(object):
    """
    ConvLayer parameter object.
    This then is passed as an argument to all the convolution operations.

    N: Number of images in mini-batch
    C: Number of input feature maps
    K: Number of output feature maps

    D: Depth of input image
    H: Height of input image
    W: Width of input image

    T: Depth of filter kernel
    R: Height of filter kernel
    S: Width of filter kernel

    padding: amount of zero-padding around the given edge
    strides: factor to step the filters by in a given direction
    """

    def __init__(self, lib, dtype,
                 N, C, K,
                 D=1, H=1, W=1,
                 T=1, R=1, S=1,
                 pad_d=0, pad_h=0, pad_w=0,
                 str_d=1, str_h=1, str_w=1,
                 bsum=False):
        # Compute the output spatial dimensions
        M = lib.output_dim(D, T, pad_d, str_d)
        P = lib.output_dim(H, R, pad_h, str_h)
        Q = lib.output_dim(W, S, pad_w, str_w)

        self.C = C
        self.K = K
        self.M = M
        self.P = P
        self.Q = Q
        self.NCK = (N, C, K)
        self.TRS = (T, R, S)
        self.DHW = (D, H, W)
        self.MPQ = (M, P, Q)
        self.padding = (pad_d, pad_h, pad_w)
        self.strides = (str_d, str_h, str_w)
        self.bsum = bsum

        # 5D tensor layouts: I=input, F=filter, O=output (N innermost).
        self.dimI = (C, D, H, W, N)
        self.dimF = (C, T, R, S, K)
        self.dimO = (K, M, P, Q, N)
        # Flattened 2D views of the same tensors.
        self.dimI2 = (C * D * H * W, N)
        self.dimF2 = (C * T * R * S, K)
        self.dimO2 = (K * M * P * Q, N)
        self.sizeI = reduce(mul, self.dimI, 1)
        self.sizeF = reduce(mul, self.dimF, 1)
        self.sizeO = reduce(mul, self.dimO, 1)
        self.nOut = reduce(mul, self.MPQ, 1) * K

        # Precomputed per-output-position windows for fprop (m/p/q over
        # M/P/Q) and per-input-position contribution lists for bprop
        # (d/h/w over D/H/W), one list per spatial axis.
        self.mSlice = [self.fprop_slice(m, T, D, pad_d, str_d) for m in range(M)]
        self.pSlice = [self.fprop_slice(p, R, H, pad_h, str_h) for p in range(P)]
        self.qSlice = [self.fprop_slice(q, S, W, pad_w, str_w) for q in range(Q)]
        self.dSlice = [self.bprop_slice(d, T, M, pad_d, str_d) for d in range(D)]
        self.hSlice = [self.bprop_slice(h, R, P, pad_h, str_h) for h in range(H)]
        self.wSlice = [self.bprop_slice(w, S, Q, pad_w, str_w) for w in range(W)]

    def fprop_slice(self, q, S, X, padding, strides):
        """Window for output position ``q`` along one axis.

        S is the filter extent, X the input extent.  Returns
        (filter-tap slice, input slice, window length), clipping the window
        where it overhangs the zero-padded input borders.
        """
        firstF = 0
        lastF = S - 1
        qs = q * strides - padding
        x2 = qs + lastF
        if qs < 0:
            # window starts in the leading padding: drop those filter taps
            firstF = -qs
            qs = 0
        if x2 >= X:
            # window runs past the input end: drop the trailing filter taps
            dif = x2 - X + 1
            lastF -= dif
            x2 -= dif
        return (slice(firstF, lastF+1), slice(qs, x2+1), lastF-firstF+1)

    def bprop_slice(self, x, S, Q, padding, strides):
        """Contributors to input position ``x`` along one axis.

        Returns (sliceF, sliceO): parallel lists where output position
        sliceO[i] touched ``x`` through the mirrored filter tap sliceF[i];
        used to accumulate the gradient at ``x``.
        """
        qs = x - (S - padding - 1)
        sliceF = []
        sliceO = []
        for s in range(S):
            q = qs + s
            # only stride-aligned positions map back to real outputs
            if q % strides == 0:
                q //= strides
                if q >= 0 and q < Q:
                    sliceF.append(S - s - 1)
                    sliceO.append(q)
        return sliceF, sliceO
class DeconvLayer(ConvLayer):
    """
    DeconvLayer parameter object.
    This then is passed as an argument to all the convolution operations.

    N: Number of images in mini-batch
    C: Number of output feature maps
    K: Number of input feature maps

    P: Height of input
    Q: Width of input

    D: Depth of output image
    H: Height of output image
    W: Width of output image

    T: Depth of filter kernel
    R: Height of filter kernel
    S: Width of filter kernel

    padding: amount of zero-padding around the given edge
    strides: factor to step the filters by in a given direction
    """

    def __init__(self, lib, dtype,
                 N, C, K,
                 P, Q,
                 R=1, S=1,
                 pad_d=0, pad_h=0, pad_w=0,
                 str_d=1, str_h=1, str_w=1):
        # NOTE: deliberately does NOT call ConvLayer.__init__ -- every field
        # is recomputed here with the input/output roles swapped.

        # Set T, M and D to be consts.
        T = 1
        M = 1
        D = 1

        # Output H/W cannot be recovered exactly from P/Q (the mapping is
        # not unique); this is the conventional inverse of the conv formula.
        H = (P - 1) * str_h - 2 * pad_h + R
        W = (Q - 1) * str_w - 2 * pad_w + S

        # Add below to get H and W tracked
        self.H = H
        self.W = W

        self.C = C
        self.K = K
        self.M = M
        self.P = P
        self.Q = Q
        self.NCK = (N, C, K)
        self.TRS = (T, R, S)
        self.DHW = (D, H, W)
        self.MPQ = (M, P, Q)
        self.padding = (pad_d, pad_h, pad_w)
        self.strides = (str_d, str_h, str_w)

        # Did not change the names of dimI, dimO, etc. even though dimI is
        # now technically the dimension of the output
        self.dimI = (C, D, H, W, N)
        self.dimF = (C, T, R, S, K)
        self.dimO = (K, M, P, Q, N)
        self.dimI2 = (C * D * H * W, N)
        self.dimF2 = (C * T * R * S, K)
        self.dimO2 = (K * M * P * Q, N)
        self.sizeI = reduce(mul, self.dimI, 1)
        self.sizeF = reduce(mul, self.dimF, 1)
        self.sizeO = reduce(mul, self.dimO, 1)

        # nOut has to change because P and Q are now the inputs
        self.nOut = reduce(mul, self.DHW, 1) * C

        # Roles swapped relative to ConvLayer: bprop-style slices cover the
        # (larger) D/H/W extents, fprop-style slices the M/P/Q extents.
        self.dSlice = [self.bprop_slice(d, T, M, pad_d, str_d) for d in range(D)]
        self.hSlice = [self.bprop_slice(h, R, P, pad_h, str_h) for h in range(H)]
        self.wSlice = [self.bprop_slice(w, S, Q, pad_w, str_w) for w in range(W)]
        self.mSlice = [self.fprop_slice(m, T, D, pad_d, str_d) for m in range(M)]
        self.pSlice = [self.fprop_slice(p, R, H, pad_h, str_h) for p in range(P)]
        self.qSlice = [self.fprop_slice(q, S, W, pad_w, str_w) for q in range(Q)]
class PoolLayer(object):
    """
    PoolLayer parameter object.
    This then is passed as an argument to all pooling kernels.

    op: max, avg, l2 pooling
    N: Number of images in mini-batch
    C: Number of input feature maps
    D: Depth of input image
    H: Height of input image
    W: Width of input image

    J: Size of feature map pooling window (maxout n_pieces)
    T: Depth of pooling window
    R: Height of pooling window
    S: Width of pooling window

    padding: amount of zero-padding around the given image or feature map edge
    strides: factor to step the window by in a given direction (overlap allowed)

    Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.
    """

    def __init__(self, lib, dtype,
                 op, N, C,
                 D=1, H=1, W=1,
                 J=1, T=1, R=1, S=1,
                 pad_c=0, pad_d=0, pad_h=0, pad_w=0,
                 str_c=None, str_d=None, str_h=None, str_w=None):
        # default to non-overlapping
        if str_c is None:
            str_c = J
        if str_d is None:
            str_d = T
        if str_h is None:
            str_h = R
        if str_w is None:
            str_w = S

        # Overlap factor: product of per-axis window/stride ratios when any
        # stride is smaller than its window; 0.0 means non-overlapping.
        if str_c < J or str_d < T or str_h < R or str_w < S:
            self.overlap = (math.ceil(float(J) / str_c) *
                            math.ceil(float(T) / str_d) *
                            math.ceil(float(R) / str_h) *
                            math.ceil(float(S) / str_w))
        else:
            self.overlap = 0.0

        # Compute the output dimensions
        K = lib.output_dim(C, J, pad_c, str_c, pooling=True)
        M = lib.output_dim(D, T, pad_d, str_d, pooling=True)
        P = lib.output_dim(H, R, pad_h, str_h, pooling=True)
        Q = lib.output_dim(W, S, pad_w, str_w, pooling=True)

        self.op = op
        self.C = C
        self.K = K
        self.M = M
        self.P = P
        self.Q = Q
        self.N = N
        self.JTRS = (J, T, R, S)
        self.DHW = (D, H, W)
        self.MPQ = (M, P, Q)
        self.padding = (pad_c, pad_d, pad_h, pad_w)
        self.strides = (str_c, str_d, str_h, str_w)

        self.dimI = (C, D, H, W, N)
        self.dimO = (K, M, P, Q, N)
        self.dimF2 = None  # pooling has no filter tensor
        self.dimI2 = (C * D * H * W, N)
        self.dimO2 = (K * M * P * Q, N)
        self.sizeI = reduce(mul, self.dimI, 1)
        self.sizeO = reduce(mul, self.dimO, 1)
        self.nOut = reduce(mul, self.MPQ, 1) * K

        # Precomputed pooling windows per output position, one list per axis
        # (k: feature-map axis, m/p/q: depth/height/width).
        self.kSlice = [self.pool_slice(k, J, C, pad_c, str_c) for k in range(K)]
        self.mSlice = [self.pool_slice(m, T, D, pad_d, str_d) for m in range(M)]
        self.pSlice = [self.pool_slice(p, R, H, pad_h, str_h) for p in range(P)]
        self.qSlice = [self.pool_slice(q, S, W, pad_w, str_w) for q in range(Q)]

    def pool_slice(self, q, S, X, padding, strides):
        """Input window at output position ``q`` along one axis.

        S is the window extent and X the input extent.  Returns
        (input slice, window length) clipped to [0, X).

        NOTE(review): if the window falls entirely outside the input
        (possible only with extreme padding), ``lastI`` is never assigned
        and this raises UnboundLocalError -- presumably lib.output_dim
        guarantees at least one valid position; confirm.
        """
        qs = q * strides - padding
        firstI = None
        for s in range(S):
            x = qs + s
            if x >= 0 and x < X:
                if firstI is None:
                    firstI = x
                lastI = x
        return (slice(firstI, lastI+1), lastI-firstI+1)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import psycopg2
import sys
from xlrd import open_workbook
con = None  # module-level connection handle checked by the top-level ``finally``
program_list = []  # accumulates (name, description, link) tuples from the sheet
measure_list = []  # accumulates measure tuples from the sheet
##This is a bit of code that will parse an excel file and read records to a postgreSQL db. For this example the data consists
##of clinical quality measures and the government programs asscociated with them.
##Here the function opens the excel workbook .xlsx and the specific spreadsheet then reads the list of program names at the
##top of the sheet. It iterates over the correct cells using a for loop over a range of the correct cells and returns a list
##of the desired cell values.
def readProgs():
    """Read program (name, description, link) triples from the workbook.

    Program metadata lives in rows 0-2 of sheet 1, one program per column
    starting at column 5.  Appends to and returns the module-level
    ``program_list``.
    """
    # BUG FIX: xlrd.open_workbook's second positional argument is a logfile,
    # not a mode string -- the original passed 'r' there by mistake.
    book = open_workbook('aug_3_measures.xlsx')
    sheet = book.sheet_by_index(1)
    for col_index in range(5, sheet.ncols):
        pName = sheet.cell(0, col_index).value
        pDes = sheet.cell(1, col_index).value
        pLink = sheet.cell(2, col_index).value
        program_list.append((pName, pDes, pLink))
    return program_list
##Here a connection with a db is opened using the psycopg2 module. Then a create query for a table called programs is written here.
##The readProgs() function is then called to return the correct cell values. Then the function disects the the list and puts
##the values into an insert statement using a for loop. Lastly the .commit() writes the statement.
def writeProgs():
    """Create the ``programs`` table and load it from the spreadsheet.

    Opens its own connection and closes it when done (the module-level
    ``con`` is never assigned, so cleanup must happen here).
    """
    con = psycopg2.connect(database='chriscalhoun', user='chriscalhoun')
    try:
        cur = con.cursor()
        cur.execute("CREATE TABLE programs("
                    "id serial PRIMARY KEY,"
                    "program_name VARCHAR(200),"
                    "program_description TEXT,"
                    "program_link VARCHAR(200))")
        # Parameterized executemany instead of string concatenation: psycopg2
        # does the quoting, which the original got wrong for values containing
        # a quote character (and which was SQL-injectable).
        cur.executemany(
            "INSERT INTO programs(program_name, program_description,"
            " program_link) VALUES (%s, %s, %s)",
            readProgs())
        con.commit()
    finally:
        con.close()
##Here we are grabbing a different section of data from the excel spreadsheet using the same method as the readProgs() and returning
##a list with the desired values.
def readMeasures():
    """Read measure rows (from row 3 down) off sheet 1 of the workbook.

    Appends (description, care_setting, nqf_id, pqrs_id, cms_id) tuples to
    the module-level ``measure_list`` and returns it.  Text cells are
    ASCII-stripped; id cells are stringified (excel numbers arrive as float).
    """
    # BUG FIX: dropped the bogus 'r' logfile argument to open_workbook and
    # the stray no-op line u'\xae'.encode('utf-8') inside the loop.
    book = open_workbook('aug_3_measures.xlsx')
    sheet = book.sheet_by_index(1)
    for row_index in range(3, sheet.nrows):
        measure_description = sheet.cell(row_index, 3).value
        care_setting = sheet.cell(row_index, 4).value
        cms_id = sheet.cell(row_index, 0).value
        nqf_id = sheet.cell(row_index, 1).value
        pqrs_id = sheet.cell(row_index, 2).value
        measure_list.append((measure_description.encode('ascii', 'ignore'),
                             care_setting.encode('ascii', 'ignore'),
                             str(nqf_id),
                             str(pqrs_id),
                             str(cms_id)))
    return measure_list
##Here we are opening a db and creating a table again called measures this time. Then calling the readMeasures() function
##and using a for loop to itereate over the returned list and writing multiple insert statements before commiting the records.
def writeMeasures():
    """Create the ``measures`` table and load it from the spreadsheet.

    Opens and closes its own connection, mirroring writeProgs().
    """
    con = psycopg2.connect(database='chriscalhoun', user='chriscalhoun')
    try:
        cur = con.cursor()
        cur.execute("CREATE TABLE measures("
                    "measure_id serial PRIMARY KEY NOT NULL,"
                    "measure_description TEXT NOT NULL,"
                    "care_setting TEXT,"
                    "nqf_id varchar(20),"
                    "pqrs_id varchar(30),"
                    "cms_id varchar(60))")
        # Parameterized insert: avoids SQL injection and the quoting bugs of
        # the original string-concatenated VALUES clause.  Tuple order from
        # readMeasures() matches the column list exactly.
        cur.executemany(
            "INSERT INTO measures(measure_description, care_setting,"
            " nqf_id, pqrs_id, cms_id) VALUES (%s, %s, %s, %s, %s)",
            readMeasures())
        con.commit()
    finally:
        con.close()
## Here we are grabbing the data from the excel sheet and checking the cell for certain values. In this example either a cell with
##data or a blank cell. Be aware that excel and python data types can vary so it is usually the case that data in your excel file
##will need to be converted to text format.
##The function goes through the correct cells in the specified range of rows and cols. Then checks if the cell has anything in it.
##If the cell has data in it the data is appended to a list along with the measure name and the program for that measure.
##Then the list is returned.
def measure_program_check():
    """Scan the measure/program grid and report which pairs are flagged.

    For every measure row (row 3 down) and program column (5..25) on sheet 0,
    a non-empty cell means the measure belongs to that program.  Returns a
    list of (True, program_name, measure_description) tuples with both
    strings ASCII-stripped.
    """
    checkProgram_list = []
    # BUG FIX: dropped the bogus 'r' logfile argument to open_workbook;
    # also removed a long-dead commented-out elif branch for empty cells.
    book = open_workbook('aug_3_measures.xlsx')
    sheet = book.sheet_by_index(0)
    for row_index in range(3, sheet.nrows):
        for col_index in range(5, 26):
            if sheet.cell(row_index, col_index).value != '':
                pName = sheet.cell(0, col_index).value
                mDes = sheet.cell(row_index, 3).value
                checkProgram_list.append(
                    (True,
                     pName.encode('ascii', 'ignore'),
                     mDes.encode('ascii', 'ignore')))
    return checkProgram_list
##Here we create the table for measure_programs and also create the relationships between the measures and programs tables.
##This table uses to foreign keys as the primary key.
def measure_program_CreateInsert():
    """Create the measure_program join table and populate it.

    Looks up each flagged (program, measure) pair's primary keys and inserts
    the id pairs.  Rows whose names match nothing in the db are skipped --
    the original nested-loop version silently reused the ids left over from
    the previous iteration in that case, inserting wrong pairs.
    """
    con = psycopg2.connect(database='chriscalhoun', user='chriscalhoun')
    try:
        cur = con.cursor()
        cur.execute("CREATE TABLE measure_program("
                    "measure_id integer REFERENCES measures (measure_id)"
                    " ON UPDATE RESTRICT NOT NULL,"
                    "program_id integer REFERENCES programs (id)"
                    " ON UPDATE RESTRICT,"
                    "value BOOLEAN,"
                    "PRIMARY KEY (measure_id, program_id))")

        checkProgram_list = measure_program_check()

        # Name -> primary-key lookup tables; dict lookups replace the
        # original O(rows * programs * measures) nested scans.
        cur.execute("SELECT id, program_name FROM programs")
        program_id_by_name = {name: pid for pid, name in cur.fetchall()}
        cur.execute("SELECT measure_id, measure_description FROM measures")
        measure_id_by_desc = {desc: mid for mid, desc in cur.fetchall()}

        measure_program = []
        for _flag, program_name, measure_description in checkProgram_list:
            m_id = measure_id_by_desc.get(measure_description)
            p_id = program_id_by_name.get(program_name)
            if m_id is not None and p_id is not None:
                measure_program.append((m_id, p_id))

        query = "INSERT INTO measure_program (measure_id, program_id) VALUES(%s, %s)"
        cur.executemany(query, measure_program)
        con.commit()
    finally:
        con.close()
def main():
    """Load programs and measures, then link them in measure_program."""
    writeProgs()
    writeMeasures()
    # The original also called measure_program_check() here, but that call's
    # return value was discarded and measure_program_CreateInsert() re-runs
    # the check itself, so the redundant spreadsheet scan is dropped.
    measure_program_CreateInsert()
try:
    main()
# 'except X as e' parses on Python 2.6+ and Python 3 alike; the original
# 'except psycopg2.DatabaseError, e' form is a SyntaxError on Python 3.
except psycopg2.DatabaseError as e:
    print('Error %s' % e)
    sys.exit(1)
finally:
    # NOTE(review): the functions above all use *local* connections, so the
    # module-level ``con`` is still None here and this close never fires.
    if con:
        con.close()
|
|
"""Collects and reports container and host metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import io
import logging
import os
import time
import six
from treadmill import cgroups
from treadmill import cgutils
from treadmill import fs
from treadmill import psmem
from treadmill.fs import linux as fs_linux
NANOSECS_PER_10MILLI = 10000000  # 10 ms (one USER_HZ tick) in nanoseconds

_LOGGER = logging.getLogger(__name__)

# Patterns to match Treadmill core processes, to use as filter in psmem.
#
# TODO: currently unused.
_SYSPROCS = ['s6-*', 'treadmill_disc*', 'pid1', 'app_tickets', 'app_presence',
             'app_endpoint*']

# yield metrics in chunks of 100
_METRICS_CHUNK_SIZE = 100
def read_memory_stats(cgrp):
    """Read memory stats for the given treadmill app or system service.

    Returns dict: key is pseudofile name.
    """
    metrics = cgrp_meminfo(cgrp)
    metrics['memory.stat'] = cgutils.get_stat('memory', cgrp)
    return metrics
# Memory-cgroup pseudofiles collected by default in cgrp_meminfo().
_MEMORY_TYPE = [
    'memory.failcnt',
    'memory.limit_in_bytes',
    'memory.max_usage_in_bytes',
    'memory.memsw.failcnt',
    'memory.memsw.limit_in_bytes',
    'memory.memsw.max_usage_in_bytes',
    'memory.memsw.usage_in_bytes',
    'memory.soft_limit_in_bytes',
    'memory.usage_in_bytes',
]
def cgrp_meminfo(cgrp, *pseudofiles):
    """Read memory-cgroup pseudofiles for ``cgrp``.

    Args:
        cgrp: cgroup path of the app or system service.
        *pseudofiles: specific pseudofiles to read; defaults to the
            _MEMORY_TYPE list when none are given.

    Returns dict: pseudofile name -> raw value from cgroups.get_value.
    """
    # ``*pseudofiles`` is always a tuple here, never None, so the original
    # ``pseudofiles is None`` test was dead code; emptiness is the only
    # case.  (A stale "remove memory. prefix" comment was also dropped --
    # the keys keep their full names.)
    if not pseudofiles:
        pseudofiles = _MEMORY_TYPE
    return {pseudofile: cgroups.get_value('memory', cgrp, pseudofile)
            for pseudofile in pseudofiles}
def read_psmem_stats(appname, allpids, cgroup_prefix):
    """Reads per-proc memory details stats."""
    cgrp = os.path.join(cgutils.apps_group_name(cgroup_prefix), appname)
    # The cgroup "tasks" file also lists thread ids; intersecting with the
    # set of real /proc pids (allpids) filters those out.
    group_pids = set(cgutils.pids_in_cgroup('memory', cgrp))
    return psmem.get_memory_usage(allpids & group_pids, use_pss=True)
# Per-device blkio pseudofiles collected by default in read_blkio_info_stats().
_BLKIO_INFO_TYPE = [
    'blkio.throttle.io_service_bytes',
    'blkio.throttle.io_serviced',
    'blkio.io_service_bytes',
    'blkio.io_serviced',
    'blkio.io_merged',
    'blkio.io_queued',
]
def read_blkio_info_stats(cgrp, *pseudofiles):
    """Read blkio statistics for the given Treadmill app.

    Defaults to the _BLKIO_INFO_TYPE pseudofiles when none are given.
    Returns dict: pseudofile name -> parsed blkio info.
    """
    # ``*pseudofiles`` can never be None -- the empty tuple is the only
    # default-triggering case (original checked ``is None`` redundantly).
    if not pseudofiles:
        pseudofiles = _BLKIO_INFO_TYPE
    return {pseudofile: cgutils.get_blkio_info(cgrp, pseudofile)
            for pseudofile in pseudofiles}
# Scalar-valued blkio pseudofiles collected by default in read_blkio_value_stats().
_BLKIO_VALUE_TYPE = [
    'blkio.sectors',
    'blkio.time',
]
def read_blkio_value_stats(cgrp, *pseudofiles):
    """Read scalar-valued blkio cgroup pseudofiles.

    Defaults to the _BLKIO_VALUE_TYPE pseudofiles when none are given.
    Returns dict: pseudofile name -> value from cgutils.get_blkio_value.
    """
    # Same simplification as read_blkio_info_stats: ``*pseudofiles`` is a
    # tuple and can never be None.
    if not pseudofiles:
        pseudofiles = _BLKIO_VALUE_TYPE
    return {pseudofile: cgutils.get_blkio_value(cgrp, pseudofile)
            for pseudofile in pseudofiles}
def read_load():
    """Reads server load stats.

    Returns the (1-minute, 5-minute) load averages as strings.
    """
    with io.open('/proc/loadavg') as loadavg:
        # /proc/loadavg file format:
        # 1min_avg 5min_avg 15min_avg ...
        fields = loadavg.read().split()
    return (fields[0], fields[1])
def read_cpuacct_stat(cgrp):
    """Read the cpuacct.stat pseudofile, normalized to nanoseconds.

    cpuacct.stat reports 10-millisecond ticks while the other cpuacct
    files report nanoseconds, so each value is scaled for consistency.
    """
    raw = cgutils.get_stat('cpuacct', cgrp)
    return {name: value * NANOSECS_PER_10MILLI
            for name, value in six.iteritems(raw)}
def read_cpu_stat(cgrp):
    """Read the cpu.stat pseudofile (throttling counters) for ``cgrp``."""
    return cgutils.get_stat('cpu', cgrp)
def read_cpu_system_usage():
    """Read system-wide CPU usage.

    Unimplemented stub -- currently always returns ``None``.
    """
    # XXX: read /proc/stat
def read_cpu_stats(cgrp):
    """Calculate normalized CPU stats given cgroup name.

    Returns dict: key is pseudofile name.
    """
    return {
        'cpuacct.usage_percpu': cgutils.per_cpu_usage(cgrp),
        'cpuacct.usage': cgutils.cpu_usage(cgrp),
        'cpuacct.stat': read_cpuacct_stat(cgrp),
        'cpu.stat': read_cpu_stat(cgrp),
        'cpu.shares': cgutils.get_cpu_shares(cgrp),
    }
def get_fs_usage(block_dev):
    """Get the block statistics and compute the used disk space.

    Returns {} when no block device is associated with the container.
    """
    if block_dev is None:
        return {}
    return {'fs.used_bytes': calc_fs_usage(fs_linux.blk_fs_info(block_dev))}
def calc_fs_usage(fs_info):
    """Return the used filesystem space in bytes.

    Reserved blocks are treated as used blocks because the primary goal of
    this usage metric is to indicate whether the container has to be
    resized.  Empty/missing ``fs_info`` yields 0.
    """
    if not fs_info:
        return 0
    used_blocks = int(fs_info['block count']) - int(fs_info['free blocks'])
    return used_blocks * int(fs_info['block size'])
def app_metrics(cgrp, block_dev):
    """Returns app metrics or empty dict if app not found.

    Gathers memory, cpu, blkio and filesystem statistics for ``cgrp`` into
    one flat dict stamped with the collection time.  A vanished cgroup
    (ENOENT while reading any pseudofile) returns whatever was collected
    so far; other OS errors propagate.
    """
    result = {}
    try:
        result['timestamp'] = time.time()
        # Each reader returns a dict keyed by pseudofile name; merge in
        # order so a partial result is still meaningful on early ENOENT.
        for read_stats in (read_memory_stats,
                           read_cpu_stats,
                           read_blkio_info_stats,
                           read_blkio_value_stats):
            result.update(read_stats(cgrp))
        result.update(get_fs_usage(block_dev))
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
    except IOError as err:  # pylint: disable=duplicate-except
        # IOError is kept separately for py2 compatibility (py3 aliases it).
        if err.errno != errno.ENOENT:
            raise
    return result
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import functools
import redis
from multiprocessing.dummy import Pool as ThreadPool
from redis.client import Lock
from redis.sentinel import Sentinel
from .commands import SHARD_METHODS
from ._compat import basestring, iteritems
from .hashring import HashRing
from .helpers import format_servers
from .pipeline import Pipeline
from .sentinel import SentinelRedis
# Matches keys carrying a "{tag}" section; group 1 (the tag) is what gets
# hashed so related keys can be pinned to the same shard.
_findhash = re.compile('.*\{(.*)\}.*', re.I)
def list_or_args(keys, args):
    """Return a single list combining ``keys`` and ``args``."""
    try:
        iter(keys)
    except TypeError:
        combined = [keys]
    else:
        # A string is iterable but represents a single key, not many.
        combined = [keys] if isinstance(keys, basestring) else keys
    if args:
        combined.extend(args)
    return combined
class RedisShardAPI(object):
    """Route redis commands to a pool of servers via consistent hashing.

    Keys may embed a hash tag (the ``{tag}`` portion of a key); when
    present only the tag is hashed, forcing related keys onto the same
    shard (see ``get_server_name``).
    """

    def __init__(self, servers, hash_method='crc32', sentinel=None, strict_redis=False):
        """Build one connection per configured server plus the hash ring.

        servers: configs accepted by format_servers; each needs a unique
            ``name`` entry.
        hash_method: hashing scheme handed to HashRing.
        sentinel: optional dict with 'hosts' (and 'socket_timeout') to
            route connections through redis-sentinel.
        strict_redis: use redis.StrictRedis instead of redis.Redis.
        """
        self.nodes = []
        self.connections = {}
        self.pool = None  # lazily-created thread pool (see _build_pool)
        servers = format_servers(servers)
        if sentinel:
            sentinel = Sentinel(sentinel['hosts'],
                                socket_timeout=sentinel.get('socket_timeout', 1))
        for server_config in servers:
            name = server_config.pop('name')
            if name in self.connections:
                raise ValueError("server's name config must be unique")
            if sentinel:
                self.connections[name] = SentinelRedis(sentinel, name)
            elif strict_redis:
                self.connections[name] = redis.StrictRedis(**server_config)
            else:
                self.connections[name] = redis.Redis(**server_config)
            server_config['name'] = name  # restore; caller may reuse config
            self.nodes.append(name)
        self.ring = HashRing(self.nodes, hash_method=hash_method)

    def get_server_name(self, key):
        """Return the shard name owning ``key`` (hashing only a {tag} if present)."""
        g = _findhash.match(key)
        if g is not None and len(g.groups()) > 0:
            key = g.groups()[0]
        name = self.ring.get_node(key)
        return name

    def get_server(self, key):
        """Return the connection owning ``key``."""
        return self.connections[self.get_server_name(key)]

    def _build_pool(self):
        """Lazily create a thread pool with one worker per node."""
        if self.pool is None:
            self.pool = ThreadPool(len(self.nodes))

    def __wrap(self, method, *args, **kwargs):
        """Dispatch ``method`` to the shard owning the first (key) argument."""
        # Explicit validation replaces the original bare ``except`` around
        # an ``assert``: asserts vanish under python -O, and the bare
        # except also swallowed unrelated errors.
        if not args or not isinstance(args[0], basestring):
            raise ValueError("method '%s' requires a key param as the first argument" % method)
        key = args[0]
        server = self.get_server(key)
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __wrap_eval(self, method, script_or_sha, numkeys, *keys_and_args):
        """Dispatch eval/evalsha; only single-key scripts can be sharded."""
        if numkeys != 1:
            raise NotImplementedError("The key must be single string;mutiple keys cannot be sharded")
        key = keys_and_args[0]
        server = self.get_server(key)
        f = getattr(server, method)
        return f(script_or_sha, numkeys, *keys_and_args)

    def __wrap_tag(self, method, *args, **kwargs):
        """Dispatch a ``tag_``-prefixed method using the key's {tag} shard."""
        key = args[0]
        if isinstance(key, basestring) and '{' in key:
            server = self.get_server(key)
        elif isinstance(key, list) and '{' in key[0]:
            server = self.get_server(key[0])
        else:
            raise ValueError("method '%s' requires tag key params as its arguments" % method)
        # BUG FIX: str.lstrip("tag_") strips any leading 't'/'a'/'g'/'_'
        # *characters* (so "tag_get" became "et"); slice off the literal
        # prefix instead.
        method = method[len("tag_"):]
        f = getattr(server, method)
        return f(*args, **kwargs)

    def __getattr__(self, method):
        """Route unknown attributes to the matching shard-aware wrapper."""
        if method in SHARD_METHODS:
            return functools.partial(self.__wrap, method)
        elif method in ('eval', 'evalsha'):
            return functools.partial(self.__wrap_eval, method)
        elif method.startswith("tag_"):
            return functools.partial(self.__wrap_tag, method)
        else:
            raise NotImplementedError("method '%s' cannot be sharded" % method)

    #########################################
    ###  some methods implement as needed ###
    #########################################

    def brpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be single string;mutiple keys cannot be sharded")
        server = self.get_server(key)
        return server.brpop(key, timeout)

    def blpop(self, key, timeout=0):
        if not isinstance(key, basestring):
            raise NotImplementedError("The key must be single string;mutiple keys cannot be sharded")
        server = self.get_server(key)
        return server.blpop(key, timeout)

    def keys(self, key):
        """Collect KEYS matches from every shard."""
        _keys = []
        for node in self.nodes:
            server = self.connections[node]
            _keys.extend(server.keys(key))
        return _keys

    def mget(self, keys, *args):
        """
        Returns a list of values ordered identically to ``keys``
        """
        args = list_or_args(keys, args)
        server_keys = {}
        ret_dict = {}
        # Group keys per shard so each server receives a single MGET.
        for key in args:
            server_name = self.get_server_name(key)
            server_keys.setdefault(server_name, []).append(key)
        for server_name, sub_keys in iteritems(server_keys):
            values = self.connections[server_name].mget(sub_keys)
            ret_dict.update(dict(zip(sub_keys, values)))
        return [ret_dict.get(key, None) for key in args]

    def mset(self, mapping):
        """
        Sets each key in the ``mapping`` dict to its corresponding value
        """
        servers = {}
        for key, value in mapping.items():
            server_name = self.get_server_name(key)
            servers.setdefault(server_name, []).append((key, value))
        for name, items in servers.items():
            self.connections[name].mset(dict(items))
        return True

    def flushdb(self):
        """Flush the current database on every shard."""
        for node in self.nodes:
            self.connections[node].flushdb()

    def lock(self, name, timeout=None, sleep=0.1):
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.

        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.
        """
        return Lock(self, name, timeout=timeout, sleep=sleep)

    def pipeline(self):
        return Pipeline(self)

    def script_load(self, script):
        """Load ``script`` on every shard; all shards must return the same sha."""
        shas = []
        for node in self.nodes:
            server = self.connections[node]
            shas.append(server.script_load(script))
        if not all(x == shas[0] for x in shas):
            raise ValueError('not all server returned same sha')
        return shas[0]

    def haskey(self, key):
        """True if ``key`` exists on its shard."""
        server_name = self.get_server_name(key)
        return key in self.connections[server_name]

    def __delitem__(self, key):
        server_name = self.get_server_name(key)
        del self.connections[server_name][key]
|
|
import functools
import multiprocessing
import os
import signal
import time
import types
import psutil
from simpleflow import logger
from .named_mixin import NamedMixin, with_state
def reset_signal_handlers(func):
    """
    Decorator restoring default signal dispositions before calling ``func``.

    Worker processes inherit the supervisor's handlers on fork; those
    handlers make no sense in the child, so SIGTERM, SIGINT and SIGCHLD are
    reset to their OS defaults right before the payload runs.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        for sig in (signal.SIGTERM, signal.SIGINT, signal.SIGCHLD):
            signal.signal(sig, signal.SIG_DFL)
        return func(*args, **kwargs)
    # Keep a reference to the undecorated callable, as the original did.
    wrapped.__wrapped__ = func
    return wrapped
def _void_handle_sigchld(signum, frame):
"""
Default action for a SIGCHLD signal handling is to ignore it
which in practice has no effect on the running program. Having
a handler that does nothing is a bit different, in the sense
it will interrupt the execution of any "time.sleep()" routing.
From "time" module docs:
The actual suspension time may be less than that requested
because any caught signal will terminate the sleep()
following execution of that signal's catching routine.
"""
pass
class Supervisor(NamedMixin):
    """
    The `Supervisor` class is responsible for managing one or many worker processes
    in parallel. Those processes can be "deciders" or "activity workers" in the
    SWF terminology.

    It's heavily inspired by the process Supervisor from honcho (which is a clone of
    the "foreman" process manager, in python): https://github.com/nickstenning/honcho
    It also has its roots in the former simpleflow process manager and some of Botify
    private code which wasn't really well tested, and was re-written in a TDD-y
    style.
    """
    def __init__(self, payload, arguments=None, nb_children=None, background=False):
        """
        Initializes a Supervisor() instance, with a payload (a callable that will be
        executed on worker processes), some arguments (a list or tuple of arguments
        to pass to the callable on workers), and nb_children (the expected number
        of workers, which defaults to the number of CPU cores if not passed).

        :param payload: callable executed in each worker process
        :type payload: callable
        :param arguments: positional arguments passed to the payload
        :type arguments: tuple | list
        :param nb_children: number of workers (defaults to CPU count)
        :type nb_children: int
        :param background: whether the supervisor process should launch in background
        :type background: bool
        """
        # NB: below, compare explicitly to "None" there because nb_children could be 0
        if nb_children is None:
            self._nb_children = multiprocessing.cpu_count()
        else:
            self._nb_children = nb_children
        self._payload = payload
        self._payload_friendly_name = self.payload_friendly_name()
        # Consumed by NamedMixin to build the displayed process name.
        self._named_mixin_properties = ["_payload_friendly_name", "_nb_children"]
        self._args = arguments if arguments is not None else ()
        self._background = background
        # Maps pid -> psutil.Process for every worker we started.
        self._processes = {}
        # Flipped to True by terminate(); stops the spawn loop in target().
        self._terminating = False
        super(Supervisor, self).__init__()

    @with_state("running")
    def start(self):
        """
        Used to start the Supervisor process once it's configured. Has to be called
        explicitly on a Supervisor instance so it starts (no auto-start from __init__()).
        """
        logger.info("starting {}".format(self._payload))
        if self._background:
            # NOTE(review): the Process handle is not retained, so a
            # background supervisor cannot be joined later from here.
            p = multiprocessing.Process(target=self.target)
            p.start()
        else:
            # Foreground mode: run the supervision loop in this process.
            self.target()

    def _cleanup_worker_processes(self):
        """Reap exited/zombie workers and drop them from self._processes."""
        # cleanup children
        to_remove = []
        for pid, child in self._processes.items():
            try:
                name, status = child.name(), child.status()
            except psutil.NoSuchProcess:  # May be untimely deceased
                name, status = "unknown", "unknown"
            logger.debug(
                " child: name=%s pid=%d status=%s" % (name, child.pid, status)
            )
            if status in (psutil.STATUS_ZOMBIE, "unknown"):
                logger.debug("  process {} is zombie, will cleanup".format(child.pid))
                # join process to clean it up
                child.wait()
                # set the process to be removed from self._processes
                to_remove.append(pid)
        # cleanup our internal state (self._processes)
        # (done after the loop: can't mutate the dict while iterating it)
        for pid in to_remove:
            del self._processes[pid]

    def _start_worker_processes(self):
        """
        Start missing worker processes depending on self._nb_children and the current
        processes stored in self._processes.
        """
        if self._terminating:
            # Never respawn workers while shutting down.
            return
        for _ in range(len(self._processes), self._nb_children):
            child = multiprocessing.Process(
                target=reset_signal_handlers(self._payload), args=self._args
            )
            child.start()
            # One might wonder if `child.pid` is guaranteed to be set at this
            # point. I tried it experimentally, and read quickly the source
            # at https://github.com/python/cpython/blob/2.7/Lib/multiprocessing/process.py
            # which shows that `pid` ultimately translates to `os.getpid()` after the
            # fork. So no big risk, but I add an assertion just in case anyway.
            pid = child.pid
            assert pid, "Cannot add process with pid={}: {}".format(pid, child)
            self._processes[pid] = psutil.Process(pid)

    def target(self):
        """
        Supervisor's main "target", as defined in the `multiprocessing` API. It's the
        code that the manager will execute once started.
        """
        # handle signals
        self.bind_signal_handlers()
        # protection against double use of ".start()"
        if len(self._processes) != 0:
            raise Exception(
                "Child processes map is not empty, already called .start() ?"
            )
        # wait for all processes to finish
        while True:
            # if terminating, join all processes and exit the loop so we finish
            # the supervisor process
            if self._terminating:
                for proc in self._processes.values():
                    logger.info(
                        "process: waiting for proces={} to finish.".format(proc)
                    )
                    proc.wait()
                break
            # start worker processes
            self._cleanup_worker_processes()
            self._start_worker_processes()
            # re-evaluate state at least every 5 seconds ; if a SIGCHLD happens during
            # the "time.sleep()" below, it will be interrupted, making the code above
            # run nearly immediately ; but if a SIGCHLD happens during the two calls
            # above, the "time.sleep()" here won't be stopped, so better have it
            # relatively short, but not too short since the above methods involve
            # scanning a bunch of entries in /proc so that could become slow if we do
            # it every 0.1s.
            time.sleep(5)

    def bind_signal_handlers(self):
        """
        Binds signals for graceful shutdown:
        - SIGTERM and SIGINT lead to a graceful shutdown
        - SIGCHLD is intentionally left to a void handler, see comment
          on _void_handle_sigchld
        - other signals are not modified for now
        """
        # NB: Function is nested to have a reference to *self*.
        def _handle_graceful_shutdown(signum, frame):
            signals_map = {2: "SIGINT", 15: "SIGTERM"}
            signal_name = signals_map.get(signum, signum)
            logger.info(
                "process: caught signal signal={} pid={}".format(
                    signal_name, os.getpid()
                )
            )
            self.terminate()
        # bind SIGTERM and SIGINT
        signal.signal(signal.SIGTERM, _handle_graceful_shutdown)
        signal.signal(signal.SIGINT, _handle_graceful_shutdown)
        # bind SIGCHLD
        signal.signal(signal.SIGCHLD, _void_handle_sigchld)

    @with_state("stopping")
    def terminate(self):
        """
        Terminate all worker processes managed by this Supervisor.

        Sets the terminating flag (so target() stops respawning and starts
        joining) and sends SIGTERM to every live worker.
        """
        self._terminating = True
        logger.info(
            "process: will stop workers, this might take up several minutes. "
            "Please, be patient."
        )
        self._killall()

    def _killall(self):
        """
        Sends a stop (SIGTERM) signal to all worker processes.
        """
        for process in self._processes.values():
            logger.info("process: sending SIGTERM to pid={}".format(process.pid))
            process.terminate()

    def payload_friendly_name(self):
        """Return a short human-readable name for the payload.

        "Class.method" for bound methods, the bare function name for plain
        functions; anything else is rejected with TypeError.
        """
        payload = self._payload
        if isinstance(payload, types.MethodType):
            instance = payload.__self__
            return "{}.{}".format(instance.__class__.__name__, payload.__name__)
        elif isinstance(payload, types.FunctionType):
            return payload.__name__
        raise TypeError("invalid payload type {}".format(type(payload)))
|
|
import datetime
import dateutil.parser
from pixiv_pixie.utils import LazyProperty
from .constants import IllustType, IllustAgeLimit
def _lazy(attr_name):
    """Build a LazyProperty that triggers a full update() on first access.

    Reading the attribute calls self.update(), which populates every field,
    then returns the freshly-set value of ``attr_name``.
    """
    def fetch(self):
        self.update()
        return getattr(self, attr_name)
    return LazyProperty(fetch, property_name=attr_name)
class PixivIllust:
    """Pixiv Illust object.

    Used to access illust info.

    Attributes:
        illust_id: Illust ID.
        title: Title.
        caption: Some description text. May contain HTML tags or escape
            characters.
        creation_time: A datetime object.
        width: Width.
        height: Height.
        image_urls: A list of original image urls. A ugoira's image_urls will
            only contains one URL of a ZIP file which contains all frames.
        frame_delays: None for non-ugoira illust. Or a list of delay durations
            in microsecond.
        type: Illust type. Will be ILLUST, MANGA or UGOIRA. (These constants are
            defined in pixiv_pixie.constants.illust.)
        age_limit: Age limitation type. Will be ALL_AGE, R18 or R18G. (These
            constants are defined in pixiv_pixie.constants.illust.)
        tags: Tags.
        tools: Tools used by the author.
        user_account: The author's account name.
        user_id: The author's user ID.
        user_name: The author's nickname.
        total_bookmarks: The number of bookmarks on this illust.
        total_view: The number of times this illust been viewed.
        rank: Ranking number of the illust. Only make sense when the illust was
            fetched from ranking. Starting from 1.
    """
    # Each attribute below is lazily fetched: first access triggers a full
    # update() (one API call) which sets all of them at once.
    title = _lazy('title')
    caption = _lazy('caption')
    creation_time = _lazy('creation_time')
    width = _lazy('width')
    height = _lazy('height')
    image_urls = _lazy('image_urls')
    frame_delays = _lazy('frame_delays')
    type = _lazy('type')
    age_limit = _lazy('age_limit')
    tags = _lazy('tags')
    tools = _lazy('tools')
    user_account = _lazy('user_account')
    user_id = _lazy('user_id')
    user_name = _lazy('user_name')
    total_bookmarks = _lazy('total_bookmarks')
    total_view = _lazy('total_view')

    @classmethod
    def from_papi(cls, pixie, json_result):
        """Alternate constructor from a Public-API (PAPI) JSON result."""
        illust = cls(pixie=pixie, illust_id=json_result.id)
        illust.update_from_papi(json_result)
        return illust

    @classmethod
    def from_aapi(cls, pixie, json_result):
        """Alternate constructor from an App-API (AAPI) JSON result."""
        illust = cls(pixie=pixie, illust_id=json_result.id)
        illust.update_from_aapi(json_result)
        return illust

    def __init__(self, pixie, illust_id):
        # pixie: the client used for lazy fetches via pixie.illust().
        self.pixie = pixie
        self.illust_id = illust_id
        # Only meaningful when fetched from a ranking listing.
        self.rank = None

    def __repr__(self):
        return 'PixivIllust(illust_id={})'.format(self.illust_id)

    @property
    def size(self):
        """A tuple of (width, height)."""
        return self.width, self.height

    @property
    def area(self):
        """Area in pixels."""
        return self.width * self.height

    @property
    def aspect_ratio(self):
        """Width divided by height (0 when height is 0, avoiding division error)."""
        if self.height == 0:
            return 0
        return self.width / self.height

    @property
    def page_count(self):
        """The number of pages."""
        return len(self.image_urls)

    def update(self):
        """Re-fetch the illust via the client and copy every attribute over.

        Lists are shallow-copied so this instance does not alias the
        fetched illust's mutable state.
        """
        illust = self.pixie.illust(self.illust_id)
        attributes = [
            'illust_id',
            'title',
            'caption',
            'creation_time',
            'width',
            'height',
            'image_urls',
            'frame_delays',
            'type',
            'age_limit',
            'tags',
            'tools',
            'user_account',
            'user_id',
            'user_name',
            'total_bookmarks',
            'total_view',
            'rank',
        ]
        for attr in attributes:
            value = getattr(illust, attr)
            if isinstance(value, list):
                value = value.copy()
            setattr(self, attr, value)

    def update_from_papi(self, json_result):
        """Populate attributes from a Public-API JSON result.

        NOTE(review): several branches below only set image_urls / tools /
        total_bookmarks when the corresponding PAPI field is present, so
        those attributes may remain lazy after this call — confirm intended.
        """
        self.illust_id = json_result.id
        self.title = json_result.title
        if json_result.caption is not None:
            self.caption = json_result.caption
        else:
            self.caption = ''
        # PAPI timestamps are plain "YYYY-MM-DD HH:MM:SS" strings (no tz).
        self.creation_time = datetime.datetime.strptime(
            json_result.created_time,
            '%Y-%m-%d %H:%M:%S',
        )
        self.width = json_result.width
        self.height = json_result.height
        if json_result.page_count == 1:
            if json_result.type == 'ugoira':  # ugoira
                if json_result.metadata is not None:
                    # Single ZIP URL containing all animation frames.
                    self.image_urls = [
                        json_result.metadata.zip_urls.ugoira600x600,
                    ]
                    self.frame_delays = [
                        frame.delay_msec
                        for frame in json_result.metadata.frames
                    ]
            else:  # single page illust
                self.image_urls = [json_result.image_urls.large]
                self.frame_delays = None
        else:  # multi page illust
            if json_result.metadata is not None:
                self.image_urls = [
                    page.image_urls.large
                    for page in json_result.metadata.pages
                ]
                self.frame_delays = None
        # Map PAPI string labels to internal enum constants.
        self.type = {
            'illustration': IllustType.ILLUST,
            'manga': IllustType.MANGA,
            'ugoira': IllustType.UGOIRA,
        }[json_result.type]
        self.age_limit = {
            'all-age': IllustAgeLimit.ALL_AGE,
            'r18': IllustAgeLimit.R18,
            'r18-g': IllustAgeLimit.R18G,
        }[json_result.age_limit]
        self.tags = [tag for tag in json_result.tags]
        if json_result.tools is not None:
            self.tools = [tool for tool in json_result.tools]
        self.user_account = json_result.user.account
        self.user_id = json_result.user.id
        self.user_name = json_result.user.name
        favorited_count = json_result.stats.favorited_count
        if favorited_count.public is not None:
            # Sum public + private bookmark counts.
            self.total_bookmarks = sum(favorited_count.values())
        self.total_view = json_result.stats.views_count

    def update_from_aapi(self, json_result):
        """Populate attributes from an App-API JSON result.

        NOTE(review): the ugoira branch is a no-op here (image_urls /
        frame_delays stay lazy); AAPI exposes ugoira metadata elsewhere.
        """
        self.illust_id = json_result.id
        self.title = json_result.title
        self.caption = json_result.caption
        # AAPI dates are ISO-8601 strings; dateutil handles the offset.
        self.creation_time = dateutil.parser.parse(json_result.create_date)
        self.width = json_result.width
        self.height = json_result.height
        if json_result.page_count == 1 and json_result.type != 'ugoira':
            # single page illust
            self.image_urls = [
                json_result.meta_single_page.original_image_url
            ]
            self.frame_delays = None
        elif json_result.page_count > 1:  # multi page illust
            self.image_urls = [
                page.image_urls.original
                for page in json_result.meta_pages
            ]
            self.frame_delays = None
        else:  # ugoira
            pass
        self.type = {
            'illust': IllustType.ILLUST,
            'manga': IllustType.MANGA,
            'ugoira': IllustType.UGOIRA,
        }[json_result.type]
        self.tags = [tag.name for tag in json_result.tags]
        self.tools = [tool for tool in json_result.tools]
        # AAPI has no explicit age_limit field; derive it from the tags.
        if 'R-18' in self.tags:
            self.age_limit = IllustAgeLimit.R18
        elif 'R-18G' in self.tags:
            self.age_limit = IllustAgeLimit.R18G
        else:
            self.age_limit = IllustAgeLimit.ALL_AGE
        self.user_account = json_result.user.account
        self.user_id = json_result.user.id
        self.user_name = json_result.user.name
        self.total_bookmarks = json_result.total_bookmarks
        self.total_view = json_result.total_view
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import contextlib
import errno
import itertools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from grpc.framework.alpha import exceptions
from grpc.framework.foundation import future
# Identifiers of entities we expect to find in the generated module.
SERVICER_IDENTIFIER = 'EarlyAdopterTestServiceServicer'
SERVER_IDENTIFIER = 'EarlyAdopterTestServiceServer'
STUB_IDENTIFIER = 'EarlyAdopterTestServiceStub'
SERVER_FACTORY_IDENTIFIER = 'early_adopter_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'early_adopter_create_TestService_stub'
# The timeout used in tests of RPCs that are supposed to expire.
SHORT_TIMEOUT = 2
# The timeout used in tests of RPCs that are not supposed to expire. The
# absurdly large value doesn't matter since no passing execution of this test
# module will ever wait the duration.
LONG_TIMEOUT = 600
# Per-response artificial servicer latency, in seconds (0 = respond at once).
NO_DELAY = 0
# Build mode environment variable set by tools/run_tests/run_tests.py.
_build_mode = os.environ['CONFIG']
class _ServicerMethods(object):
    """Backing implementation of the generated test servicer.

    Exposes pause()/fail() context managers so tests can make in-flight
    RPCs block or raise; every RPC method funnels through _control() which
    honors those switches plus the configured per-response delay.
    """
    def __init__(self, test_pb2, delay):
        self._condition = threading.Condition()
        # Artificial per-response latency, in seconds.
        self._delay = delay
        self._paused = False
        self._fail = False
        self._test_pb2 = test_pb2

    @contextlib.contextmanager
    def pause(self):  # pylint: disable=invalid-name
        """While active, every RPC blocks inside _control() until exit."""
        with self._condition:
            self._paused = True
        yield
        with self._condition:
            self._paused = False
            self._condition.notify_all()

    @contextlib.contextmanager
    def fail(self):  # pylint: disable=invalid-name
        """While active, every RPC raises ValueError from _control()."""
        with self._condition:
            self._fail = True
        yield
        with self._condition:
            self._fail = False

    def _control(self):  # pylint: disable=invalid-name
        """Apply the fail/pause switches and the per-response delay."""
        with self._condition:
            if self._fail:
                raise ValueError()
            while self._paused:
                self._condition.wait()
        time.sleep(self._delay)

    def UnaryCall(self, request, unused_rpc_context):
        """Unary-unary: respond with request.response_size 'a' characters."""
        response = self._test_pb2.SimpleResponse()
        response.payload.payload_type = self._test_pb2.COMPRESSABLE
        response.payload.payload_compressable = 'a' * request.response_size
        self._control()
        return response

    def StreamingOutputCall(self, request, unused_rpc_context):
        """Unary-stream: one response per entry in response_parameters."""
        for parameter in request.response_parameters:
            response = self._test_pb2.StreamingOutputCallResponse()
            response.payload.payload_type = self._test_pb2.COMPRESSABLE
            response.payload.payload_compressable = 'a' * parameter.size
            self._control()
            yield response

    def StreamingInputCall(self, request_iter, unused_rpc_context):
        """Stream-unary: report the total payload size received."""
        response = self._test_pb2.StreamingInputCallResponse()
        aggregated_payload_size = 0
        for request in request_iter:
            aggregated_payload_size += len(request.payload.payload_compressable)
        response.aggregated_payload_size = aggregated_payload_size
        self._control()
        return response

    def FullDuplexCall(self, request_iter, unused_rpc_context):
        """Stream-stream: respond to each request's parameters as they arrive."""
        for request in request_iter:
            for parameter in request.response_parameters:
                response = self._test_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = self._test_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                yield response

    def HalfDuplexCall(self, request_iter, unused_rpc_context):
        """Stream-stream: buffer all responses, emit only after input ends."""
        responses = []
        for request in request_iter:
            for parameter in request.response_parameters:
                response = self._test_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = self._test_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                responses.append(response)
        for response in responses:
            yield response
@contextlib.contextmanager
def _CreateService(test_pb2, delay):
    """Provides a servicer backend and a stub.

    The servicer is just the implementation
    of the actual servicer passed to the face player of the python RPC
    implementation; the two are detached.
    Non-zero delay puts a delay on each call to the servicer, representative of
    communication latency. Timeout is the default timeout for the stub while
    waiting for the service.

    Args:
        test_pb2: The test_pb2 module generated by this test.
        delay: Delay in seconds per response from the servicer.

    Yields:
        A (servicer_methods, stub, server) three-tuple where servicer_methods is
        the back-end of the service bound to the stub and the server and stub
        are both activated and ready for use.
    """
    servicer_methods = _ServicerMethods(test_pb2, delay)
    # Thin adapter: the generated Servicer interface delegates every RPC to
    # the controllable _ServicerMethods instance above.
    class Servicer(getattr(test_pb2, SERVICER_IDENTIFIER)):
        def UnaryCall(self, request, context):
            return servicer_methods.UnaryCall(request, context)
        def StreamingOutputCall(self, request, context):
            return servicer_methods.StreamingOutputCall(request, context)
        def StreamingInputCall(self, request_iter, context):
            return servicer_methods.StreamingInputCall(request_iter, context)
        def FullDuplexCall(self, request_iter, context):
            return servicer_methods.FullDuplexCall(request_iter, context)
        def HalfDuplexCall(self, request_iter, context):
            return servicer_methods.HalfDuplexCall(request_iter, context)
    servicer = Servicer()
    # Port 0 presumably asks the OS for an ephemeral port — confirm against
    # the generated server factory's API.
    server = getattr(
        test_pb2, SERVER_FACTORY_IDENTIFIER)(servicer, 0)
    with server:
        port = server.port()
        stub = getattr(test_pb2, STUB_FACTORY_IDENTIFIER)('localhost', port)
        with stub:
            yield servicer_methods, stub, server
def _streaming_input_request_iterator(test_pb2):
for _ in range(3):
request = test_pb2.StreamingInputCallRequest()
request.payload.payload_type = test_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def _streaming_output_request(test_pb2):
request = test_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def _full_duplex_request_iterator(test_pb2):
request = test_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = test_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
"""Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.async()`) only gives futures for the *non-streaming* responses,
else it behaves like its blocking cousin.
"""
def setUp(self):
protoc_command = '../../bins/%s/protobuf/protoc' % _build_mode
protoc_plugin_filename = '../../bins/%s/grpc_python_plugin' % _build_mode
test_proto_filename = './test.proto'
if not os.path.isfile(protoc_command):
# Assume that if we haven't built protoc that it's on the system.
protoc_command = 'protoc'
# Ensure that the output directory exists.
self.outdir = tempfile.mkdtemp()
# Invoke protoc with the plugin.
cmd = [
protoc_command,
'--plugin=protoc-gen-python-grpc=%s' % protoc_plugin_filename,
'-I %s' % os.path.dirname(test_proto_filename),
'--python_out=%s' % self.outdir,
'--python-grpc_out=%s' % self.outdir,
os.path.basename(test_proto_filename),
]
subprocess.call(' '.join(cmd), shell=True)
sys.path.append(self.outdir)
def tearDown(self):
try:
shutil.rmtree(self.outdir)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
# TODO(atash): Figure out which of these tests is hanging flakily with small
# probability.
def testImportAttributes(self):
# check that we can access the generated module and its members.
import test_pb2 # pylint: disable=g-import-not-at-top
self.assertIsNotNone(getattr(test_pb2, SERVICER_IDENTIFIER, None))
self.assertIsNotNone(getattr(test_pb2, SERVER_IDENTIFIER, None))
self.assertIsNotNone(getattr(test_pb2, STUB_IDENTIFIER, None))
self.assertIsNotNone(getattr(test_pb2, SERVER_FACTORY_IDENTIFIER, None))
self.assertIsNotNone(getattr(test_pb2, STUB_FACTORY_IDENTIFIER, None))
def testUpDown(self):
import test_pb2
with _CreateService(
test_pb2, NO_DELAY) as (servicer, stub, unused_server):
request = test_pb2.SimpleRequest(response_size=13)
def testUnaryCall(self):
import test_pb2 # pylint: disable=g-import-not-at-top
with _CreateService(test_pb2, NO_DELAY) as (methods, stub, unused_server):
timeout = 6 # TODO(issue 2039): LONG_TIMEOUT like the other methods.
request = test_pb2.SimpleRequest(response_size=13)
response = stub.UnaryCall(request, timeout)
expected_response = methods.UnaryCall(request, 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testUnaryCallAsync(self):
import test_pb2 # pylint: disable=g-import-not-at-top
request = test_pb2.SimpleRequest(response_size=13)
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
# Check that the call does not block waiting for the server to respond.
with methods.pause():
response_future = stub.UnaryCall.async(request, LONG_TIMEOUT)
response = response_future.result()
expected_response = methods.UnaryCall(request, 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testUnaryCallAsyncExpired(self):
import test_pb2 # pylint: disable=g-import-not-at-top
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
request = test_pb2.SimpleRequest(response_size=13)
with methods.pause():
response_future = stub.UnaryCall.async(request, SHORT_TIMEOUT)
with self.assertRaises(exceptions.ExpirationError):
response_future.result()
@unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
'forever and fix.')
def testUnaryCallAsyncCancelled(self):
import test_pb2 # pylint: disable=g-import-not-at-top
request = test_pb2.SimpleRequest(response_size=13)
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.pause():
response_future = stub.UnaryCall.async(request, 1)
response_future.cancel()
self.assertTrue(response_future.cancelled())
def testUnaryCallAsyncFailed(self):
import test_pb2 # pylint: disable=g-import-not-at-top
request = test_pb2.SimpleRequest(response_size=13)
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.fail():
response_future = stub.UnaryCall.async(request, LONG_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testStreamingOutputCall(self):
import test_pb2 # pylint: disable=g-import-not-at-top
request = _streaming_output_request(test_pb2)
with _CreateService(test_pb2, NO_DELAY) as (methods, stub, unused_server):
responses = stub.StreamingOutputCall(request, LONG_TIMEOUT)
expected_responses = methods.StreamingOutputCall(
request, 'not a real RpcContext!')
for expected_response, response in itertools.izip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
@unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
'forever and fix.')
def testStreamingOutputCallExpired(self):
import test_pb2 # pylint: disable=g-import-not-at-top
request = _streaming_output_request(test_pb2)
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.pause():
responses = stub.StreamingOutputCall(request, SHORT_TIMEOUT)
with self.assertRaises(exceptions.ExpirationError):
list(responses)
@unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
'forever and fix.')
def testStreamingOutputCallCancelled(self):
import test_pb2 # pylint: disable=g-import-not-at-top
request = _streaming_output_request(test_pb2)
with _CreateService(test_pb2, NO_DELAY) as (
unused_methods, stub, unused_server):
responses = stub.StreamingOutputCall(request, SHORT_TIMEOUT)
next(responses)
responses.cancel()
with self.assertRaises(future.CancelledError):
next(responses)
@unittest.skip('TODO(atash,nathaniel): figure out why this times out '
'instead of raising the proper error.')
def testStreamingOutputCallFailed(self):
import test_pb2 # pylint: disable=g-import-not-at-top
request = _streaming_output_request(test_pb2)
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.fail():
responses = stub.StreamingOutputCall(request, 1)
self.assertIsNotNone(responses)
with self.assertRaises(exceptions.ServicerError):
next(responses)
@unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
'forever and fix.')
def testStreamingInputCall(self):
import test_pb2 # pylint: disable=g-import-not-at-top
with _CreateService(test_pb2, NO_DELAY) as (methods, stub, unused_server):
response = stub.StreamingInputCall(
_streaming_input_request_iterator(test_pb2), LONG_TIMEOUT)
expected_response = methods.StreamingInputCall(
_streaming_input_request_iterator(test_pb2), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallAsync(self):
import test_pb2 # pylint: disable=g-import-not-at-top
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.pause():
response_future = stub.StreamingInputCall.async(
_streaming_input_request_iterator(test_pb2), LONG_TIMEOUT)
response = response_future.result()
expected_response = methods.StreamingInputCall(
_streaming_input_request_iterator(test_pb2), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallAsyncExpired(self):
import test_pb2 # pylint: disable=g-import-not-at-top
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.pause():
response_future = stub.StreamingInputCall.async(
_streaming_input_request_iterator(test_pb2), SHORT_TIMEOUT)
with self.assertRaises(exceptions.ExpirationError):
response_future.result()
self.assertIsInstance(
response_future.exception(), exceptions.ExpirationError)
def testStreamingInputCallAsyncCancelled(self):
import test_pb2 # pylint: disable=g-import-not-at-top
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.pause():
timeout = 6 # TODO(issue 2039): LONG_TIMEOUT like the other methods.
response_future = stub.StreamingInputCall.async(
_streaming_input_request_iterator(test_pb2), timeout)
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(future.CancelledError):
response_future.result()
def testStreamingInputCallAsyncFailed(self):
import test_pb2 # pylint: disable=g-import-not-at-top
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.fail():
response_future = stub.StreamingInputCall.async(
_streaming_input_request_iterator(test_pb2), SHORT_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testFullDuplexCall(self):
import test_pb2 # pylint: disable=g-import-not-at-top
with _CreateService(test_pb2, NO_DELAY) as (methods, stub, unused_server):
responses = stub.FullDuplexCall(
_full_duplex_request_iterator(test_pb2), LONG_TIMEOUT)
expected_responses = methods.FullDuplexCall(
_full_duplex_request_iterator(test_pb2), 'not a real RpcContext!')
for expected_response, response in itertools.izip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
@unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
'forever and fix.')
def testFullDuplexCallExpired(self):
import test_pb2 # pylint: disable=g-import-not-at-top
request_iterator = _full_duplex_request_iterator(test_pb2)
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.pause():
responses = stub.FullDuplexCall(request_iterator, SHORT_TIMEOUT)
with self.assertRaises(exceptions.ExpirationError):
list(responses)
@unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
'forever and fix.')
def testFullDuplexCallCancelled(self):
import test_pb2 # pylint: disable=g-import-not-at-top
with _CreateService(test_pb2, NO_DELAY) as (methods, stub, unused_server):
request_iterator = _full_duplex_request_iterator(test_pb2)
responses = stub.FullDuplexCall(request_iterator, LONG_TIMEOUT)
next(responses)
responses.cancel()
with self.assertRaises(future.CancelledError):
next(responses)
@unittest.skip('TODO(atash,nathaniel): figure out why this hangs forever '
'and fix.')
def testFullDuplexCallFailed(self):
import test_pb2 # pylint: disable=g-import-not-at-top
request_iterator = _full_duplex_request_iterator(test_pb2)
with _CreateService(test_pb2, NO_DELAY) as (
methods, stub, unused_server):
with methods.fail():
responses = stub.FullDuplexCall(request_iterator, LONG_TIMEOUT)
self.assertIsNotNone(responses)
with self.assertRaises(exceptions.ServicerError):
next(responses)
@unittest.skip('TODO(atash,nathaniel): figure out why this flakily hangs '
               'forever and fix.')
def testHalfDuplexCall(self):
  """Checks a half-duplex streaming RPC against a direct servicer call."""
  import test_pb2  # pylint: disable=g-import-not-at-top
  with _CreateService(test_pb2, NO_DELAY) as (
      methods, stub, unused_server):
    def request_iterator():
      # One single-response request followed by one two-response request.
      first = test_pb2.StreamingOutputCallRequest()
      first.response_parameters.add(size=1, interval_us=0)
      yield first
      second = test_pb2.StreamingOutputCallRequest()
      second.response_parameters.add(size=2, interval_us=0)
      second.response_parameters.add(size=3, interval_us=0)
      yield second
    actual = stub.HalfDuplexCall(request_iterator(), LONG_TIMEOUT)
    expected = methods.HalfDuplexCall(
        request_iterator(), 'not a real RpcContext!')
    for want, got in itertools.izip_longest(expected, actual):
      self.assertEqual(want, got)
def testHalfDuplexCallWedged(self):
  """A request iterator that never finishes must expire the call.

  The request iterator sends one request and then blocks on `condition`
  for as long as `wait_cell[0]` is True, so the servicer never receives
  the end of the request stream and the SHORT_TIMEOUT expires.
  """
  import test_pb2  # pylint: disable=g-import-not-at-top
  condition = threading.Condition()
  # Mutable cell shared between wait() and the request iterator.
  wait_cell = [False]
  @contextlib.contextmanager
  def wait():  # pylint: disable=invalid-name
    # Where's Python 3's 'nonlocal' statement when you need it?
    with condition:
      wait_cell[0] = True
    yield
    # On exit, release any iterator blocked in condition.wait().
    with condition:
      wait_cell[0] = False
      condition.notify_all()
  def half_duplex_request_iterator():
    request = test_pb2.StreamingOutputCallRequest()
    request.response_parameters.add(size=1, interval_us=0)
    yield request
    # Block here until wait() exits, wedging the request stream open.
    with condition:
      while wait_cell[0]:
        condition.wait()
  with _CreateService(test_pb2, NO_DELAY) as (methods, stub, unused_server):
    with wait():
      responses = stub.HalfDuplexCall(
          half_duplex_request_iterator(), SHORT_TIMEOUT)
      # half-duplex waits for the client to send all info
      with self.assertRaises(exceptions.ExpirationError):
        next(responses)
if __name__ == '__main__':
  # Run from the script's own directory so generated modules such as
  # test_pb2 are importable via a relative path.
  os.chdir(os.path.dirname(sys.argv[0]))
  unittest.main(verbosity=2)
|
|
#TIME COMPLEXITY :
"""
FOR WORD PREDICTION : O(1)
FOR WORD PREDICTION WITH 'R'TH RANK: O(R)
"""
#import the modules necessary
from nltk.util import ngrams
from collections import defaultdict
from collections import OrderedDict
import string
import time
import gc
start_time = time.time()
#returns : void
#arg: string,dict,dict,dict,dict
#loads the corpus for the dataset and makes the frequency count of quadgram and trigram strings
def loadCorpus(file_path,bi_dict,tri_dict,quad_dict,vocab_dict):
    """Read the corpus line by line and accumulate n-gram counts.

    Updates bi_dict/tri_dict/quad_dict with bigram/trigram/quadgram
    frequencies (keys are space-joined word strings) and vocab_dict with
    unigram frequencies.  Returns the total number of word tokens read.

    NOTE(review): the stale header comment says this returns void, but
    the function clearly returns ``word_len``.
    NOTE(review): assumes every non-empty line yields at least 3 tokens
    once the carried-over words are inserted; a shorter first line would
    make the trailing token[n-3] indexing misbehave -- TODO confirm.
    """
    w1 = ''  #for storing the 3rd last word to be used for next token set
    w2 = ''  #for storing the 2nd last word to be used for next token set
    w3 = ''  #for storing the last word to be used for next token set
    token = []
    word_len = 0  # running total of tokens seen, returned at the end
    #open the corpus file and read it line by line
    with open(file_path,'r') as file:
        for line in file:
            #split the line into tokens
            token = line.split()
            i = 0
            #for each word in the token list, remove punctuations and change to lowercase
            for word in token :
                for l in word :
                    if l in string.punctuation:
                        word = word.replace(l," ")
                token[i] = word.lower()
                i += 1
            #re-join and re-split so words split by punctuation become separate tokens
            content = " ".join(token)
            token = content.split()
            word_len = word_len + len(token)
            if not token:
                continue
            #add the last word from previous line so cross-line bigrams are counted
            if w3!= '':
                token.insert(0,w3)
            temp0 = list(ngrams(token,2))
            #since we are reading line by line some combinations of word might get missed for pairing
            #for trigram: first add the previous words
            if w2!= '':
                token.insert(0,w2)
            #tokens for trigrams
            temp1 = list(ngrams(token,3))
            #insert the 3rd last word from previous line for quadgram pairing
            if w1!= '':
                token.insert(0,w1)
            #add new unique words to the vocabulary set if available
            for word in token:
                if word not in vocab_dict:
                    vocab_dict[word] = 1
                else:
                    vocab_dict[word]+= 1
            #tokens for quadgrams
            temp2 = list(ngrams(token,4))
            #count the frequency of the bigram sentences
            for t in temp0:
                sen = ' '.join(t)
                bi_dict[sen] += 1
            #count the frequency of the trigram sentences
            for t in temp1:
                sen = ' '.join(t)
                tri_dict[sen] += 1
            #count the frequency of the quadgram sentences
            for t in temp2:
                sen = ' '.join(t)
                quad_dict[sen] += 1
            #store the last three words for the next line's cross-line pairing
            n = len(token)
            w1 = token[n -3]
            w2 = token[n -2]
            w3 = token[n -1]
    return word_len
####################################################################################
#returns: string
#arg: string
#remove punctuations and make the string lowercase
def removePunctuations(sen):
    """Lowercase *sen* and blank out punctuation characters.

    Every punctuation character inside a word is replaced by a space
    (so "here---so" becomes "here   so"); the words are then re-joined
    with single spaces.  Extra spaces introduced by the replacement are
    deliberately kept, matching the original behaviour.
    """
    cleaned = []
    for word in sen.split():
        scrubbed = ''.join(' ' if ch in string.punctuation else ch
                           for ch in word)
        cleaned.append(scrubbed.lower())
    return ' '.join(cleaned)
####################################################################################
#returns: string
#arg: string,dict,int
#does prediction for the the sentence
def doPrediction(sen,prob_dict,rank = 1):
    """Return the rank-th most probable next word for trigram *sen*.

    Falls back to the top candidate when fewer than *rank* candidates
    exist, and to the literal string "Can't predict" when *sen* has no
    entry in *prob_dict*.
    """
    candidates = prob_dict.get(sen)
    if candidates is None:
        return "Can't predict"
    if rank <= len(candidates):
        return candidates[rank - 1][1]
    return candidates[0][1]
####################################################################################
#returns: void
#arg: dict,dict,dict,dict,dict,int
#creates dict for storing probable words with their probabilities for a trigram sentence
def createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len):
    """Build prob_dict: trigram -> list of [probability, next_word] pairs.

    Reads the quadgram strings back from 'quad_dict.txt' (written earlier
    by writeQuads) instead of iterating quad_dict in memory, presumably to
    reduce memory pressure -- TODO confirm.  For each quadgram "w1 w2 w3 w4"
    the interpolated probability of w4 given "w1 w2 w3" is appended under
    the trigram key.
    """
    i = 0  # progress counter, printed each iteration
    with open('quad_dict.txt','r') as file:
        # for quad_sen in quad_dict:
        for quad_sen in file:
            prob = 0.0  # stays 0.0 when the trigram was never seen
            quad_token = quad_sen.split()
            #first three words form the trigram key
            tri_sen = ' '.join(quad_token[:3])
            tri_count = tri_dict[tri_sen]
            #guard against division by zero inside the interpolation
            if tri_count != 0:
                prob = interpolatedProbability(quad_token,token_len, vocab_dict, bi_dict, tri_dict, quad_dict,
                                               l1 = 0.25, l2 = 0.25, l3 = 0.25 , l4 = 0.25)
            if tri_sen not in prob_dict:
                prob_dict[tri_sen] = []
                prob_dict[tri_sen].append([prob,quad_token[-1]])
            else:
                prob_dict[tri_sen].append([prob,quad_token[-1]])
            i += 1
            print('i:',i)
    #drop references to the large temporaries so gc can reclaim them
    prob = None
    tri_count = None
    quad_token = None
    tri_sen = None
####################################################################################
#returns: void
#arg: dict
#for writing the probable word dict in text file
def writeProbWords(prob_dict):
    """Dump each trigram key and its candidate list to probab_dict.txt."""
    with open('probab_dict.txt','w') as out_file:
        for tri_sen, candidates in prob_dict.items():
            out_file.write('{} {}\n'.format(tri_sen, candidates))
####################################################################################
#returns: void
#arg: dict
#for sorting the probable word acc. to their probabilities
def sortProbWordDict(prob_dict):
    """Sort each trigram's candidate list in place, highest probability first.

    Bug fix: the original called ``sorted(prob_dict[key], reverse=True)``
    and discarded the returned list, so no list was ever actually sorted
    and doPrediction's rank ordering was meaningless.  ``list.sort``
    performs the sort in place.
    """
    for key in prob_dict:
        # Single-element lists are already "sorted"; skip them.
        if len(prob_dict[key]) > 1:
            prob_dict[key].sort(reverse=True)
####################################################################################
#returns: string
#arg: void
#for taking input from user
def takeInput():
    """Prompt until the user enters at least three words.

    Returns the last three words of the (punctuation-stripped,
    lowercased) input joined by single spaces.
    """
    while True:
        sen = removePunctuations(input('Enter the string\n'))
        words = sen.split()
        if len(words) >= 3:
            return " ".join(words[-3:])
        print("Please enter atleast 3 words !")
####################################################################################
#return:int
#arg:list,dict,dict,dict,dict
#computes the score for test data
def computeTestScore(test_sent,tri_dict,quad_dict,vocab_dict,prob_dict):
    """Count correct next-word predictions over *test_sent*.

    Each item of *test_sent* is a 4-token sequence: the first three words
    form the query trigram and the fourth is the expected next word.
    Correct predictions are also logged to test_result.txt.  Returns the
    number of correct predictions.
    """
    score = 0
    with open('test_result.txt','w') as log:
        for sent in test_sent:
            query = " ".join(sent[:3])
            predicted = doPrediction(query, prob_dict)
            if predicted == sent[3]:
                log.write(query + " : " + predicted + '\n')
                score += 1
    return score
#######################################################################################
#return:float
#arg:list,int,dict,dict,dict,dict
#computes the score for test data
def computePerplexity(test_quadgrams,token_len,tri_dict,quad_dict,vocab_dict,prob_dict):
    """Compute a perplexity-style product over the quadgram counts.

    Multiplies (count(quadgram) / count(leading trigram)) ** (1/token_len)
    across every quadgram key.

    NOTE(review): despite the name of the first parameter, this iterates
    *quad_dict*, not *test_quadgrams* -- kept as-is to preserve behaviour.
    """
    n = token_len
    perplexity = 1.0
    for quad_sen in quad_dict:
        tri_sen = ' '.join(quad_sen.split()[0:3])
        ratio = quad_dict[quad_sen] / tri_dict[tri_sen]
        perplexity = perplexity * (ratio ** (1. / n))
    return perplexity
#######################################################################################
#returns: float
#arg: float,float,float,float,list,list,dict,dict,dict,dict
#for calculating the interpolated probablity
def interpolatedProbability(quad_token,token_len, vocab_dict, bi_dict, tri_dict, quad_dict,
                            l1 = 0.25, l2 = 0.25, l3 = 0.25 , l4 = 0.25):
    """Linear interpolation of quadgram, trigram, bigram and unigram MLEs.

    quad_token is the four words (w1, w2, w3, w4); the l1..l4 weights
    apply to the quadgram, trigram, bigram and unigram estimates in that
    order.
    """
    w1, w2, w3, w4 = quad_token[0], quad_token[1], quad_token[2], quad_token[3]
    quad_mle = quad_dict[' '.join((w1, w2, w3, w4))] / tri_dict[' '.join((w1, w2, w3))]
    tri_mle = tri_dict[' '.join((w2, w3, w4))] / bi_dict[' '.join((w2, w3))]
    bi_mle = bi_dict[' '.join((w3, w4))] / vocab_dict[w3]
    uni_mle = vocab_dict[w4] / token_len
    return l1 * quad_mle + l2 * tri_mle + l3 * bi_mle + l4 * uni_mle
####################################################################################
#return: void
#arg:string,string,dict,dict,dict,dict,dict
#Used for testing the Language Model
def trainCorpus(train_file,test_file,bi_dict,tri_dict,quad_dict,vocab_dict,prob_dict):
    """Train the n-gram model, then score it against the test corpus.

    Fills the count dicts from the training corpus, builds and sorts
    prob_dict, then computes a prediction score and perplexity on the
    test corpus and writes both to test_results.txt.

    NOTE(review): the train_file/test_file parameters are ignored -- the
    paths 'training_corpus.txt' and 'testing_corpus.txt' are hard-coded
    below.
    """
    test_result = ''
    score = 0
    #load the training corpus for the dataset (train_file param unused)
    token_len = loadCorpus('training_corpus.txt',bi_dict,tri_dict,quad_dict,vocab_dict)
    print("---Processing Time for Corpus Loading: %s seconds ---" % (time.time() - start_time))
    start_time1 = time.time()
    #persist the quadgrams; createProbableWordDict re-reads them from disk
    writeQuads(quad_dict)
    #creates a dictionary of probable words
    createProbableWordDict(bi_dict,tri_dict,quad_dict,prob_dict,vocab_dict,token_len)
    #sort the dictionary of probable words
    sortProbWordDict(prob_dict)
    gc.collect()
    print(len(prob_dict))
    print("---Processing Time for Creating Probable Word Dict: %s seconds ---" % (time.time() - start_time1))
    test_data = ''
    #Now load the test corpus (test_file param unused)
    with open('testing_corpus.txt','r') as file :
        test_data = file.read()
    #remove punctuations from the test data
    test_data = removePunctuations(test_data)
    test_token = test_data.split()
    #split the test data into 4 words list
    #NOTE(review): this second split is redundant -- identical to the one above
    test_token = test_data.split()
    test_quadgrams = list(ngrams(test_token,4))
    #print(len(test_token))
    start_time1 = time.time()
    score = computeTestScore(test_quadgrams,tri_dict,quad_dict,vocab_dict,prob_dict)
    print('Score:',score)
    print("---Processing Time for computing score: %s seconds ---" % (time.time() - start_time1))
    start_time2 = time.time()
    perplexity = computePerplexity(test_token,token_len,tri_dict,quad_dict,vocab_dict,prob_dict)
    print('Perplexity:',perplexity)
    print("---Processing Time for computing Perplexity: %s seconds ---" % (time.time() - start_time2))
    test_result += 'TEST RESULTS\nScore: '+str(score) + '\nPerplexity: '+str(perplexity)
    with open('test_results.txt','w') as file:
        file.write(test_result)
#############################################################################################
#returns: string
#arg: void
#for taking input from user
def takeInput():
    """Prompt until the user enters at least three words; return the last three.

    NOTE(review): this is a byte-for-byte duplicate of the takeInput
    defined earlier in the file; being defined later, this copy shadows
    the first.  One of the two should be removed.
    """
    cond = False
    #take input
    while(cond == False):
        sen = input('Enter the string\n')
        sen = removePunctuations(sen)
        temp = sen.split()
        if len(temp) < 3:
            print("Please enter atleast 3 words !")
        else:
            cond = True
            #keep only the final trigram for prediction
            temp = temp[-3:]
            sen = " ".join(temp)
    return sen
####################################################################################
#returns: void
#arg: dict
#for writing the contents of quad_dict to a text file
def writeQuads(quad_dict):
    """Write every quadgram key of *quad_dict* to quad_dict.txt, one per line."""
    with open('quad_dict.txt','w') as out_file:
        out_file.writelines(quad + '\n' for quad in quad_dict)
####################################################################################
def main():
    """Build the n-gram count tables and run training plus evaluation."""
    bi_dict = defaultdict(int)     # bigram frequencies
    tri_dict = defaultdict(int)    # trigram frequencies
    quad_dict = defaultdict(int)   # quadgram frequencies
    vocab_dict = defaultdict(int)  # unigram (vocabulary) frequencies
    prob_dict = OrderedDict()      # trigram -> [probability, word] candidates
    trainCorpus('training_corpus.txt', 'testing_corpus.txt',
                bi_dict, tri_dict, quad_dict, vocab_dict, prob_dict)
# Script entry point.
if __name__ == '__main__':
    main()
|
|
from __future__ import absolute_import, unicode_literals
from future.builtins import int, open, str
import os
import mimetypes
from json import dumps
from django.template.response import TemplateResponse
try:
from urllib.parse import urljoin, urlparse
except ImportError:
from urlparse import urljoin, urlparse
from django.apps import apps
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.admin.options import ModelAdmin
from django.contrib.staticfiles import finders
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound)
from django.shortcuts import redirect
from django.template import RequestContext
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import requires_csrf_token
from mezzanine.conf import settings
from mezzanine.core.forms import get_edit_form
from mezzanine.core.models import Displayable, SitePermission
from mezzanine.utils.cache import add_cache_bypass
from mezzanine.utils.views import is_editable, paginate, set_cookie
from mezzanine.utils.sites import has_site_permission
from mezzanine.utils.urls import next_url
mimetypes.init()
def set_device(request, device=""):
    """
    Sets a device name in a cookie when a user explicitly wants to go
    to the site for a particular device (eg mobile).
    """
    one_year = 60 * 60 * 24 * 365
    target = add_cache_bypass(next_url(request) or "/")
    response = redirect(target)
    set_cookie(response, "mezzanine-device", device, one_year)
    return response
@staff_member_required
def set_site(request):
    """
    Put the selected site ID into the session - posted to from
    the "Select site" drop-down in the header of the admin. The
    site ID is then used in favour of the current request's
    domain in ``mezzanine.core.managers.CurrentSiteManager``.

    Raises ``PermissionDenied`` when a non-superuser lacks a
    ``SitePermission`` for the requested site.
    """
    site_id = int(request.GET["site_id"])
    if not request.user.is_superuser:
        try:
            SitePermission.objects.get(user=request.user, sites=site_id)
        except SitePermission.DoesNotExist:
            raise PermissionDenied
    request.session["site_id"] = site_id
    admin_url = reverse("admin:index")
    # Renamed from ``next`` to avoid shadowing the builtin.
    next_path = next_url(request) or admin_url
    # Don't redirect to a change view for an object that won't exist
    # on the selected site - go to its list view instead.
    if next_path.startswith(admin_url):
        parts = next_path.split("/")
        if len(parts) > 4 and parts[4].isdigit():
            next_path = "/".join(parts[:4])
    return redirect(next_path)
def direct_to_template(request, template, extra_context=None, **kwargs):
    """
    Replacement for Django's ``direct_to_template`` that uses
    ``TemplateResponse`` via ``mezzanine.utils.views.render``.
    """
    # Note: extra_context, when given, is used (and mutated) directly.
    context = extra_context or {}
    context["params"] = kwargs
    # Resolve any callable context values before rendering.
    for key in list(context):
        value = context[key]
        if callable(value):
            context[key] = value()
    return TemplateResponse(request, template, context)
@staff_member_required
def edit(request):
    """
    Process the inline editing form.

    Expects POST data with "app", "model", "id" and "fields".  Returns an
    empty HTTP response on success, otherwise the first form error or a
    "Permission denied" message as the response body.
    """
    model = apps.get_model(request.POST["app"], request.POST["model"])
    obj = model.objects.get(id=request.POST["id"])
    form = get_edit_form(obj, request.POST["fields"], data=request.POST,
                         files=request.FILES)
    if not (is_editable(obj, request) and has_site_permission(request.user)):
        response = _("Permission denied")
    elif form.is_valid():
        form.save()
        # Record the change in the admin log, mirroring what the admin
        # change view would do.
        model_admin = ModelAdmin(model, admin.site)
        message = model_admin.construct_change_message(request, form, None)
        model_admin.log_change(request, obj, message)
        response = ""
    else:
        # Return only the first validation error message.
        response = list(form.errors.values())[0][0]
    return HttpResponse(response)
def search(request, template="search_results.html", extra_context=None):
    """
    Display search results. Takes an optional "contenttype" GET parameter
    in the form "app-name.ModelName" to limit search results to a single model.
    """
    query = request.GET.get("q", "")
    page = request.GET.get("page", 1)
    per_page = settings.SEARCH_PER_PAGE
    max_paging_links = settings.MAX_PAGING_LINKS
    try:
        parts = request.GET.get("type", "").split(".", 1)
        search_model = apps.get_model(*parts)
        search_model.objects.search  # Attribute check
    except (ValueError, TypeError, LookupError, AttributeError):
        # No valid model given: search across all displayable content.
        search_model = Displayable
        search_type = _("Everything")
    else:
        search_type = search_model._meta.verbose_name_plural.capitalize()
    results = search_model.objects.search(query, for_user=request.user)
    paginated = paginate(results, page, per_page, max_paging_links)
    context = {"query": query, "results": paginated,
               "search_type": search_type}
    context.update(extra_context or {})
    return TemplateResponse(request, template, context)
@staff_member_required
def static_proxy(request):
    """
    Serves TinyMCE plugins inside the inline popups and the uploadify
    SWF, as these are normally static files, and will break with
    cross-domain JavaScript errors if ``STATIC_URL`` is an external
    host. URL for the file is passed in via querystring in the inline
    popup plugin template, and we then attempt to pull out the relative
    path to the file, so that we can serve it locally via Django.
    """
    # Strip the scheme so http:// and https:// URLs compare equal.
    normalize = lambda u: ("//" + u.split("://")[-1]) if "://" in u else u
    url = normalize(request.GET["u"])
    host = "//" + request.get_host()
    static_url = normalize(settings.STATIC_URL)
    # Reduce the requested URL to a path relative to the static root.
    for prefix in (host, static_url, "/"):
        if url.startswith(prefix):
            url = url.replace(prefix, "", 1)
    response = ""
    (content_type, encoding) = mimetypes.guess_type(url)
    if content_type is None:
        content_type = "application/octet-stream"
    path = finders.find(url)
    if path:
        # finders.find may return a list of matches; use the first.
        if isinstance(path, (list, tuple)):
            path = path[0]
        if url.endswith(".htm"):
            # Inject <base href="{{ STATIC_URL }}"> into TinyMCE
            # plugins, since the path static files in these won't be
            # on the same domain.
            static_url = settings.STATIC_URL + os.path.split(url)[0] + "/"
            if not urlparse(static_url).scheme:
                static_url = urljoin(host, static_url)
            base_tag = "<base href='%s'>" % static_url
            with open(path, "r") as f:
                response = f.read().replace("<head>", "<head>" + base_tag)
        else:
            # Binary read for everything else (images, SWF, JS, ...).
            try:
                with open(path, "rb") as f:
                    response = f.read()
            except IOError:
                return HttpResponseNotFound()
    return HttpResponse(response, content_type=content_type)
def displayable_links_js(request):
    """
    Renders a list of url/title pairs for all ``Displayable`` subclass
    instances into JSON that's used to populate a list of links in
    TinyMCE.
    """
    links = []
    # PEP 8 (E731): use ``def`` rather than assigning a lambda.
    if "mezzanine.pages" in settings.INSTALLED_APPS:
        from mezzanine.pages.models import Page

        def is_page(obj):
            return isinstance(obj, Page)
    else:
        def is_page(obj):
            return False
    # For each item's title, we use its model's verbose_name, but in the
    # case of Page subclasses, we just use "Page", and then sort the items
    # by whether they're a Page subclass or not, then by their URL.
    for url, obj in Displayable.objects.url_map(for_user=request.user).items():
        # NOTE(review): "titles" looks odd next to ``obj.title`` -- confirm
        # it is the intended attribute name for page-like objects.
        title = getattr(obj, "titles", obj.title)
        real = hasattr(obj, "id")
        page = is_page(obj)
        if real:
            verbose_name = _("Page") if page else obj._meta.verbose_name
            title = "%s: %s" % (verbose_name, title)
        links.append((not page and real, {"title": str(title), "value": url}))
    sorted_links = sorted(links, key=lambda link: (link[0], link[1]['value']))
    return HttpResponse(dumps([link[1] for link in sorted_links]))
@requires_csrf_token
def page_not_found(request, template_name="errors/404.html"):
    """
    Mimics Django's 404 handler but with a different template path.
    """
    template = get_template(template_name)
    context = RequestContext(request, {
        "STATIC_URL": settings.STATIC_URL,
        "request_path": request.path,
    })
    return HttpResponseNotFound(template.render(context))
@requires_csrf_token
def server_error(request, template_name="errors/500.html"):
    """
    Mimics Django's error handler but adds ``STATIC_URL`` to the
    context.
    """
    template = get_template(template_name)
    extra = {"STATIC_URL": settings.STATIC_URL}
    return HttpResponseServerError(template.render(RequestContext(request, extra)))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.