gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
"""
The patch module allows for a grid to be created and for data to be
defined on that grid.
Typical usage:
-- create the grid
grid = Grid1d(nx)
-- create the data that lives on that grid
data = CellCenterData1d(grid)
bc_obj = BCObject(xlb="reflect", xrb="reflect")
data.register_var("density", bc_obj)
...
data.create()
-- initialize some data
dens = data.get_var("density")
dens[:] = ...
-- fill the ghost cells
data.fill_BC("density")
"""
from __future__ import print_function
import numpy
import sys
# boundary condition names accepted by BCObject; a plain "reflect" is
# resolved to reflect-even or reflect-odd based on odd_reflect_dir
valid = ["outflow", "periodic",
         "reflect", "reflect-even", "reflect-odd",
         "dirichlet", "neumann"]


class BCObject:
    """
    Boundary condition container -- holds the BC type on each boundary
    for a single variable.
    """

    def __init__(self,
                 xlb="outflow", xrb="outflow",
                 odd_reflect_dir=""):
        """
        Parameters
        ----------
        xlb, xrb : str
            BC on the -x / +x boundary; must be one of `valid`.
        odd_reflect_dir : str
            If "x", a generic "reflect" resolves to "reflect-odd"
            (sign flip across the boundary); otherwise "reflect-even".
        """
        # BUGFIX: the original test `not xlb in valid and xrb in valid`
        # parsed as `(xlb not in valid) and (xrb in valid)`, so an
        # invalid xrb (or two invalid BCs) slipped through unchecked.
        if xlb not in valid or xrb not in valid:
            sys.exit("ERROR: invalid BC")

        # -x boundary.  Use a plain conditional: the original
        # numpy.where returned a 0-d ndarray instead of a str.
        self.xlb = xlb
        if self.xlb == "reflect":
            self.xlb = "reflect-odd" if odd_reflect_dir == "x" else "reflect-even"

        # +x boundary
        self.xrb = xrb
        if self.xrb == "reflect":
            self.xrb = "reflect-odd" if odd_reflect_dir == "x" else "reflect-even"

        # periodic BCs only make sense on both ends at once
        if (xlb == "periodic") != (xrb == "periodic"):
            sys.exit("ERROR: both xlb and xrb must be periodic")

    def __str__(self):
        """ print out some basic information about the BC object """
        return "BCs: -x: %s +x: %s " % (self.xlb, self.xrb)
class Grid1d:
    """
    The 1-d grid class.  The grid object contains the coordinate
    information (at various centerings).

    A basic (1-d) representation of the layout is:

    |     |      |     X     |     |      |     |     X     |      |     |
    +--*--+- // -+--*--X--*--+--*--+- // -+--*--+--*--X--*--+- // -+--*--+
       0          ng-1    ng   ng+1         ... ng+nx-1 ng+nx      2ng+nx-1

                         ilo                      ihi

    |<- ng ghostcells->|<---- nx interior zones ----->|<- ng ghostcells->|

    The '*' marks the data locations.
    """

    def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0):
        """
        Parameters
        ----------
        nx : int
            number of interior zones
        ng : int
            number of ghost cells on each end (default 1)
        xmin, xmax : float
            physical extrema of the domain
        """
        # size of grid (qx = total zones including ghosts)
        self.nx = nx
        self.ng = ng
        self.qx = 2*ng + nx

        # domain extrema
        self.xmin = xmin
        self.xmax = xmax

        # indices of the block interior (excluding guardcells)
        self.ilo = ng
        self.ihi = ng + nx - 1

        # coordinates of the left edge, right edge, and center of each
        # zone (ghost zones extend past xmin/xmax)
        self.dx = (xmax - xmin)/nx
        self.xl = (numpy.arange(nx + 2*ng) - ng)*self.dx + xmin
        self.xr = (numpy.arange(nx + 2*ng) + 1.0 - ng)*self.dx + xmin
        self.x = 0.5*(self.xl + self.xr)

    def scratch_array(self):
        """Return a zeroed float64 array sized to the full grid."""
        return numpy.zeros((self.qx), dtype=numpy.float64)

    def __str__(self):
        """ print out some basic information about the grid object """
        # BUGFIX: the original used backtick repr (`x`), which is
        # Python-2-only syntax and a SyntaxError under Python 3.
        return "1-d grid: nx = {}, ng = {}".format(self.nx, self.ng)
class CellCenterData1d:
    """
    The cell-centered data that lives on a grid.

    A CellCenterData1d object is built in a multi-step process before it
    can be used.  We pass in a grid object to describe where the data
    lives:

        my_data = patch.CellCenterData1d(my_grid)

    register any variables that we expect to live on this patch.  Here
    bc_object describes the boundary conditions for that variable:

        my_data.register_var('density', bc_object)
        ...

    finally, finish the initialization of the patch:

        my_data.create()

    This last step actually allocates the storage for the state
    variables.  Once this is done, the patch is considered to be
    locked.  New variables cannot be added.
    """

    def __init__(self, grid, dtype=numpy.float64):
        """
        Parameters
        ----------
        grid : Grid1d-like
            object providing nx, ng, qx, ilo, ihi
        dtype : numpy dtype
            storage type for the state data
        """
        self.grid = grid
        self.dtype = dtype
        self.data = None

        self.vars = []
        self.nvar = 0
        self.BCs = {}

        # simulation time associated with this state (unset = -1)
        self.t = -1

        self.initialized = 0

    def register_var(self, name, bc_object):
        """
        Register a variable with this CellCenterData1d object, together
        with the BCObject describing its boundary conditions.
        Must be called before create().
        """
        if self.initialized == 1:
            sys.exit("ERROR: grid already initialized")

        self.vars.append(name)
        self.nvar += 1
        self.BCs[name] = bc_object

    def create(self):
        """
        Called after all the variables are registered; allocates the
        storage for the state data and locks the patch.
        """
        if self.initialized == 1:
            sys.exit("ERROR: grid already initialized")

        self.data = numpy.zeros((self.nvar, self.grid.qx), dtype=self.dtype)
        self.initialized = 1

    def __str__(self):
        """ print out some basic information about the ccData1d object """
        if self.initialized == 0:
            return "CellCenterData1d object not yet initialized"

        # BUGFIX: replaced Python-2-only backtick repr with formatting
        my_str = "cc data: nx = {}, ng = {}\n".format(self.grid.nx,
                                                      self.grid.ng)
        my_str += "         nvars = {}\n".format(self.nvar)
        my_str += "         variables: \n"

        ilo = self.grid.ilo
        ihi = self.grid.ihi

        for n in range(self.nvar):
            my_str += "%16s: min: %15.10f max: %15.10f\n" % \
                (self.vars[n],
                 numpy.min(self.data[n, ilo:ihi+1]),
                 numpy.max(self.data[n, ilo:ihi+1]))
            my_str += "%16s BCs: -x: %-12s +x: %-12s \n" % \
                (" ", self.BCs[self.vars[n]].xlb,
                 self.BCs[self.vars[n]].xrb)

        return my_str

    def get_var(self, name):
        """
        Return the data array for the variable described by name.  Any
        changes made to this array are automatically reflected in the
        CellCenterData1d object (it is a view, not a copy).
        """
        n = self.vars.index(name)
        return self.data[n, :]

    def zero(self, name):
        """Set the named variable to zero everywhere (incl. ghosts)."""
        n = self.vars.index(name)
        self.data[n, :] = 0.0

    def fill_BC_all(self):
        """
        Fill boundary conditions on all variables.
        """
        for name in self.vars:
            # BUGFIX: the original called self.fillBC, which does not
            # exist -- the method is fill_BC
            self.fill_BC(name)

    def fill_BC(self, name):
        """
        Fill the ghost cells for a single state variable.

        We do periodic, reflect-even, reflect-odd, and outflow.  We
        piggy-back on outflow and reflect-odd for Neumann and Dirichlet
        homogeneous BCs respectively, but this only works for a single
        ghost cell.
        """
        # there is only a single grid, so every boundary is a physical
        # boundary (except if we are periodic)
        n = self.vars.index(name)
        ng = self.grid.ng
        ilo = self.grid.ilo
        ihi = self.grid.ihi
        bc = self.BCs[name]

        # -x boundary
        if bc.xlb == "outflow" or bc.xlb == "neumann":
            self.data[n, :ilo] = self.data[n, ilo]

        elif bc.xlb == "reflect-even":
            for i in range(ilo):
                self.data[n, i] = self.data[n, 2*ng - i - 1]

        elif bc.xlb == "reflect-odd" or bc.xlb == "dirichlet":
            for i in range(ilo):
                self.data[n, i] = -self.data[n, 2*ng - i - 1]

        elif bc.xlb == "periodic":
            for i in range(ilo):
                self.data[n, i] = self.data[n, ihi - ng + i + 1]

        # +x boundary
        if bc.xrb == "outflow" or bc.xrb == "neumann":
            self.data[n, ihi+1:] = self.data[n, ihi]

        elif bc.xrb == "reflect-even":
            for i in range(ng):
                self.data[n, ihi + 1 + i] = self.data[n, ihi - i]

        elif bc.xrb == "reflect-odd" or bc.xrb == "dirichlet":
            for i in range(ng):
                self.data[n, ihi + 1 + i] = -self.data[n, ihi - i]

        elif bc.xrb == "periodic":
            for i in range(ihi + 1, 2*ng + self.grid.nx):
                self.data[n, i] = self.data[n, i - ihi - 1 + ng]

    def restrict(self, varname):
        """
        Restrict the variable varname to a coarser grid (factor of 2
        coarser) and return an array with the resulting data (and same
        number of ghostcells).
        """
        fG = self.grid
        fData = self.get_var(varname)

        # allocate an array for the coarsely gridded data
        ng_c = fG.ng
        # BUGFIX: use integer division -- plain / yields a float under
        # Python 3 and breaks the array-size arithmetic
        nx_c = fG.nx // 2

        cData = numpy.zeros((2*ng_c + nx_c), dtype=self.dtype)

        ilo_c = ng_c
        ihi_c = ng_c + nx_c - 1

        # average each pair of fine cells into the coarse cell that
        # encompasses them, using a stride of 2 into the fine array
        cData[ilo_c:ihi_c+1] = \
            0.5*(fData[fG.ilo:fG.ihi+1:2] + fData[fG.ilo+1:fG.ihi+1:2])

        return cData

    def prolong(self, varname):
        """
        Prolong the data in the current (coarse) grid to a finer
        (factor of 2 finer) grid.  Return an array with the resulting
        data (and same number of ghostcells).

        We reconstruct the data in each zone from the zone-averaged
        variables using centered-difference slopes

            f(x) = m x/dx + <f>

        When averaged over the parent cell, this reproduces <f>.  Each
        zone's reconstruction is averaged over its 2 children; the
        children are filled with stride-2 vector operations.
        """
        cG = self.grid
        cData = self.get_var(varname)

        # allocate an array for the finely gridded data
        ng_f = cG.ng
        nx_f = cG.nx*2

        fData = numpy.zeros((2*ng_f + nx_f), dtype=self.dtype)

        ilo_f = ng_f
        ihi_f = ng_f + nx_f - 1

        # centered-difference slopes for the coarse data
        m_x = cG.scratch_array()
        m_x[cG.ilo:cG.ihi+1] = \
            0.5*(cData[cG.ilo+1:cG.ihi+2] - cData[cG.ilo-1:cG.ihi])

        # fill the '1' (left) children
        fData[ilo_f:ihi_f+1:2] = \
            cData[cG.ilo:cG.ihi+1] - 0.25*m_x[cG.ilo:cG.ihi+1]

        # fill the '2' (right) children
        fData[ilo_f+1:ihi_f+1:2] = \
            cData[cG.ilo:cG.ihi+1] + 0.25*m_x[cG.ilo:cG.ihi+1]

        return fData
if __name__ == "__main__":

    # quick demonstration of the basic mesh operations: build a grid,
    # attach data, initialize a Gaussian, and print a summary
    demo_grid = Grid1d(16, xmax=1.0)

    demo_data = CellCenterData1d(demo_grid)
    demo_bc = BCObject()

    demo_data.register_var("a", demo_bc)
    demo_data.create()

    a = demo_data.get_var("a")
    a[:] = numpy.exp(-(demo_grid.x - 0.5)**2 / 0.1**2)

    print(demo_data)
| |
from Datasource import Datasource
import numpy as np
import h5py
import json
import os
class HDF5(Datasource):
    """ Loads images from hdf5 files

    Attributes
    ------------
    _read: list
        All load functions for :data:`_meta_files`
    """
    # All readers for _meta_files (parallel to _meta_files by index)
    _read = [json.load]
    #: All extensions of files pointing to h5
    _meta_files = ['.json']

    @staticmethod
    def dtype(n):
        """Return ``n.dtype`` mapped to its unsigned counterpart.

        Signed integer dtypes become the same-width unsigned dtype;
        any other dtype passes through unchanged.
        """
        # lookup table: signed integer dtype -> unsigned dtype
        only_pos = dict(zip(
            map(np.dtype, ('int64', 'int32', 'int16', 'int8')),
            map(np.dtype, ('uint64', 'uint32', 'uint16', 'uint8'))
        ))
        d = n.dtype
        # fall back to the original dtype if not in the table
        return only_pos.get(d,d)

    @staticmethod
    def load_tile(t_query):
        """load a single tile (image)

        Gets the image path from the \
        :data:`TileQuery.RUNTIME`. ``IMAGE`` attribute.
        Gets the position of the image with the whole \
        volume from :meth:`TileQuery.all_scales`, \
        :meth:`TileQuery.tile_origin`, and \
        :meth:`TileQuery.blocksize`.

        Arguments
        -----------
        t_query: :class:`TileQuery`
            With file path and image position

        Returns
        -----------
        np.ndarray
            An image array that may be as large \
            as an entire full resolution slice of \
            the whole hdf5 volume. Based on the value \
            of :meth:`TileQuery.all_scales`, this array \
            will likely be downsampled by to a small fraction \
            of the full tile resolution.
        """
        # call superclass
        Datasource.load_tile(t_query)
        # Load data for all the h5 files
        h5_files = t_query.RUNTIME.IMAGE.SOURCE.HDF5.VALUE
        # Get all the z indices and coordinates
        # NOTE(review): subscripting zip(...) with [-1] is Python-2-only;
        # under Python 3 zip returns an iterator and this raises TypeError.
        # Each entry is (file_index, z_offset_of_that_file).
        z_stops = list(enumerate(zip(*h5_files)[-1]))
        # reversed copy, so the first match below is the highest offset
        z_starts = z_stops[::-1]
        # Find the region to crop
        sk,sj,si = t_query.all_scales
        [z0,y0,x0],[z1,y1,x1] = t_query.source_tile_bounds
        # Get the scaled blocksize for the output array
        zb,yb,xb = t_query.blocksize
        # get the right h5 files for the current z index:
        # last file starting at/below z0 through first at/above z1
        start_z = next((i for i, z in z_starts if z <= z0), 0)
        stop_z = next((i for i, z in z_stops if z >= z1), len(z_stops))
        needed_files = [h5_files[zi] for zi in range(start_z, stop_z)]
        ####
        # Load from all needed files
        ####
        dtype = getattr(np, t_query.OUTPUT.INFO.TYPE.VALUE)
        # Make the full volume for all needed file volumes
        full_vol = np.zeros([zb, yb, xb], dtype = dtype)
        # Get the first offset
        # NOTE(review): assumes needed_files is non-empty; an empty
        # start/stop range would raise IndexError here -- confirm callers
        # always request an in-range z interval.
        offset_0 = needed_files[0][-1]
        # Loop through all needed h5 files
        for h5_file in needed_files:
            # Offset for this file
            z_offset = h5_file[-1]
            # Get input and output start
            iz0 = max(z0 - z_offset, 0)
            # Scale output bounds by z-scale
            oz0 = (z_offset - offset_0) // sk
            # Load the image region from the h5 file
            # NOTE(review): no mode given -- relies on the h5py default;
            # confirm read-only access is not required here.
            with h5py.File(h5_file[0]) as fd:
                # read from one file (entry layout: [path, dataset, offset])
                vol = fd[h5_file[1]]
                # Get the input and output end-bounds
                iz1 = min(z1 - z_offset, vol.shape[0])
                # Scale the output bounds by the z-scale
                dz = iz1 - iz0
                oz1 = oz0 + dz // sk
                # Get the volume from one file
                file_vol = vol[iz0:iz1:sk, y0:y1:sj, x0:x1:si]
                yf, xf = file_vol.shape[1:]
                # Add the volume to the full volume
                full_vol[oz0:oz1,:yf,:xf] = file_vol
        # Combined from all files
        return full_vol

    @staticmethod
    def load_file(h5_file):
        """ Load the needed volume from a single h5 File

        NOTE(review): this method has no implementation -- the docstring
        is its entire body, so it returns None.

        Arguments
        -----------
        t_query: :class:`TileQuery`
            With file path and image position
        """

    @staticmethod
    def preload_source(t_query):
        """load info from example tile (image)

        Calls :meth:`valid_path` to get filename and \
        inner dataset path for the full h5 image volume.
        Then gets three needed values from the given \
        path from the :class:`TileQuery` t_query

        Arguments
        -----------
        t_query: :class:`TileQuery`
            Only the file path is needed

        Returns
        --------
        dict
            Will be empty if :meth:`valid_path` finds\
            this filename to not give a valid h5 volume.

            * :class:`RUNTIME` ``.IMAGE.BLOCK.NAME``
              (numpy.ndarray) -- 3x1 for any give tile shape
            * :class:`OUTPUT` ``.INFO.TYPE.NAME``
              (str) -- numpy dtype of any given tile
            * :class:`OUTPUT` ``.INFO.SIZE.NAME``
              (numpy.ndarray) -- 3x1 for full volume shape
        """
        # Keyword names
        output = t_query.OUTPUT.INFO
        runtime = t_query.RUNTIME.IMAGE
        k_h5 = runtime.SOURCE.HDF5.NAME
        # Get the max block size in bytes for a single tile
        max_bytes = t_query.RUNTIME.CACHE.MAX_BLOCK.VALUE
        # budget a single tile at 1/64 of the cache block limit
        # (presumably headroom for concurrent tiles -- TODO confirm)
        max_bytes = int(max_bytes/64)
        # Check if path is valid
        keywords = HDF5.valid_path(t_query)
        if not keywords:
            return {}
        # Validate highest in z file name and dataset
        # (valid_path returns the list sorted ascending by z offset,
        # so [-1] is the file with the largest z offset)
        filename = keywords[k_h5][-1][0]
        dataset = keywords[k_h5][-1][1]
        offset = keywords[k_h5][-1][2]
        # Load properties from H5 dataset
        with h5py.File(filename,'r') as fd:
            # Get the volume
            vol = fd[dataset]
            # Get a shape for all the files
            # (full z extent = last file's offset + that file's z size)
            shape = np.uint32(vol.shape)
            shape[0] += offset
            ####
            # Get a blockshape as a flat section
            ####
            # Get the bytes for a full slice
            voxel_bytes = np.uint32(vol.dtype.itemsize)
            slice_bytes = voxel_bytes * np.prod(shape[1:])
            # Get the nearest tile size under cache limit
            square_overage = np.ceil(slice_bytes / max_bytes)
            side_scalar = np.ceil(np.sqrt(square_overage))
            # Set the actual blocksize to be under the cache limit
            plane_shape = np.ceil(shape[1:] / side_scalar)
            max_block = np.r_[[64], plane_shape]
            ####
            # Get max blocksizes for different resolutions
            ####
            lo_res = 1
            # Get all block sizes by halving the max block size
            # (range(1) yields only res=0, i.e. the full-res shape)
            all_blocks = [shape/(2**res) for res in range(lo_res)]
            block_array = np.clip(np.ceil(all_blocks), 1, max_block)
            # return named keywords
            keywords.update({
                runtime.BLOCK.NAME: np.uint32(block_array),
                output.SIZE.NAME: np.uint32(shape),
                output.TYPE.NAME: str(HDF5.dtype(vol)),
            })
        # Combine results with parent method
        common = Datasource.preload_source(t_query)
        return dict(common, **keywords)

    @staticmethod
    def valid_path(t_query):
        """ Check if filename can access h5 data

        The filename can be a path to a json file \
        that lists an h5 file and dataset path, or \
        the filename can be a direct path to an h5 \
        file. In either case the 'outer' file path \
        directly to the h5 file and the 'inner' \
        dataset path will be returned.

        Arguments
        -----------
        t_query: :class:`TileQuery`
            Only the file path is needed

        Returns
        --------
        dict
            Empty if not a valid h5 volume

            * :class:`RUNTIME` ``.IMAGE.SOURCE.HDF5.OUTER.NAME``
              (str) -- The direct filename to an hdf5 file
            * :class:`RUNTIME` ``.IMAGE.SOURCE.HDF5.INNER.NAME``
              (str) -- The datset in the file with image data
        """
        # Dereference path to hdf5 data
        k_h5 = t_query.RUNTIME.IMAGE.SOURCE.HDF5.NAME
        h5_list = HDF5.load_info(t_query)
        # load all the files
        for h5_file in h5_list:
            try:
                # Try to load one file
                with h5py.File(h5_file[0],'r') as fd:
                    if h5_file[1] not in fd.keys():
                        # fall back to the first dataset in the file
                        # NOTE(review): fd.keys()[0] is Python-2-only --
                        # on Python 3 h5py returns a non-indexable view.
                        # Also mutates the entry in place.
                        h5_file[1] = fd.keys()[0]
            except:
                # NOTE(review): bare except treats every failure
                # (missing file, permissions, corrupt hdf5) as "invalid"
                return {}
        # sort by z start
        def z_sort(h_file):
            return h_file[-1]
        # return reverse sorted files
        # NOTE(review): despite the comment above, this sorts ascending
        # by z offset -- preload_source relies on [-1] being largest.
        return {
            k_h5: sorted(h5_list, key=z_sort)
        }

    @staticmethod
    def get_details(h5_info, file_dict):
        """ Get all needed h5 file info from a pointer file

        Arguments
        ----------
        h5_info:
            Keyword container providing OUTER, INNER, and OFF names
        file_dict: dict
            Contains keys for INNER, OUTER, and OFF values

        Returns
        --------
        list
            All INNER, OUTER, OFF values in a flat list
        """
        # Get values for actual hdf5 file (z offset defaults to 0)
        outer_path = file_dict.get(h5_info.OUTER.NAME)
        inner_path = file_dict.get(h5_info.INNER.NAME)
        z_offset = file_dict.get(h5_info.OFF.NAME, 0)
        return [outer_path, inner_path, z_offset]

    @staticmethod
    def load_info(t_query):
        """ Gets the h5 volume filename and datapath

        If the t_query path has an extension in \
        the :data:`_meta_files` and the file contains \
        ``RUNTIME.IMAGE.SOURCE.HDF5.OUTER.NAME`` \
        and ``RUNTIME.IMAGE.SOURCE.HDF5.INNER.NAME`` \
        keys, then the values of those keys are returned. \
        If any of those statements is not true, then the \
        original t_query path is returned along with the \
        default dataset given by \
        ``RUNTIME.IMAGE.SOURCE.HDF5.INNER.VALUE``.

        Arguments
        -----------
        t_query: :class:`TileQuery`
            Only the file path is needed

        Returns
        --------
        list
            * The direct filename to an hdf5 file
            * The datset in the file with image data
        """
        # Load information about full hdf5
        h5_info = t_query.RUNTIME.IMAGE.SOURCE.HDF5
        filename = t_query.OUTPUT.INFO.PATH.VALUE
        dataset = h5_info.INNER.VALUE
        # Get all details for info
        def get_details(info):
            return HDF5.get_details(h5_info, info)
        # Load path if ends with json
        ending = os.path.splitext(filename)[1]
        if ending in HDF5._meta_files:
            # Get function to read the metainfo file
            order = HDF5._meta_files.index(ending)
            try:
                with open(filename) as infile:
                    # Read the metainfo file
                    info = HDF5._read[order](infile)
            except IOError:
                # unreadable pointer file: treat the path itself as h5
                return [[filename, dataset, 0]]
            ######
            ## Handle references to multiple h5 files
            ## Get first item in list
            ######
            if isinstance(info, list):
                # NOTE(review): map() is a list on Python 2 but a lazy
                # iterator on Python 3; callers index/sort the result.
                return map(get_details, info)
            # Get the inner dataset and the new path
            return [get_details(info)]
        return [[filename, dataset, 0]]
| |
import inspect
import logging
import traceback
import urllib
import urllib.parse
from collections import namedtuple
from decorator import decorator
from tornado.escape import to_basestring
from tornado.web import RequestHandler, HTTPError
from dorthy import template
from dorthy.enum import DeclarativeEnum
from dorthy.json import jsonify
from dorthy.security.auth import AuthorizationHeaderToken
from dorthy.session import session_store, Session
from dorthy.request import WebRequestHandlerProxyMixin
from dorthy.security import SecurityManager, AccessDeniedError, AuthenticationException
from dorthy.settings import config
from dorthy.utils import native_str, parse_json
# module-level logger, one per module per stdlib convention
logger = logging.getLogger(__name__)

# payload rendered to clients by BaseHandler.write_error; `exception`
# and `stack` are populated only when the application runs in debug mode
ErrorResponse = namedtuple("ErrorResponse",
                           ["status_code", "message", "exception", "stack"])
class MediaTypes(DeclarativeEnum):
    """Content types the handlers can consume and produce."""
    HTML = "text/html"
    JSON = "application/json"
def consumes(media=MediaTypes.JSON, arg_name="model",
             request_arg=None, optional_request_arg=False, underscore_case=True, object_dict_wrapper=True):
    """
    Decorator factory: parse the JSON request payload and inject it into
    the wrapped handler method as the `arg_name` argument.

    :param media: expected request media type (only JSON is supported)
    :param arg_name: name of the parameter receiving the parsed payload
    :param request_arg: if set, read the JSON from this request argument
        instead of the request body (mixed content, e.g. file uploads)
    :param optional_request_arg: allow `request_arg` to be missing
    :param underscore_case: convert payload keys to underscore_case
    :param object_dict_wrapper: wrap parsed objects (see dorthy.utils.parse_json)
    """

    def _parse_json(handler):
        # JSON comes either from the whole body or a single argument
        if request_arg is None:
            s = to_basestring(handler.request.body)
        else:
            arg = handler.get_argument(request_arg, None)
            if arg is None and not optional_request_arg:
                raise HTTPError(400, "Argument missing: {}".format(request_arg))
            s = to_basestring(arg)
        return parse_json(s, underscore_case=underscore_case, object_dict_wrapper=object_dict_wrapper) if s else None

    def _consumes(f, handler, *args, **kwargs):
        # check for proper content type if request_arg is not set
        # if request_arg is set assume mixed content -- i.e. files and data
        if request_arg is None and not handler.request.headers.get("Content-Type", "").startswith(media.value):
            raise HTTPError(400, "Invalid Content-Type received.")
        if media == MediaTypes.JSON:
            # check keyword args first
            if arg_name in kwargs:
                kwargs[arg_name] = _parse_json(handler)
            else:
                # find the model parameter positionally, either by name
                # or by a "model" annotation on the parameter
                sig = inspect.signature(f)
                params = sig.parameters
                for indx, (name, param) in enumerate(params.items()):
                    if name == arg_name or \
                            (param.annotation != inspect.Parameter.empty and param.annotation == "model"):
                        args = list(args)
                        # indx - 1 because `handler` occupies position 0
                        # of the signature but is not part of *args
                        args[indx - 1] = _parse_json(handler)
                        break
                # model param not contained in method signature
                # NOTE(review): relies on the loop variable after the
                # loop; `indx == len(args)` is the not-found sentinel --
                # confirm against an annotated handler.
                if indx == len(args):
                    raise TypeError("No model argument found in method signature")
        else:
            raise HTTPError(500, "MediaType not supported.")
        return f(handler, *args, **kwargs)
    return decorator(_consumes)
def produces(media=MediaTypes.JSON, root=None, camel_case=True, ignore_attributes=None):
    """
    Decorator factory: take the wrapped handler method's return value
    and write it as the response body in the given media type.
    """
    def _produces(f, handler, *args, **kwargs):
        handler.media_type = media
        outcome = f(handler, *args, **kwargs)
        handler.write_results(
            outcome,
            media=media,
            root=root,
            camel_case=camel_case,
            ignore_attributes=ignore_attributes,
        )
    return decorator(_produces)
def mediatype(media=MediaTypes.JSON):
    """
    Decorator factory: tag the handler and the Content-Type response
    header with the given media type before invoking the wrapped method.
    """
    def _mediatype(func, handler, *args, **kwargs):
        handler.media_type = media
        handler.set_header("Content-Type", media.value)
        return func(handler, *args, **kwargs)
    return decorator(_mediatype)
@decorator
def render(f, handler, *args, **kwargs):
    """
    Render the wrapped method's return value: either a template name
    (str) or a (template_name, template_kwargs) pair.
    """
    outcome = f(handler, *args, **kwargs)
    if not outcome or handler.finished:
        return
    if isinstance(outcome, str):
        handler.render(outcome)
    else:
        handler.render(outcome[0], **outcome[1])
@decorator
def redirect(f, handler, *args, **kwargs):
    """Redirect to the URL returned by the wrapped method, if any."""
    target = f(handler, *args, **kwargs)
    if target and not handler.finished:
        handler.redirect(target)
class BaseHandler(RequestHandler, WebRequestHandlerProxyMixin):
    """
    Base class for application request handlers.

    Adds cookie-backed session management, security integration,
    framework (non-tornado) template rendering, and uniform JSON/HTML
    result and error writing on top of tornado's RequestHandler.
    """

    # name of the session-id cookie
    SESSION_COOKIE_KEY = "s"

    # session timeout in seconds; overridable via web.session_timeout
    DEFAULT_SESSION_TIMEOUT = 1800
    if "web.session_timeout" in config:
        DEFAULT_SESSION_TIMEOUT = config.web.get("session_timeout", 1800)

    # optional cookie domain (e.g. to share the session across hosts)
    COOKIE_DOMAIN = None
    if "web.cookie_domain" in config:
        COOKIE_DOMAIN = config.web.cookie_domain

    # sign the session cookie only when a cookie secret is configured
    USE_SECURE_COOKIE = True if "web.cookie_secret" in config and config.web.enabled("cookie_secret") else False

    def __init__(self, application, request, **kwargs):
        self.media_type = MediaTypes.HTML
        self.application = application
        self._request_finished = False
        self.__session = None
        self.__debug = "debug" in self.application.settings and \
            self.application.settings["debug"]
        self.__client_ip = None
        # BUGFIX: initialize unconditionally -- previously this was only
        # assigned inside the branch below, so render() raised
        # AttributeError whenever "template_conf" was not configured.
        self.__use_framework_templates = False
        # initialize framework template system -- replace tornado's
        if "template_conf" in self.application.settings:
            self.require_setting("template_path", feature="@dorthy.web.template.engine")
            conf = dict(self.application.settings["template_conf"])
            conf["auto_reload"] = self.__debug
            template.config_environment(self.get_template_path(), **conf)
            self.__use_framework_templates = True
        super().__init__(application, request, **kwargs)

    @property
    def client_ip(self):
        """
        Provides a method to retrieve the client ip address
        behind a load balancer (ELB)
        """
        if not self.__client_ip:
            # X-Forwarded-For may carry a hop chain; the first entry is
            # the originating client
            ip = self.request.headers.get("X-Forwarded-For", self.request.remote_ip)
            self.__client_ip = ip.split(",")[0].strip()
        return self.__client_ip

    @property
    def debug(self):
        """True when the tornado application runs in debug mode."""
        return self.__debug

    @property
    def finished(self):
        """True once tornado has finished writing the response."""
        return self._finished

    def get_user_agent(self):
        """Return the User-Agent request header (or None)."""
        return self.request.headers.get("User-Agent")

    def get_current_user(self):
        """Return the authenticated principal, or None."""
        if SecurityManager().authenticated():
            return SecurityManager().get_principal()
        else:
            return None

    def get_user_locale(self):
        """Return the current principal's locale, or None."""
        principal = self.get_current_user()
        if principal is not None:
            return principal.locale
        else:
            return None

    def set_secure_cookie(self, name, value, expires_days=None,
                          domain=None, expires=None, path="/", **kwargs):
        """Set a signed cookie, defaulting the domain to COOKIE_DOMAIN."""
        if domain is None and self.COOKIE_DOMAIN is not None:
            domain = self.COOKIE_DOMAIN
        super().set_secure_cookie(name,
                                  value,
                                  expires_days=expires_days,
                                  domain=domain,
                                  expires=expires,
                                  path=path,
                                  **kwargs)

    def set_nocache_headers(self):
        """Mark the response non-cacheable for HTTP/1.0 and 1.1 caches."""
        self.set_header('Cache-Control', 'no-cache, no-store, must-revalidate')
        self.set_header('Pragma', 'no-cache')
        self.set_header('Expires', 0)

    def clear_session(self):
        """Invalidate the current session, if one exists."""
        session = self.get_session()
        if session is not None:
            session.invalidate()

    def get_session(self, create=False, timeout=DEFAULT_SESSION_TIMEOUT, update_access=True):
        """
        Return the session for this request, loading it from the session
        store via the session cookie.  When create=True, a new session
        (and cookie) is created if none exists yet.
        """
        if self.__session is None:
            session_id = self.__get_session_cookie()
            if session_id:
                self.__session = session_store.load(native_str(session_id))
            if self.__session is None and create:
                self.__session = Session(session_store.generate_session_id(),
                                         timeout=timeout,
                                         update_access=update_access)
                self.__set_session_cookie()
        return self.__session

    def __get_session_cookie(self):
        # read the cookie back with the same mechanism used to set it
        if self.USE_SECURE_COOKIE:
            return self.get_secure_cookie(self.SESSION_COOKIE_KEY)
        else:
            return self.get_cookie(self.SESSION_COOKIE_KEY)

    def __set_session_cookie(self):
        if self.__session is not None:
            if self.USE_SECURE_COOKIE:
                self.set_secure_cookie(self.SESSION_COOKIE_KEY, self.__session.session_id)
            else:
                self.set_cookie(self.SESSION_COOKIE_KEY, self.__session.session_id)
        else:
            # logger.warn is a deprecated alias of warning()
            logger.warning("Set Session cookie called for empty session.")

    def __save_session(self):
        # load the session if it exists so that the session
        # store can update its timestamp / expiration period
        session = self.get_session()
        if session is not None:
            session_store.save(session)
            if not session.valid:
                self.clear_cookie(self.SESSION_COOKIE_KEY)
        elif self.__get_session_cookie():
            # no server-side session but a stale cookie remains
            self.clear_cookie(self.SESSION_COOKIE_KEY)

    def on_finish(self):
        """Hook for subclasses; invoked once per request from finish()."""
        pass

    def render(self, template_name, **kwargs):
        """
        Renders the given template using the passed keyword args
        :param template_name: the template name
        :param kwargs: keyword args passed to the template
        """
        if self.__use_framework_templates:
            temp = template.get_template(self.get_template_path(), template_name)
            # mirror the namespace tornado's own render() provides
            namespace = dict(
                handler=self,
                request=self.request,
                current_user=self.current_user,
                locale=self.locale,
                static_url=self.static_url,
                xsrf_form_html=self.xsrf_form_html,
                reverse_url=self.reverse_url
            )
            namespace.update(kwargs)
            rendered = temp.render(namespace)
            self.finish(rendered)
        else:
            # BUGFIX: expand the kwargs -- tornado's signature is
            # render(template_name, **kwargs); passing the dict
            # positionally raised a TypeError.
            super().render(template_name, **kwargs)

    def finish(self, chunk=None):
        """Persist the session and run on_finish() exactly once."""
        if not self._request_finished:
            # prevents a recursive loop on finish if exception raised
            self._request_finished = True
            self.__save_session()
            self.on_finish()
            super().finish(chunk)

    def write_error(self, status_code, **kwargs):
        """
        Render an error response as JSON or HTML depending on the
        handler's media type.  Exception details and stack traces are
        exposed only in debug mode.
        """
        e = stack = None
        exc_info = kwargs.get("exc_info", None)
        if status_code == 401:
            message = "User not authorized."
        elif status_code == 403:
            message = "User forbidden."
        else:
            if exc_info:
                # map security exceptions onto their HTTP statuses
                if isinstance(exc_info[1], AccessDeniedError):
                    status_code = 403
                    message = "User forbidden."
                    self.set_status(403, message)
                elif isinstance(exc_info[1], AuthenticationException):
                    status_code = 401
                    message = "User not authorized."
                    self.set_status(401, message)
                else:
                    t, e, tb = exc_info
                    stack = traceback.format_tb(tb)
                    # never leak internals outside of debug mode
                    message = str(e) if self.debug else "An internal server error occurred."
            else:
                message = "An unknown error occurred."
        error = ErrorResponse(status_code=status_code,
                              message=message,
                              exception=e.__class__.__name__ if self.debug and e else None,
                              stack=stack if self.debug and stack else None)
        if self.media_type == MediaTypes.JSON:
            self.set_header("Content-Type", MediaTypes.JSON.value)
            self.write(jsonify(error._asdict(), "error"))
        else:
            if self.debug:
                self.render("error/error-dev.html", error=error)
            else:
                self.render("error/error.html", error=error)

    def write_results(self, results, media=MediaTypes.JSON, root=None, camel_case=True, ignore_attributes=None):
        """Serialize and write `results` in the requested media type."""
        self.media_type = media
        self.set_header("Content-Type", media.value)
        if results and not self.finished:
            if media == MediaTypes.JSON:
                # allow an application-wide default root wrapper
                if root is None and "produces_wrapper" in self.application.settings:
                    root_wrapper = self.application.settings["produces_wrapper"]
                else:
                    root_wrapper = root
                self.write(jsonify(results,
                                   root=root_wrapper,
                                   camel_case=camel_case,
                                   ignore_attributes=ignore_attributes))
            elif media == MediaTypes.HTML:
                self.write(results)
class TemplateHandler(BaseHandler):
    """Minimal handler that renders a fixed template on GET."""

    def initialize(self, template, status=None):
        # `template`: template name supplied via the URLSpec kwargs
        # `status`: optional HTTP status code to set before rendering
        self.template = template
        self.status = status

    def get(self):
        if self.status:
            self.set_status(self.status)
        self.render(self.template)
def authenticated(redirect=False, allow_header_auth=False):
    """
    Decorator factory: require an authenticated security context before
    invoking the wrapped handler method.

    :param redirect: on failure, redirect GET/POST/HEAD requests to the
        login URL (with a `next` parameter) instead of raising
    :param allow_header_auth: also accept an Authorization header token
    """

    def _authenticated(f, handler, *args, **kwargs):
        if not SecurityManager().authenticated():
            # try to restore the security context from the request
            SecurityManager().load_context(handler)
            if not SecurityManager().authenticated():
                if allow_header_auth and authenticate_token(handler):
                    return f(handler, *args, **kwargs)
                if redirect and handler.request.method in ("GET", "POST", "HEAD"):
                    url = handler.get_login_url()
                    if "?" not in url:
                        # absolute login URL needs the full request URL
                        # as the return target; relative just the uri
                        if urllib.parse.urlsplit(url).scheme:
                            next_url = handler.request.full_url()
                        else:
                            next_url = handler.request.uri
                        url += "?" + urllib.parse.urlencode(dict(next=next_url))
                    handler.redirect(url)
                    return
                raise AuthenticationException("User not authorized.")
        return f(handler, *args, **kwargs)

    def authenticate_token(handler):
        # attempt authentication via a "<scheme> <credentials>" header
        if "Authorization" in handler.request.headers:
            auth_headers = handler.request.headers.get_list("Authorization")
            # only support one auth header in a request
            if len(auth_headers) == 1:
                auth = auth_headers[0]
                parts = auth.strip().partition(" ")
                if parts[0] and parts[2]:
                    token = AuthorizationHeaderToken(parts[0], parts[2].strip(), handler)
                    auth_provider = SecurityManager().get_authentication_provider(token)
                    if auth_provider:
                        auth_provider.authenticate(token)
                        if SecurityManager().authenticated():
                            return True
                    else:
                        # NOTE(review): logger.warn is a deprecated alias
                        # of logger.warning
                        logger.warn("No authentication provider found for header: %s", auth)
        return False

    return decorator(_authenticated)
| |
import h5py
from worldengine.version import __version__
from worldengine.biome import biome_name_to_index, biome_index_to_name
from worldengine.world import World, Step
import numpy
def save_world_to_hdf5(world, filename):
    """
    Serialize a World to an HDF5 file.

    The general metadata, elevation, plates, ocean and sea-depth layers
    are always written; every optional layer (biome, humidity,
    irrigation, ...) is written only when the corresponding attribute
    exists on `world`.

    :param world: the World instance to serialize
    :param filename: path of the HDF5 file to create (overwritten)
    """
    # BUGFIX: the numpy.float / numpy.bool aliases used previously were
    # removed in NumPy 1.20 -- use numpy.float64 / numpy.bool_ instead.
    # A context manager also guarantees the file is closed on error
    # (the original leaked the handle if any write raised).
    with h5py.File(filename, libver='latest', mode='w') as f:
        general_grp = f.create_group("general")
        general_grp["worldengine_version"] = __version__
        general_grp["name"] = world.name
        general_grp["width"] = world.width
        general_grp["height"] = world.height

        elevation_grp = f.create_group("elevation")
        elevation_ths_grp = elevation_grp.create_group("thresholds")
        elevation_ths_grp["sea"] = world.elevation['thresholds'][0][1]
        elevation_ths_grp["plain"] = world.elevation['thresholds'][1][1]
        elevation_ths_grp["hill"] = world.elevation['thresholds'][2][1]
        elevation_data = elevation_grp.create_dataset(
            "data", (world.height, world.width), dtype=numpy.float64)
        elevation_data.write_direct(world.elevation['data'])

        plates_data = f.create_dataset(
            "plates", (world.height, world.width), dtype=numpy.uint16)
        plates_data.write_direct(world.plates)

        ocean_data = f.create_dataset(
            "ocean", (world.height, world.width), dtype=numpy.bool_)
        ocean_data.write_direct(world.ocean)

        sea_depth_data = f.create_dataset(
            "sea_depth", (world.height, world.width), dtype=numpy.float64)
        sea_depth_data.write_direct(world.sea_depth)

        if hasattr(world, 'biome'):
            # biomes are stored as integer indices, one cell at a time
            biome_data = f.create_dataset(
                "biome", (world.height, world.width), dtype=numpy.uint16)
            for y in range(world.height):
                for x in range(world.width):
                    biome_data[y, x] = biome_name_to_index(world.biome[y][x])

        if hasattr(world, 'humidity'):
            humidity_grp = f.create_group("humidity")
            humidity_quantiles_grp = humidity_grp.create_group("quantiles")
            for k in world.humidity['quantiles'].keys():
                humidity_quantiles_grp[k] = world.humidity['quantiles'][k]
            humidity_data = humidity_grp.create_dataset(
                "data", (world.height, world.width), dtype=numpy.float64)
            humidity_data.write_direct(world.humidity['data'])

        if hasattr(world, 'irrigation'):
            irrigation_data = f.create_dataset(
                "irrigation", (world.height, world.width), dtype=numpy.float64)
            irrigation_data.write_direct(world.irrigation)

        if hasattr(world, 'permeability'):
            permeability_grp = f.create_group("permeability")
            permeability_ths_grp = permeability_grp.create_group("thresholds")
            permeability_ths_grp['low'] = world.permeability['thresholds'][0][1]
            permeability_ths_grp['med'] = world.permeability['thresholds'][1][1]
            permeability_data = permeability_grp.create_dataset(
                "data", (world.height, world.width), dtype=numpy.float64)
            permeability_data.write_direct(world.permeability['data'])

        if hasattr(world, 'watermap'):
            watermap_grp = f.create_group("watermap")
            watermap_ths_grp = watermap_grp.create_group("thresholds")
            watermap_ths_grp['creek'] = world.watermap['thresholds']['creek']
            watermap_ths_grp['river'] = world.watermap['thresholds']['river']
            watermap_ths_grp['mainriver'] = world.watermap['thresholds']['main river']
            watermap_data = watermap_grp.create_dataset(
                "data", (world.height, world.width), dtype=numpy.float64)
            watermap_data.write_direct(world.watermap['data'])

        if hasattr(world, 'precipitation'):
            precipitation_grp = f.create_group("precipitation")
            precipitation_ths_grp = precipitation_grp.create_group("thresholds")
            precipitation_ths_grp['low'] = world.precipitation['thresholds'][0][1]
            precipitation_ths_grp['med'] = world.precipitation['thresholds'][1][1]
            precipitation_data = precipitation_grp.create_dataset(
                "data", (world.height, world.width), dtype=numpy.float64)
            precipitation_data.write_direct(world.precipitation['data'])

        if hasattr(world, 'temperature'):
            temperature_grp = f.create_group("temperature")
            temperature_ths_grp = temperature_grp.create_group("thresholds")
            temperature_ths_grp['polar'] = world.temperature['thresholds'][0][1]
            temperature_ths_grp['alpine'] = world.temperature['thresholds'][1][1]
            temperature_ths_grp['boreal'] = world.temperature['thresholds'][2][1]
            temperature_ths_grp['cool'] = world.temperature['thresholds'][3][1]
            temperature_ths_grp['warm'] = world.temperature['thresholds'][4][1]
            temperature_ths_grp['subtropical'] = world.temperature['thresholds'][5][1]
            temperature_data = temperature_grp.create_dataset(
                "data", (world.height, world.width), dtype=numpy.float64)
            temperature_data.write_direct(world.temperature['data'])

        if hasattr(world, 'icecap'):
            icecap_data = f.create_dataset(
                "icecap", (world.height, world.width), dtype=numpy.float64)
            icecap_data.write_direct(world.icecap)

        if hasattr(world, 'lake_map'):
            lake_map_data = f.create_dataset(
                "lake_map", (world.height, world.width), dtype=numpy.float64)
            lake_map_data.write_direct(world.lake_map)

        if hasattr(world, 'river_map'):
            river_map_data = f.create_dataset(
                "river_map", (world.height, world.width), dtype=numpy.float64)
            river_map_data.write_direct(world.river_map)

        generation_params_grp = f.create_group("generation_params")
        generation_params_grp['seed'] = world.seed
        generation_params_grp['n_plates'] = world.n_plates
        generation_params_grp['ocean_level'] = world.ocean_level
        generation_params_grp['step'] = world.step.name
def _from_hdf5_quantiles(p_quantiles):
quantiles = {}
for p_quantile in p_quantiles:
quantiles[p_quantile.title()] = p_quantiles[p_quantile].value
return quantiles
def _from_hdf5_matrix_with_quantiles(p_matrix):
    """Build a ``{'data': ..., 'quantiles': ...}`` dict from an HDF5 group.

    ``data`` is passed through unchanged (still an HDF5 dataset); the
    quantiles subgroup is converted via :func:`_from_hdf5_quantiles`.
    """
    return {
        'data': p_matrix['data'],
        'quantiles': _from_hdf5_quantiles(p_matrix['quantiles']),
    }
def load_world_to_hdf5(filename):
    """Read a World instance back from the HDF5 file *filename*.

    Mandatory layers (elevation, plates, ocean, sea depth) are always
    loaded; every optional layer (biome, humidity, irrigation,
    permeability, watermap, precipitation, temperature, icecap, lake and
    river maps) is loaded only when present in the file.

    NOTE(review): despite the ``to_hdf5`` suffix this function *loads*
    a world from HDF5; the name is kept for backward compatibility.
    """
    f = h5py.File(filename, libver='latest', mode='r')
    w = World(f['general/name'].value,
              f['general/width'].value,
              f['general/height'].value,
              f['generation_params/seed'].value,
              f['generation_params/n_plates'].value,
              f['generation_params/ocean_level'].value,
              Step.get_by_name(f['generation_params/step'].value))
    # Elevation: per-cell heights plus named thresholds splitting the range
    # into sea/plain/hill/mountain bands (the last band is open-ended).
    e = numpy.array(f['elevation/data'])
    e_th = [('sea', f['elevation/thresholds/sea'].value),
            ('plain', f['elevation/thresholds/plain'].value),
            ('hill', f['elevation/thresholds/hill'].value),
            ('mountain', None)]
    w.set_elevation(e, e_th)
    # Plates
    w.set_plates(numpy.array(f['plates']))
    # Ocean
    w.set_ocean(numpy.array(f['ocean']))
    w.sea_depth = numpy.array(f['sea_depth'])
    # Biome: stored as integer codes; translate each cell back to its name.
    # (membership tests use ``in f`` rather than the redundant ``in f.keys()``)
    if 'biome' in f:
        biome_data = [
            [biome_index_to_name(f['biome'][y, x]) for x in range(w.width)]
            for y in range(w.height)
        ]
        biome = numpy.array(biome_data, dtype=object)
        w.set_biome(biome)
    # Humidity
    # FIXME: use setters
    if 'humidity' in f:
        w.humidity = _from_hdf5_matrix_with_quantiles(f['humidity'])
        w.humidity['data'] = numpy.array(w.humidity['data'])  # numpy conversion
    if 'irrigation' in f:
        w.irrigation = numpy.array(f['irrigation'])
    if 'permeability' in f:
        p = numpy.array(f['permeability/data'])
        p_th = [
            ('low', f['permeability/thresholds/low'].value),
            ('med', f['permeability/thresholds/med'].value),
            ('hig', None)
        ]
        w.set_permeability(p, p_th)
    if 'watermap' in f:
        w.watermap = dict()
        w.watermap['data'] = numpy.array(f['watermap/data'])
        w.watermap['thresholds'] = {}
        w.watermap['thresholds']['creek'] = f['watermap/thresholds/creek'].value
        w.watermap['thresholds']['river'] = f['watermap/thresholds/river'].value
        w.watermap['thresholds']['main river'] = f['watermap/thresholds/mainriver'].value
    if 'precipitation' in f:
        p = numpy.array(f['precipitation/data'])
        p_th = [
            ('low', f['precipitation/thresholds/low'].value),
            ('med', f['precipitation/thresholds/med'].value),
            ('hig', None)
        ]
        w.set_precipitation(p, p_th)
    if 'temperature' in f:
        t = numpy.array(f['temperature/data'])
        t_th = [
            ('polar', f['temperature/thresholds/polar'].value),
            ('alpine', f['temperature/thresholds/alpine'].value),
            ('boreal', f['temperature/thresholds/boreal'].value),
            ('cool', f['temperature/thresholds/cool'].value),
            ('warm', f['temperature/thresholds/warm'].value),
            ('subtropical', f['temperature/thresholds/subtropical'].value),
            ('tropical', None)
        ]
        w.set_temperature(t, t_th)
    if 'icecap' in f:
        w.icecap = numpy.array(f['icecap'])
    if 'lake_map' in f:
        w.set_lakemap(numpy.array(f['lake_map']))
    if 'river_map' in f:
        w.set_rivermap(numpy.array(f['river_map']))
    f.close()
    return w
| |
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Miscellaneous network utility code."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import os
import sys
import socket
import stat
from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.util import u, Configurable, errno_from_exception
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
try:
import certifi
except ImportError:
# certifi is optional as long as we have ssl.create_default_context.
if ssl is None or hasattr(ssl, 'create_default_context'):
certifi = None
else:
raise
try:
xrange # py2
except NameError:
xrange = range # py3
# Pick an implementation of hostname matching: the stdlib one when available,
# None on platforms without ssl (App Engine), otherwise the backport package.
if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'):  # python 3.2+
    ssl_match_hostname = ssl.match_hostname
    SSLCertificateError = ssl.CertificateError
elif ssl is None:
    ssl_match_hostname = SSLCertificateError = None
else:
    import backports.ssl_match_hostname
    ssl_match_hostname = backports.ssl_match_hostname.match_hostname
    SSLCertificateError = backports.ssl_match_hostname.CertificateError
# Build default client/server TLS settings.  Depending on the Python
# version these are either SSLContext objects or plain keyword dicts
# for the module-level ssl.wrap_socket.
if hasattr(ssl, 'SSLContext'):
    if hasattr(ssl, 'create_default_context'):
        # Python 2.7.9+, 3.4+
        # Note that the naming of ssl.Purpose is confusing; the purpose
        # of a context is to authentiate the opposite side of the connection.
        _client_ssl_defaults = ssl.create_default_context(
            ssl.Purpose.SERVER_AUTH)
        _server_ssl_defaults = ssl.create_default_context(
            ssl.Purpose.CLIENT_AUTH)
    else:
        # Python 3.2-3.3
        _client_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        _client_ssl_defaults.verify_mode = ssl.CERT_REQUIRED
        # certifi supplies the CA bundle that create_default_context would
        # otherwise load from the system store.
        _client_ssl_defaults.load_verify_locations(certifi.where())
        _server_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        if hasattr(ssl, 'OP_NO_COMPRESSION'):
            # Disable TLS compression to avoid CRIME and related attacks.
            # This constant wasn't added until python 3.3.
            _client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
            _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
elif ssl:
    # Python 2.6-2.7.8
    _client_ssl_defaults = dict(cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=certifi.where())
    _server_ssl_defaults = {}
else:
    # Google App Engine
    _client_ssl_defaults = dict(cert_reqs=None,
                                ca_certs=None)
    _server_ssl_defaults = {}
# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
# getaddrinfo attempts to import encodings.idna. If this is done at
# module-import time, the import lock is already held by the main thread,
# leading to deadlock. Avoid it by caching the idna encoder on the main
# thread now.
u('foo').encode('idna')
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
    # Windows uses its own WSA error code for "would block".
    _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)
# Default backlog used when calling sock.listen()
_DEFAULT_BACKLOG = 128
def bind_sockets(port, address=None, family=socket.AF_UNSPEC,
                 backlog=_DEFAULT_BACKLOG, flags=None):
    """Creates listening sockets bound to the given port and address.

    Returns a list of socket objects (multiple sockets are returned if
    the given address maps to multiple IP addresses, which is most common
    for mixed IPv4 and IPv6 use).

    Address may be either an IP address or hostname.  If it's a hostname,
    the server will listen on all IP addresses associated with the
    name.  Address may be an empty string or None to listen on all
    available interfaces.  Family may be set to either `socket.AF_INET`
    or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
    both will be used if available.

    The ``backlog`` argument has the same meaning as for
    `socket.listen() <socket.socket.listen>`.

    ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
    ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
    """
    sockets = []
    if address == "":
        address = None
    if not socket.has_ipv6 and family == socket.AF_UNSPEC:
        # Python can be compiled with --disable-ipv6, which causes
        # operations on AF_INET6 sockets to fail, but does not
        # automatically exclude those results from getaddrinfo
        # results.
        # http://bugs.python.org/issue16208
        family = socket.AF_INET
    if flags is None:
        flags = socket.AI_PASSIVE
    bound_port = None
    # set() deduplicates identical getaddrinfo results, which some
    # platforms return for the same address.
    for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
                                      0, flags)):
        af, socktype, proto, canonname, sockaddr = res
        if (sys.platform == 'darwin' and address == 'localhost' and
                af == socket.AF_INET6 and sockaddr[3] != 0):
            # Mac OS X includes a link-local address fe80::1%lo0 in the
            # getaddrinfo results for 'localhost'.  However, the firewall
            # doesn't understand that this is a local address and will
            # prompt for access (often repeatedly, due to an apparent
            # bug in its ability to remember granting access to an
            # application). Skip these addresses.
            continue
        try:
            sock = socket.socket(af, socktype, proto)
        except socket.error as e:
            # The platform may report address families it cannot create
            # sockets for; skip those instead of failing outright.
            if errno_from_exception(e) == errno.EAFNOSUPPORT:
                continue
            raise
        set_close_exec(sock.fileno())
        if os.name != 'nt':
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if af == socket.AF_INET6:
            # On linux, ipv6 sockets accept ipv4 too by default,
            # but this makes it impossible to bind to both
            # 0.0.0.0 in ipv4 and :: in ipv6.  On other systems,
            # separate sockets *must* be used to listen for both ipv4
            # and ipv6.  For consistency, always disable ipv4 on our
            # ipv6 sockets and use a separate ipv4 socket when needed.
            #
            # Python 2.x on windows doesn't have IPPROTO_IPV6.
            if hasattr(socket, "IPPROTO_IPV6"):
                sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
        # automatic port allocation with port=None
        # should bind on the same port on IPv4 and IPv6
        host, requested_port = sockaddr[:2]
        if requested_port == 0 and bound_port is not None:
            sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
        sock.setblocking(0)
        sock.bind(sockaddr)
        # Remember the kernel-assigned port so subsequent address families
        # bind to the same port number.
        bound_port = sock.getsockname()[1]
        sock.listen(backlog)
        sockets.append(sock)
    return sockets
if hasattr(socket, 'AF_UNIX'):
    def bind_unix_socket(file, mode=0o600, backlog=_DEFAULT_BACKLOG):
        """Creates a listening unix socket.

        If a socket with the given name already exists, it will be deleted.
        If any other file with that name exists, an exception will be
        raised.

        Returns a socket object (not a list of socket objects like
        `bind_sockets`)
        """
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        set_close_exec(sock.fileno())
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setblocking(0)
        try:
            st = os.stat(file)
        except OSError as err:
            # ENOENT means the path is free; anything else is a real error.
            if errno_from_exception(err) != errno.ENOENT:
                raise
        else:
            if stat.S_ISSOCK(st.st_mode):
                # A stale socket from a previous run: safe to replace.
                os.remove(file)
            else:
                # Bug fix: the original passed ``file`` as a second argument
                # to ValueError (logging-style), so the filename was never
                # interpolated into the message.  Use %-formatting instead.
                raise ValueError("File %s exists and is not a socket" % file)
        sock.bind(file)
        os.chmod(file, mode)
        sock.listen(backlog)
        return sock
def add_accept_handler(sock, callback, io_loop=None):
    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.

    Whenever a connection is accepted, ``callback(connection, address)``
    is invoked (``connection`` being a socket object and ``address`` the
    remote endpoint).  Note that this differs from the ``callback(fd,
    events)`` signature used for `.IOLoop` handlers.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    if io_loop is None:
        io_loop = IOLoop.current()

    def accept_handler(fd, events):
        # More connections may arrive while callbacks run; cap the number
        # accepted per wakeup so other IOLoop tasks are not starved.
        # Ideally we would accept exactly as many connections as were
        # pending when we woke up, but that count is not available (and
        # draining the whole queue before running callbacks would hurt
        # load balancing in multi-process setups), so the listen backlog
        # serves as a rough heuristic.
        for _ in xrange(_DEFAULT_BACKLOG):
            try:
                connection, address = sock.accept()
            except socket.error as e:
                code = errno_from_exception(e)
                if code in _ERRNO_WOULDBLOCK:
                    # Every currently-pending connection has been accepted.
                    return
                if code == errno.ECONNABORTED:
                    # The peer closed the connection while it was still in
                    # the accept queue (observed on FreeBSD); skip it.
                    continue
                raise
            callback(connection, address)

    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
def is_valid_ip(ip):
    """Returns true if the given string is a well-formed IP address.

    Supports IPv4 and IPv6.
    """
    if not ip or '\x00' in ip:
        # getaddrinfo resolves empty strings to localhost, and truncates
        # on zero bytes.
        return False
    try:
        # AI_NUMERICHOST forbids name resolution, so this parses the
        # literal without touching the network.
        res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
                                 socket.SOCK_STREAM,
                                 0, socket.AI_NUMERICHOST)
        return bool(res)
    except socket.gaierror as e:
        if e.args[0] == socket.EAI_NONAME:
            return False
        raise
    # Bug fix: removed the unreachable trailing ``return True`` — every
    # path above either returns or raises.
class Resolver(Configurable):
    """Configurable asynchronous DNS resolver interface.

    By default a blocking implementation (one that simply calls
    `socket.getaddrinfo`) is used.  A different implementation can be
    selected with the `Resolver.configure <.Configurable.configure>`
    class method::

        Resolver.configure('tornado.netutil.ThreadedResolver')

    Implementations of this interface bundled with Tornado:

    * `tornado.netutil.BlockingResolver`
    * `tornado.netutil.ThreadedResolver`
    * `tornado.netutil.OverrideResolver`
    * `tornado.platform.twisted.TwistedResolver`
    * `tornado.platform.caresresolver.CaresResolver`
    """

    @classmethod
    def configurable_base(cls):
        return Resolver

    @classmethod
    def configurable_default(cls):
        return BlockingResolver

    def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
        """Resolves an address.

        ``host`` may be a hostname or a literal IP address.

        Returns a `.Future` whose result is a list of (family, address)
        pairs, where each address is a tuple suitable for passing to
        `socket.connect <socket.socket.connect>` (a ``(host, port)``
        pair for IPv4; IPv6 addresses carry extra fields).  If a
        ``callback`` is given, it is run with the result once resolution
        completes.
        """
        raise NotImplementedError()

    def close(self):
        """Closes the `Resolver`, freeing any resources used.

        .. versionadded:: 3.1
        """
        pass
class ExecutorResolver(Resolver):
    """Resolver implementation using a `concurrent.futures.Executor`.

    Use this instead of `ThreadedResolver` when you need finer control
    over the executor.  The executor is shut down when the resolver is
    closed unless ``close_executor=False`` is passed (do that when the
    executor is shared elsewhere).

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """

    def initialize(self, io_loop=None, executor=None, close_executor=True):
        self.io_loop = io_loop or IOLoop.current()
        if executor is None:
            # No executor supplied: run synchronously via the dummy
            # executor, which we must never shut down.
            self.executor = dummy_executor
            self.close_executor = False
        else:
            self.executor = executor
            self.close_executor = close_executor

    def close(self):
        if self.close_executor:
            self.executor.shutdown()
        self.executor = None

    @run_on_executor
    def resolve(self, host, port, family=socket.AF_UNSPEC):
        # On Solaris, getaddrinfo fails if the given port is not found
        # in /etc/services and no socket type is given, so we must pass
        # one here.  The socket type used here doesn't seem to actually
        # matter (we discard the one we get back in the results),
        # so the addresses we return should still be usable with SOCK_DGRAM.
        addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
        return [(fam, address)
                for fam, _socktype, _proto, _canonname, address in addrinfo]
class BlockingResolver(ExecutorResolver):
    """Default `Resolver` implementation, using `socket.getaddrinfo`.

    Resolution blocks the `.IOLoop`, although the callback itself is
    deferred to the next `.IOLoop` iteration.
    """

    def initialize(self, io_loop=None):
        # No executor argument: ExecutorResolver falls back to the dummy
        # (synchronous) executor.
        super(BlockingResolver, self).initialize(io_loop=io_loop)
class ThreadedResolver(ExecutorResolver):
    """Multithreaded non-blocking `Resolver` implementation.

    Requires the `concurrent.futures` package to be installed
    (available in the standard library since Python 3.2,
    installable with ``pip install futures`` in older versions).

    The thread pool size can be configured with::

        Resolver.configure('tornado.netutil.ThreadedResolver',
                           num_threads=10)

    .. versionchanged:: 3.1
       All ``ThreadedResolvers`` share a single thread pool, whose
       size is set by the first one to be created.
    """
    # Process-wide pool, shared by every instance in this process.
    _threadpool = None
    _threadpool_pid = None

    def initialize(self, io_loop=None, num_threads=10):
        pool = ThreadedResolver._create_threadpool(num_threads)
        super(ThreadedResolver, self).initialize(
            io_loop=io_loop, executor=pool, close_executor=False)

    @classmethod
    def _create_threadpool(cls, num_threads):
        """Return the shared thread pool, (re)creating it on first use.

        Threads do not survive ``fork``, so a pool inherited from a
        parent process is discarded and rebuilt for the current pid.
        """
        pid = os.getpid()
        if cls._threadpool is None or cls._threadpool_pid != pid:
            from concurrent.futures import ThreadPoolExecutor
            cls._threadpool = ThreadPoolExecutor(num_threads)
            cls._threadpool_pid = pid
        return cls._threadpool
class OverrideResolver(Resolver):
    """Wraps a resolver with a mapping of overrides.

    Useful for making local DNS changes (e.g. in tests) without touching
    system-wide settings.  ``mapping`` keys may be either bare host
    strings or ``(host, port)`` pairs.
    """

    def initialize(self, resolver, mapping):
        self.resolver = resolver
        self.mapping = mapping

    def close(self):
        self.resolver.close()

    def resolve(self, host, port, *args, **kwargs):
        # Most specific match wins: (host, port) pair first, bare host next.
        pair = (host, port)
        if pair in self.mapping:
            host, port = self.mapping[pair]
        elif host in self.mapping:
            host = self.mapping[host]
        return self.resolver.resolve(host, port, *args, **kwargs)
# These are the keyword arguments to ssl.wrap_socket that must be translated
# to their SSLContext equivalents (the other arguments are still passed
# to SSLContext.wrap_socket).  ssl_options_to_context below rejects any
# dict key outside this set.
_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile',
                                   'cert_reqs', 'ca_certs', 'ciphers'])
def ssl_options_to_context(ssl_options):
    """Try to convert an ``ssl_options`` dictionary to an
    `~ssl.SSLContext` object.

    The ``ssl_options`` dictionary contains keywords to be passed to
    `ssl.wrap_socket`.  In Python 2.7.9+, `ssl.SSLContext` objects can
    be used instead.  This function converts the dict form to its
    `~ssl.SSLContext` equivalent, and may be used when a component which
    accepts both forms needs to upgrade to the `~ssl.SSLContext` version
    to use features like SNI or NPN.
    """
    if isinstance(ssl_options, dict):
        assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
    has_context = hasattr(ssl, 'SSLContext')
    if not has_context or isinstance(ssl_options, ssl.SSLContext):
        # Nothing to convert: either SSLContext is unavailable on this
        # platform, or the caller already handed us one.
        return ssl_options
    context = ssl.SSLContext(
        ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
    if 'certfile' in ssl_options:
        context.load_cert_chain(
            ssl_options['certfile'], ssl_options.get('keyfile', None))
    if 'cert_reqs' in ssl_options:
        context.verify_mode = ssl_options['cert_reqs']
    if 'ca_certs' in ssl_options:
        context.load_verify_locations(ssl_options['ca_certs'])
    if 'ciphers' in ssl_options:
        context.set_ciphers(ssl_options['ciphers'])
    if hasattr(ssl, 'OP_NO_COMPRESSION'):
        # Disable TLS compression to avoid CRIME and related attacks.
        # This constant wasn't added until python 3.3.
        context.options |= ssl.OP_NO_COMPRESSION
    return context
def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
    """Returns an ``ssl.SSLSocket`` wrapping the given socket.

    ``ssl_options`` may be either an `ssl.SSLContext` object or a
    dictionary (as accepted by `ssl_options_to_context`).  Additional
    keyword arguments are passed to ``wrap_socket`` (either the
    `~ssl.SSLContext` method or the `ssl` module function as
    appropriate).

    Note: the ``socket`` parameter shadows the ``socket`` module inside
    this function; the name is kept for API compatibility.
    """
    context = ssl_options_to_context(ssl_options)
    if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext):
        # Bug fix: getattr without a default raises AttributeError on ssl
        # builds that lack HAS_SNI; default to False (treat as no SNI).
        if server_hostname is not None and getattr(ssl, 'HAS_SNI', False):
            # Python doesn't have server-side SNI support so we can't
            # really unittest this, but it can be manually tested with
            # python3.2 -m tornado.httpclient https://sni.velox.ch
            return context.wrap_socket(socket, server_hostname=server_hostname,
                                       **kwargs)
        else:
            return context.wrap_socket(socket, **kwargs)
    else:
        # Pre-SSLContext path: ``context`` is the options dict; merge the
        # extra keyword arguments and use the module-level wrap_socket.
        return ssl.wrap_socket(socket, **dict(context, **kwargs))
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._disks_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_grant_access_request_initial, build_list_by_resource_group_request, build_list_request, build_revoke_access_request_initial, build_update_request_initial
# Generic return type for the optional per-call ``cls`` response hook.
T = TypeVar('T')
# Signature of the ``cls`` hook: it receives the pipeline response, the
# deserialized body, and the response headers, and may return anything.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DisksOperations:
    """DisksOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2017_03_30.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Exposed so callers can reach the model classes via ``ops.models``.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to send every request in this group.
        self._client = client
        # Serializer/deserializer pair for request and response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service configuration (subscription id, polling interval, ...).
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "_models.Disk",
        **kwargs: Any
    ) -> "_models.Disk":
        """Send the initial PUT of the create-or-update long-running
        operation and deserialize its immediate response.

        :raises ~azure.core.exceptions.HttpResponseError: for any status
            other than 200 or 202.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Disk"]
        # Map common failure statuses to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(disk, 'Disk')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_name=disk_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (completed synchronously) and 202 (accepted, LRO in progress)
        # both carry a Disk body.
        if response.status_code == 200:
            deserialized = self._deserialize('Disk', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Disk', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "_models.Disk",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.Disk"]:
        """Creates or updates a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the managed disk that is being created. The name can't be changed
         after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
         maximum name length is 80 characters.
        :type disk_name: str
        :param disk: Disk object supplied in the body of the Put disk operation.
        :type disk: ~azure.mgmt.compute.v2017_03_30.models.Disk
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.Disk]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Disk"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial PUT.  ``cls`` returns the raw
            # pipeline response so the poller can read the LRO headers.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                disk=disk,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # ``error_map`` was consumed by the initial call; don't forward it to
        # the polling method as well.
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Final-state deserializer invoked by the poller when done.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Disk', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "_models.DiskUpdate",
        **kwargs: Any
    ) -> "_models.Disk":
        """Send the initial PATCH of the update long-running operation and
        deserialize its immediate response.

        :raises ~azure.core.exceptions.HttpResponseError: for any status
            other than 200 or 202.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Disk"]
        # Map common failure statuses to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(disk, 'DiskUpdate')
        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_name=disk_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (completed synchronously) and 202 (accepted, LRO in progress)
        # both carry a Disk body.
        if response.status_code == 200:
            deserialized = self._deserialize('Disk', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Disk', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        disk_name: str,
        disk: "_models.DiskUpdate",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.Disk"]:
        """Updates (patches) a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the managed disk that is being created. The name can't be changed
         after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
         maximum name length is 80 characters.
        :type disk_name: str
        :param disk: Disk object supplied in the body of the Patch disk operation.
        :type disk: ~azure.mgmt.compute.v2017_03_30.models.DiskUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.Disk]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Disk"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial PATCH.  ``cls`` returns the
            # raw pipeline response so the poller can read the LRO headers.
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                disk=disk,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # ``error_map`` was consumed by the initial call; don't forward it to
        # the polling method as well.
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Final-state deserializer invoked by the poller when done.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Disk', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        disk_name: str,
        **kwargs: Any
    ) -> "_models.Disk":
        """Gets information about a disk.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the managed disk that is being created. The name can't be changed
         after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
         maximum name length is 80 characters.
        :type disk_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Disk, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2017_03_30.models.Disk
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Disk"]
        # Map common failure statuses to typed exceptions; callers may extend
        # the mapping through the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_name=disk_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Disk', pipeline_response)
        if cls:
            # Hand the raw response, deserialized body and (empty) headers
            # to the caller's hook.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        disk_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.OperationStatusResponse"]:
        """Deletes a disk.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the managed disk that is being created. The name can't be changed
         after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
         maximum name length is 80 characters.
        :type disk_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
        # Poll delay: explicit kwarg wins, else the client-wide default.
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial DELETE. The identity lambda keeps
            # the raw pipeline response so the poller can inspect LRO headers.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling reaches a terminal state.
            response = pipeline_response.http_response
            deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling=True -> ARM polling; False -> return immediately; else a custom method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved continuation state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}  # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.DiskList"]:
        """Lists all the disks under a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DiskList or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2017_03_30.models.DiskList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page hits the operation URL; later pages follow the
            # service-provided next_link verbatim.
            if not next_link:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Paging links are always fetched with GET.
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Turn one page into (next_link, async iterable of items).
            deserialized = self._deserialize("DiskList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page and fail fast on any non-200 reply.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'}  # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DiskList"]:
"""Lists all the disks under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2017_03_30.models.DiskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DiskList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'} # type: ignore
    async def _grant_access_initial(
        self,
        resource_group_name: str,
        disk_name: str,
        grant_access_data: "_models.GrantAccessData",
        **kwargs: Any
    ) -> Optional["_models.AccessUri"]:
        # Initial POST of the grant-access LRO; the AccessUri body is only
        # present when the service replies 200 (a 202 means "still running").
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AccessUri"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        # Serialize the request body according to the GrantAccessData model.
        _json = self._serialize.body(grant_access_data, 'GrantAccessData')
        request = build_grant_access_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_name=disk_name,
            content_type=content_type,
            json=_json,
            template_url=self._grant_access_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('AccessUri', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _grant_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'}  # type: ignore
    @distributed_trace_async
    async def begin_grant_access(
        self,
        resource_group_name: str,
        disk_name: str,
        grant_access_data: "_models.GrantAccessData",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.AccessUri"]:
        """Grants access to a disk.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the managed disk that is being created. The name can't be changed
         after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
         maximum name length is 80 characters.
        :type disk_name: str
        :param grant_access_data: Access data object supplied in the body of the get disk access
         operation.
        :type grant_access_data: ~azure.mgmt.compute.v2017_03_30.models.GrantAccessData
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either AccessUri or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.AccessUri]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessUri"]
        # Poll delay: explicit kwarg wins, else the client-wide default.
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request. The identity lambda keeps
            # the raw pipeline response for the poller.
            raw_result = await self._grant_access_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                grant_access_data=grant_access_data,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final AccessUri once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('AccessUri', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved continuation state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'}  # type: ignore
async def _revoke_access_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_revoke_access_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
template_url=self._revoke_access_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_revoke_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} # type: ignore
    @distributed_trace_async
    async def begin_revoke_access(
        self,
        resource_group_name: str,
        disk_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.OperationStatusResponse"]:
        """Revokes access to a disk.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_name: The name of the managed disk that is being created. The name can't be changed
         after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
         maximum name length is 80 characters.
        :type disk_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
        # Poll delay: explicit kwarg wins, else the client-wide default.
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request; the identity lambda keeps
            # the raw pipeline response for the poller.
            raw_result = await self._revoke_access_initial(
                resource_group_name=resource_group_name,
                disk_name=disk_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # NOTE: unlike the other LROs in this file, this one pins the final state
        # to the Azure-AsyncOperation header via lro_options.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved continuation state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'}  # type: ignore
# --- file-concatenation boundary: content below belongs to a different module ---
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class DocumentTestCase(IntegrationTestCase):
    """Holodeck-mocked integration tests for the Sync v1 Document resource.

    Each *_request test mocks a 500 reply to force a TwilioException, then
    asserts the outbound HTTP request shape; each *_response test mocks a
    canned payload and checks the client deserializes it without error.
    """
    def test_fetch_request(self):
        # 500 mock makes fetch() raise so the outbound request can be captured.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .documents(sid="ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Documents/ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    def test_fetch_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "created_by": "created_by",
                "data": {},
                "date_expires": "2015-07-30T21:00:00Z",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "revision": "revision",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "unique_name": "unique_name",
                "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents/ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "links": {
                    "permissions": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents/ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
                }
            }
            '''
        ))
        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .documents(sid="ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.assertIsNotNone(actual)
    def test_delete_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .documents(sid="ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Documents/ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    def test_delete_response(self):
        # Successful delete returns 204 with no body; client maps it to True.
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .documents(sid="ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.assertTrue(actual)
    def test_create_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .documents.create()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Documents',
        ))
    def test_create_response(self):
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "created_by": "created_by",
                "data": {},
                "date_expires": "2015-07-30T21:00:00Z",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "revision": "revision",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "unique_name": "unique_name",
                "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents/ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "links": {
                    "permissions": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents/ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
                }
            }
            '''
        ))
        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .documents.create()
        self.assertIsNotNone(actual)
    def test_list_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .documents.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Documents',
        ))
    def test_read_empty_response(self):
        # Empty page: "documents" list has no entries.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "documents": [],
                "meta": {
                    "first_page_url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents?PageSize=50&Page=0",
                    "key": "documents",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents?PageSize=50&Page=0"
                }
            }
            '''
        ))
        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .documents.list()
        self.assertIsNotNone(actual)
    def test_read_full_response(self):
        # Single-entry page exercising full document deserialization.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "documents": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "created_by": "created_by",
                        "data": {},
                        "date_expires": "2015-07-30T21:00:00Z",
                        "date_created": "2015-07-30T20:00:00Z",
                        "date_updated": "2015-07-30T20:00:00Z",
                        "revision": "revision",
                        "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "sid": "ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "unique_name": "unique_name",
                        "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents/ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "links": {
                            "permissions": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents/ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
                        }
                    }
                ],
                "meta": {
                    "first_page_url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents?PageSize=50&Page=0",
                    "key": "documents",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents?PageSize=50&Page=0"
                }
            }
            '''
        ))
        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .documents.list()
        self.assertIsNotNone(actual)
    def test_update_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                               .documents(sid="ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://sync.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Documents/ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))
    def test_update_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "created_by": "created_by",
                "data": {},
                "date_expires": "2015-07-30T21:00:00Z",
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "revision": "revision",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "unique_name": "unique_name",
                "url": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents/ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "links": {
                    "permissions": "https://sync.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Documents/ETaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Permissions"
                }
            }
            '''
        ))
        actual = self.client.sync.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .documents(sid="ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.assertIsNotNone(actual)
# --- file-concatenation boundary: content below belongs to a different module ---
from flask_admin._compat import as_unicode
import peewee
from wtforms import fields, validators
from flask_admin import form
from flask_admin._compat import iteritems
from flask_admin.contrib.peewee import ModelView
from . import setup
from datetime import datetime, time, date
class CustomModelView(ModelView):
    """ModelView subclass that lets tests override any view attribute via kwargs."""

    def __init__(self, model,
                 name=None, category=None, endpoint=None, url=None,
                 **kwargs):
        # Apply each extra keyword as an attribute override *before* the
        # superclass constructor inspects the view configuration.
        for attr_name, attr_value in iteritems(kwargs):
            setattr(self, attr_name, attr_value)
        super(CustomModelView, self).__init__(model,
                                              name, category,
                                              endpoint, url)
def create_models(db):
    """Define the two peewee test models bound to *db*, create their tables,
    and return the (Model1, Model2) class pair."""
    class BaseModel(peewee.Model):
        # Bind all test models to the database passed in by the caller.
        class Meta:
            database = db
    class Model1(BaseModel):
        # Explicit __init__ so tests can construct rows positionally
        # (peewee's default constructor is keyword-only).
        def __init__(self, test1=None, test2=None, test3=None, test4=None,
                     date_field=None, timeonly_field=None,
                     datetime_field=None, **kwargs):
            super(Model1, self).__init__(**kwargs)
            self.test1 = test1
            self.test2 = test2
            self.test3 = test3
            self.test4 = test4
            self.date_field = date_field
            self.timeonly_field = timeonly_field
            self.datetime_field = datetime_field
        test1 = peewee.CharField(max_length=20, null=True)
        test2 = peewee.CharField(max_length=20, null=True)
        test3 = peewee.TextField(null=True)
        test4 = peewee.TextField(null=True)
        date_field = peewee.DateField(null=True)
        timeonly_field = peewee.TimeField(null=True)
        datetime_field = peewee.DateTimeField(null=True)
        def __str__(self):
            # "or ''" fixes error when loading choices for relation field:
            # TypeError: coercing to Unicode: need string or buffer, NoneType found
            return self.test1 or ''
    class Model2(BaseModel):
        # Same positional-construction convenience as Model1.
        def __init__(self, char_field=None, int_field=None, float_field=None,
                     bool_field=0, **kwargs):
            super(Model2, self).__init__(**kwargs)
            self.char_field = char_field
            self.int_field = int_field
            self.float_field = float_field
            self.bool_field = bool_field
        char_field = peewee.CharField(max_length=20)
        int_field = peewee.IntegerField(null=True)
        float_field = peewee.FloatField(null=True)
        bool_field = peewee.BooleanField()
        # Relation
        model1 = peewee.ForeignKeyField(Model1, null=True)
    Model1.create_table()
    Model2.create_table()
    return Model1, Model2
def fill_db(Model1, Model2):
    """Populate the test database with a fixed set of rows for both models.

    Row order is preserved exactly: tests rely on the auto-assigned primary keys.
    """
    # Plain text rows; the last one deliberately has an empty first column.
    text_rows = [
        ('test1_val_1', 'test2_val_1'),
        ('test1_val_2', 'test2_val_2'),
        ('test1_val_3', 'test2_val_3'),
        ('test1_val_4', 'test2_val_4'),
        (None, 'empty_obj'),
    ]
    for first, second in text_rows:
        Model1(first, second).save()
    # Model2 rows exercising the char/int/float/bool columns.
    Model2('char_field_val_1', None, None, bool_field=True).save()
    Model2('char_field_val_2', None, None, bool_field=False).save()
    Model2('char_field_val_3', 5000, 25.9).save()
    Model2('char_field_val_4', 9000, 75.5).save()
    Model2('char_field_val_5', 6169453081680413441).save()
    # Temporal rows used by the date/time/datetime filter tests.
    temporal_rows = [
        ('date_obj1', {'date_field': date(2014, 11, 17)}),
        ('date_obj2', {'date_field': date(2013, 10, 16)}),
        ('timeonly_obj1', {'timeonly_field': time(11, 10, 9)}),
        ('timeonly_obj2', {'timeonly_field': time(10, 9, 8)}),
        ('datetime_obj1', {'datetime_field': datetime(2014, 4, 3, 1, 9, 0)}),
        ('datetime_obj2', {'datetime_field': datetime(2013, 3, 2, 0, 8, 0)}),
    ]
    for label, extra in temporal_rows:
        Model1(label, **extra).save()
def test_model():
    """Smoke-test view introspection and the full CRUD flow for Model1."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)
    view = CustomModelView(Model1)
    admin.add_view(view)
    # View metadata derived from the model class.
    assert view.model == Model1
    assert view.name == 'Model1'
    assert view.endpoint == 'model1'
    assert view._primary_key == 'id'
    # All four text columns are sortable by default.
    assert 'test1' in view._sortable_columns
    assert 'test2' in view._sortable_columns
    assert 'test3' in view._sortable_columns
    assert 'test4' in view._sortable_columns
    assert view._create_form_class is not None
    assert view._edit_form_class is not None
    # Search/filters were not configured on this view.
    assert not view._search_supported
    assert view._filters is None
    # Verify form
    # CharField -> StringField, TextField -> TextAreaField.
    assert view._create_form_class.test1.field_class == fields.StringField
    assert view._create_form_class.test2.field_class == fields.StringField
    assert view._create_form_class.test3.field_class == fields.TextAreaField
    assert view._create_form_class.test4.field_class == fields.TextAreaField
    # Make some test clients
    client = app.test_client()
    rv = client.get('/admin/model1/')
    assert rv.status_code == 200
    rv = client.get('/admin/model1/new/')
    assert rv.status_code == 200
    # Create: a successful POST redirects (302) back to the list view.
    rv = client.post('/admin/model1/new/',
                     data=dict(test1='test1large', test2='test2'))
    assert rv.status_code == 302
    model = Model1.select().get()
    assert model.test1 == 'test1large'
    assert model.test2 == 'test2'
    assert model.test3 is None or model.test3 == ''
    assert model.test4 is None or model.test4 == ''
    rv = client.get('/admin/model1/')
    assert rv.status_code == 200
    assert b'test1large' in rv.data
    # Edit: update the row just created and verify the persisted values.
    url = '/admin/model1/edit/?id=%s' % model.id
    rv = client.get(url)
    assert rv.status_code == 200
    rv = client.post(url,
                     data=dict(test1='test1small', test2='test2large'))
    assert rv.status_code == 302
    model = Model1.select().get()
    assert model.test1 == 'test1small'
    assert model.test2 == 'test2large'
    assert model.test3 is None or model.test3 == ''
    assert model.test4 is None or model.test4 == ''
    # Delete: the table should be empty afterwards.
    url = '/admin/model1/delete/?id=%s' % model.id
    rv = client.post(url)
    assert rv.status_code == 302
    assert Model1.select().count() == 0
def test_column_editable_list():
    """In-line (x-editable) list editing: rendering, success path,
    validation failure, bad primary key, non-editable column, and editing
    a relation column."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)
    # wtf-peewee doesn't automatically add length validators for max_length
    form_args = {'test1': {'validators': [validators.Length(max=20)]}}
    view = CustomModelView(Model1, column_editable_list=['test1'],
                           form_args=form_args)
    admin.add_view(view)
    fill_db(Model1, Model2)
    client = app.test_client()
    # Test in-line edit field rendering
    rv = client.get('/admin/model1/')
    data = rv.data.decode('utf-8')
    assert 'data-role="x-editable"' in data
    # Form - Test basic in-line edit functionality
    rv = client.post('/admin/model1/ajax/update/', data={
        'list_form_pk': '1',
        'test1': 'change-success-1',
    })
    data = rv.data.decode('utf-8')
    assert 'Record was successfully saved.' == data
    # ensure the value has changed
    rv = client.get('/admin/model1/')
    data = rv.data.decode('utf-8')
    assert 'change-success-1' in data
    # Test validation error -- value exceeds the Length(max=20) validator,
    # so the ajax endpoint responds with HTTP 500.
    rv = client.post('/admin/model1/ajax/update/', data={
        'list_form_pk': '1',
        'test1': ('longerthantwentycharacterslongerthantwentycharacterslonger'
                  'thantwentycharacterslongerthantwentycharacters'),
    })
    data = rv.data.decode('utf-8')
    assert rv.status_code == 500
    # Test invalid primary key
    rv = client.post('/admin/model1/ajax/update/', data={
        'list_form_pk': '1000',
        'test1': 'problematic-input',
    })
    data = rv.data.decode('utf-8')
    assert rv.status_code == 500
    # Test editing column not in column_editable_list
    rv = client.post('/admin/model1/ajax/update/', data={
        'list_form_pk': '1',
        'test2': 'problematic-input',
    })
    data = rv.data.decode('utf-8')
    assert 'problematic-input' not in data
    # Test in-line editing for relations
    view = CustomModelView(Model2, column_editable_list=['model1'])
    admin.add_view(view)
    rv = client.post('/admin/model2/ajax/update/', data={
        'list_form_pk': '1',
        'model1': '3',
    })
    data = rv.data.decode('utf-8')
    assert 'Record was successfully saved.' == data
    # confirm the value has changed
    rv = client.get('/admin/model2/')
    data = rv.data.decode('utf-8')
    assert 'test1_val_3' in data
def test_details_view():
    """Details view behaviour: hidden link when disabled, redirect on
    direct access, full field scaffolding, and column_details_list
    restriction."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)
    view_no_details = CustomModelView(Model1)
    admin.add_view(view_no_details)
    # fields are scaffolded
    view_w_details = CustomModelView(Model2, can_view_details=True)
    admin.add_view(view_w_details)
    # show only specific fields in details w/ column_details_list
    char_field_view = CustomModelView(Model2, can_view_details=True,
                                      column_details_list=["char_field"],
                                      endpoint="cf_view")
    admin.add_view(char_field_view)
    fill_db(Model1, Model2)
    client = app.test_client()
    # ensure link to details is hidden when can_view_details is disabled
    rv = client.get('/admin/model1/')
    data = rv.data.decode('utf-8')
    assert '/admin/model1/details/' not in data
    # ensure link to details view appears
    rv = client.get('/admin/model2/')
    data = rv.data.decode('utf-8')
    assert '/admin/model2/details/' in data
    # test redirection when details are disabled
    rv = client.get('/admin/model1/details/?url=%2Fadmin%2Fmodel1%2F&id=3')
    assert rv.status_code == 302
    # test if correct data appears in details view when enabled
    rv = client.get('/admin/model2/details/?url=%2Fadmin%2Fmodel2%2F&id=3')
    data = rv.data.decode('utf-8')
    assert 'Char Field' in data
    assert 'char_field_val_3' in data
    assert 'Int Field' in data
    assert '5000' in data
    # test column_details_list
    rv = client.get('/admin/cf_view/details/?url=%2Fadmin%2Fcf_view%2F&id=3')
    data = rv.data.decode('utf-8')
    assert 'Char Field' in data
    assert 'char_field_val_3' in data
    assert 'Int Field' not in data
    assert '5000' not in data
def test_column_filters():
    """Exercise every scaffolded filter type on the peewee backend
    (string, int, bool, float, date, datetime, time) through the
    ``flt<pos>_<index>`` URL query arguments, including validation of
    malformed filter values."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)
    fill_db(Model1, Model2)
    # Test string filter
    view = CustomModelView(Model1, column_filters=['test1'])
    admin.add_view(view)
    assert len(view._filters) == 7
    assert \
        [(f['index'], f['operation']) for f in view._filter_groups[u'Test1']] == \
        [
            (0, 'contains'),
            (1, 'not contains'),
            (2, 'equals'),
            (3, 'not equal'),
            (4, 'empty'),
            (5, 'in list'),
            (6, 'not in list'),
        ]
    # Make some test clients
    client = app.test_client()
    # string - equals
    # NOTE(review): per the group listing above flt0_0 is 'contains' and
    # flt0_2 is 'equals'; the section labels below are swapped relative to
    # that ordering, though the fixtures make both produce the same rows.
    rv = client.get('/admin/model1/?flt0_0=test1_val_1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test2_val_1' in data
    assert 'test1_val_2' not in data
    # string - not equal
    rv = client.get('/admin/model1/?flt0_1=test1_val_1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test2_val_1' not in data
    assert 'test1_val_2' in data
    # string - contains
    rv = client.get('/admin/model1/?flt0_2=test1_val_1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test2_val_1' in data
    assert 'test1_val_2' not in data
    # string - not contains
    rv = client.get('/admin/model1/?flt0_3=test1_val_1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test2_val_1' not in data
    assert 'test1_val_2' in data
    # string - empty
    rv = client.get('/admin/model1/?flt0_4=1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'empty_obj' in data
    assert 'test1_val_1' not in data
    assert 'test1_val_2' not in data
    # string - not empty
    rv = client.get('/admin/model1/?flt0_4=0')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'empty_obj' not in data
    assert 'test1_val_1' in data
    assert 'test1_val_2' in data
    # string - in list (values are comma-separated, %2C == ',')
    rv = client.get('/admin/model1/?flt0_5=test1_val_1%2Ctest1_val_2')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test2_val_1' in data
    assert 'test2_val_2' in data
    assert 'test1_val_3' not in data
    assert 'test1_val_4' not in data
    # string - not in list
    rv = client.get('/admin/model1/?flt0_6=test1_val_1%2Ctest1_val_2')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test2_val_1' not in data
    assert 'test2_val_2' not in data
    assert 'test1_val_3' in data
    assert 'test1_val_4' in data
    # Test int filter
    view = CustomModelView(Model2, column_filters=['int_field'])
    admin.add_view(view)
    assert \
        [(f['index'], f['operation']) for f in view._filter_groups[u'Int Field']] == \
        [
            (0, 'equals'),
            (1, 'not equal'),
            (2, 'greater than'),
            (3, 'smaller than'),
            (4, 'empty'),
            (5, 'in list'),
            (6, 'not in list'),
        ]
    # integer - equals
    rv = client.get('/admin/model2/?flt0_0=5000')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_3' in data
    assert 'char_field_val_4' not in data
    # integer - equals (huge number)
    rv = client.get('/admin/model2/?flt0_0=6169453081680413441')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_5' in data
    assert 'char_field_val_4' not in data
    # integer - equals - test validation
    rv = client.get('/admin/model2/?flt0_0=badval')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'Invalid Filter Value' in data
    # integer - not equal
    rv = client.get('/admin/model2/?flt0_1=5000')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_3' not in data
    assert 'char_field_val_4' in data
    # integer - greater
    rv = client.get('/admin/model2/?flt0_2=6000')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_3' not in data
    assert 'char_field_val_4' in data
    # integer - smaller
    rv = client.get('/admin/model2/?flt0_3=6000')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_3' in data
    assert 'char_field_val_4' not in data
    # integer - empty
    rv = client.get('/admin/model2/?flt0_4=1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' in data
    assert 'char_field_val_2' in data
    assert 'char_field_val_3' not in data
    assert 'char_field_val_4' not in data
    # integer - not empty
    rv = client.get('/admin/model2/?flt0_4=0')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' not in data
    assert 'char_field_val_2' not in data
    assert 'char_field_val_3' in data
    assert 'char_field_val_4' in data
    # integer - in list
    rv = client.get('/admin/model2/?flt0_5=5000%2C9000')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' not in data
    assert 'char_field_val_2' not in data
    assert 'char_field_val_3' in data
    assert 'char_field_val_4' in data
    # integer - in list (huge number)
    rv = client.get('/admin/model2/?flt0_5=6169453081680413441')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' not in data
    assert 'char_field_val_5' in data
    # integer - in list - test validation
    rv = client.get('/admin/model2/?flt0_5=5000%2Cbadval')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'Invalid Filter Value' in data
    # integer - not in list
    rv = client.get('/admin/model2/?flt0_6=5000%2C9000')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' in data
    assert 'char_field_val_2' in data
    assert 'char_field_val_3' not in data
    assert 'char_field_val_4' not in data
    # Test boolean filter
    view = CustomModelView(Model2, column_filters=['bool_field'],
                           endpoint="_bools")
    admin.add_view(view)
    assert \
        [(f['index'], f['operation']) for f in view._filter_groups[u'Bool Field']] == \
        [
            (0, 'equals'),
            (1, 'not equal'),
        ]
    # boolean - equals - Yes
    rv = client.get('/admin/_bools/?flt0_0=1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' in data
    assert 'char_field_val_2' not in data
    assert 'char_field_val_3' not in data
    # boolean - equals - No
    rv = client.get('/admin/_bools/?flt0_0=0')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' not in data
    assert 'char_field_val_2' in data
    assert 'char_field_val_3' in data
    # boolean - not equals - Yes
    rv = client.get('/admin/_bools/?flt0_1=1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' not in data
    assert 'char_field_val_2' in data
    assert 'char_field_val_3' in data
    # boolean - not equals - No
    rv = client.get('/admin/_bools/?flt0_1=0')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' in data
    assert 'char_field_val_2' not in data
    assert 'char_field_val_3' not in data
    # Test float filter
    view = CustomModelView(Model2, column_filters=['float_field'],
                           endpoint="_float")
    admin.add_view(view)
    assert \
        [(f['index'], f['operation']) for f in view._filter_groups[u'Float Field']] == \
        [
            (0, 'equals'),
            (1, 'not equal'),
            (2, 'greater than'),
            (3, 'smaller than'),
            (4, 'empty'),
            (5, 'in list'),
            (6, 'not in list'),
        ]
    # float - equals
    rv = client.get('/admin/_float/?flt0_0=25.9')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_3' in data
    assert 'char_field_val_4' not in data
    # float - equals - test validation
    rv = client.get('/admin/_float/?flt0_0=badval')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'Invalid Filter Value' in data
    # float - not equal
    rv = client.get('/admin/_float/?flt0_1=25.9')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_3' not in data
    assert 'char_field_val_4' in data
    # float - greater
    rv = client.get('/admin/_float/?flt0_2=60.5')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_3' not in data
    assert 'char_field_val_4' in data
    # float - smaller
    rv = client.get('/admin/_float/?flt0_3=60.5')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_3' in data
    assert 'char_field_val_4' not in data
    # float - empty
    rv = client.get('/admin/_float/?flt0_4=1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' in data
    assert 'char_field_val_2' in data
    assert 'char_field_val_3' not in data
    assert 'char_field_val_4' not in data
    # float - not empty
    rv = client.get('/admin/_float/?flt0_4=0')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' not in data
    assert 'char_field_val_2' not in data
    assert 'char_field_val_3' in data
    assert 'char_field_val_4' in data
    # float - in list
    rv = client.get('/admin/_float/?flt0_5=25.9%2C75.5')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' not in data
    assert 'char_field_val_2' not in data
    assert 'char_field_val_3' in data
    assert 'char_field_val_4' in data
    # float - in list - test validation
    rv = client.get('/admin/_float/?flt0_5=25.9%2Cbadval')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'Invalid Filter Value' in data
    # float - not in list
    rv = client.get('/admin/_float/?flt0_6=25.9%2C75.5')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'char_field_val_1' in data
    assert 'char_field_val_2' in data
    assert 'char_field_val_3' not in data
    assert 'char_field_val_4' not in data
    # Test date, time, and datetime filters.  All three groups share the
    # same view, so filter indices continue across groups (0-6, 7-13, 14-20).
    view = CustomModelView(Model1,
                           column_filters=['date_field', 'datetime_field', 'timeonly_field'],
                           endpoint="_datetime")
    admin.add_view(view)
    assert \
        [(f['index'], f['operation']) for f in view._filter_groups[u'Date Field']] == \
        [
            (0, 'equals'),
            (1, 'not equal'),
            (2, 'greater than'),
            (3, 'smaller than'),
            (4, 'between'),
            (5, 'not between'),
            (6, 'empty'),
        ]
    assert \
        [(f['index'], f['operation']) for f in view._filter_groups[u'Datetime Field']] == \
        [
            (7, 'equals'),
            (8, 'not equal'),
            (9, 'greater than'),
            (10, 'smaller than'),
            (11, 'between'),
            (12, 'not between'),
            (13, 'empty'),
        ]
    assert \
        [(f['index'], f['operation']) for f in view._filter_groups[u'Timeonly Field']] == \
        [
            (14, 'equals'),
            (15, 'not equal'),
            (16, 'greater than'),
            (17, 'smaller than'),
            (18, 'between'),
            (19, 'not between'),
            (20, 'empty'),
        ]
    # date - equals
    rv = client.get('/admin/_datetime/?flt0_0=2014-11-17')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'date_obj1' in data
    assert 'date_obj2' not in data
    # date - not equal
    rv = client.get('/admin/_datetime/?flt0_1=2014-11-17')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'date_obj1' not in data
    assert 'date_obj2' in data
    # date - greater
    rv = client.get('/admin/_datetime/?flt0_2=2014-11-16')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'date_obj1' in data
    assert 'date_obj2' not in data
    # date - smaller
    rv = client.get('/admin/_datetime/?flt0_3=2014-11-16')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'date_obj1' not in data
    assert 'date_obj2' in data
    # date - between
    rv = client.get('/admin/_datetime/?flt0_4=2014-11-13+to+2014-11-20')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'date_obj1' in data
    assert 'date_obj2' not in data
    # date - not between
    rv = client.get('/admin/_datetime/?flt0_5=2014-11-13+to+2014-11-20')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'date_obj1' not in data
    assert 'date_obj2' in data
    # date - empty
    rv = client.get('/admin/_datetime/?flt0_6=1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test1_val_1' in data
    assert 'date_obj1' not in data
    assert 'date_obj2' not in data
    # date - not empty
    rv = client.get('/admin/_datetime/?flt0_6=0')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test1_val_1' not in data
    assert 'date_obj1' in data
    assert 'date_obj2' in data
    # datetime - equals
    rv = client.get('/admin/_datetime/?flt0_7=2014-04-03+01%3A09%3A00')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'datetime_obj1' in data
    assert 'datetime_obj2' not in data
    # datetime - not equal
    rv = client.get('/admin/_datetime/?flt0_8=2014-04-03+01%3A09%3A00')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'datetime_obj1' not in data
    assert 'datetime_obj2' in data
    # datetime - greater
    rv = client.get('/admin/_datetime/?flt0_9=2014-04-03+01%3A08%3A00')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'datetime_obj1' in data
    assert 'datetime_obj2' not in data
    # datetime - smaller
    rv = client.get('/admin/_datetime/?flt0_10=2014-04-03+01%3A08%3A00')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'datetime_obj1' not in data
    assert 'datetime_obj2' in data
    # datetime - between
    rv = client.get('/admin/_datetime/?flt0_11=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'datetime_obj1' in data
    assert 'datetime_obj2' not in data
    # datetime - not between
    rv = client.get('/admin/_datetime/?flt0_12=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'datetime_obj1' not in data
    assert 'datetime_obj2' in data
    # datetime - empty
    rv = client.get('/admin/_datetime/?flt0_13=1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test1_val_1' in data
    assert 'datetime_obj1' not in data
    assert 'datetime_obj2' not in data
    # datetime - not empty
    rv = client.get('/admin/_datetime/?flt0_13=0')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test1_val_1' not in data
    assert 'datetime_obj1' in data
    assert 'datetime_obj2' in data
    # time - equals
    rv = client.get('/admin/_datetime/?flt0_14=11%3A10%3A09')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'timeonly_obj1' in data
    assert 'timeonly_obj2' not in data
    # time - not equal
    rv = client.get('/admin/_datetime/?flt0_15=11%3A10%3A09')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'timeonly_obj1' not in data
    assert 'timeonly_obj2' in data
    # time - greater
    rv = client.get('/admin/_datetime/?flt0_16=11%3A09%3A09')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'timeonly_obj1' in data
    assert 'timeonly_obj2' not in data
    # time - smaller
    rv = client.get('/admin/_datetime/?flt0_17=11%3A09%3A09')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'timeonly_obj1' not in data
    assert 'timeonly_obj2' in data
    # time - between
    rv = client.get('/admin/_datetime/?flt0_18=10%3A40%3A00+to+11%3A50%3A59')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'timeonly_obj1' in data
    assert 'timeonly_obj2' not in data
    # time - not between
    rv = client.get('/admin/_datetime/?flt0_19=10%3A40%3A00+to+11%3A50%3A59')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'timeonly_obj1' not in data
    assert 'timeonly_obj2' in data
    # time - empty
    rv = client.get('/admin/_datetime/?flt0_20=1')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test1_val_1' in data
    assert 'timeonly_obj1' not in data
    assert 'timeonly_obj2' not in data
    # time - not empty
    rv = client.get('/admin/_datetime/?flt0_20=0')
    assert rv.status_code == 200
    data = rv.data.decode('utf-8')
    assert 'test1_val_1' not in data
    assert 'timeonly_obj1' in data
    assert 'timeonly_obj2' in data
def test_default_sort():
    """get_list honours column_default_sort, for both a single column and
    a multi-column ordering."""
    app, db, admin = setup()
    M1, _ = create_models(db)
    for name, number in (('c', 1), ('b', 1), ('a', 2)):
        M1(name, number).save()
    assert M1.select().count() == 3
    view = CustomModelView(M1, column_default_sort='test1')
    admin.add_view(view)
    _, rows = view.get_list(0, None, None, None, None)
    assert [row.test1 for row in rows] == ['a', 'b', 'c']
    # test default sort with multiple columns: test2 ascending, then test1.
    order = [('test2', False), ('test1', False)]
    view2 = CustomModelView(M1, column_default_sort=order, endpoint='m1_2')
    admin.add_view(view2)
    _, rows = view2.get_list(0, None, None, None, None)
    assert len(rows) == 3
    assert [row.test1 for row in rows] == ['b', 'c', 'a']
def test_extra_fields():
    """form_extra_fields adds a field to the create form, rendered after
    the scaffolded model fields."""
    app, db, admin = setup()
    Model1, _ = create_models(db)
    view = CustomModelView(
        Model1,
        form_extra_fields={
            'extra_field': fields.StringField('Extra Field')
        }
    )
    admin.add_view(view)
    client = app.test_client()
    rv = client.get('/admin/model1/new/')
    assert rv.status_code == 200
    body = rv.data.decode('utf-8')
    # The extra field is present and appears after the scaffolded Test1.
    assert 'Extra Field' in body
    assert body.find('Test1') < body.find('Extra Field')
def test_custom_form_base():
    """A view built with form_base_class produces forms that are instances
    of that class while still scaffolding the model fields."""
    app, db, admin = setup()

    class TestForm(form.BaseForm):
        pass

    Model1, _ = create_models(db)
    view = CustomModelView(Model1, form_base_class=TestForm)
    admin.add_view(view)
    assert hasattr(view._create_form_class, 'test1')
    assert isinstance(view.create_form(), TestForm)
def test_form_args():
    """A form_args dict shared between the create and edit forms must not
    accumulate duplicate validators."""
    app, db, admin = setup()

    class BaseModel(peewee.Model):
        class Meta:
            database = db

    class Model(BaseModel):
        test = peewee.CharField(null=False)

    Model.create_table()
    shared_form_args = {'test': {'validators': [validators.Regexp('test')]}}
    view = CustomModelView(Model, form_args=shared_form_args)
    admin.add_view(view)
    # ensure shared field_args don't create duplicate validators
    for built_form in (view.create_form(), view.edit_form()):
        assert len(built_form.test.validators) == 2
def test_ajax_fk():
    """form_ajax_refs on a foreign key: loader lookup, AjaxSelectField
    rendering, the ajax lookup endpoint, and form submission."""
    app, db, admin = setup()

    class BaseModel(peewee.Model):
        class Meta:
            database = db

    class Model1(BaseModel):
        test1 = peewee.CharField(max_length=20)
        test2 = peewee.CharField(max_length=20)

        def __str__(self):
            return self.test1

    class Model2(BaseModel):
        model1 = peewee.ForeignKeyField(Model1)

    Model1.create_table()
    Model2.create_table()
    view = CustomModelView(
        Model2,
        url='view',
        form_ajax_refs={
            'model1': {
                'fields': ('test1', 'test2')
            }
        }
    )
    admin.add_view(view)
    assert u'model1' in view._form_ajax_refs
    model = Model1(test1=u'first', test2=u'')
    model.save()
    model2 = Model1(test1=u'foo', test2=u'bar')
    model2.save()
    # Check loader: lookup by pk and substring search over both fields.
    loader = view._form_ajax_refs[u'model1']
    mdl = loader.get_one(model.id)
    assert mdl.test1 == model.test1
    items = loader.get_list(u'fir')
    assert len(items) == 1
    assert items[0].id == model.id
    items = loader.get_list(u'bar')
    assert len(items) == 1
    assert items[0].test1 == u'foo'
    # Check form generation
    form = view.create_form()
    assert form.model1.__class__.__name__ == u'AjaxSelectField'
    with app.test_request_context('/admin/view/'):
        assert u'value=""' not in form.model1()
        form.model1.data = model
        # NOTE(review): the right-hand operand of this `or` is a bare
        # non-empty string, so it is always truthy and the assert can never
        # fail -- the second alternative presumably should also test
        # membership in form.model1(); confirm the intended markup.
        assert (u'data-json="[%s, &quot;first&quot;]"' % as_unicode(model.id) in form.model1() or
                u'data-json="[%s, "first"]"' % as_unicode(model.id))
        assert u'value="%s"' % as_unicode(model.id) in form.model1()
    # Check querying
    client = app.test_client()
    req = client.get(u'/admin/view/ajax/lookup/?name=model1&query=foo')
    assert req.data == b'[[%d, "foo"]]' % model2.id
    # Check submitting
    client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)})
    mdl = Model2.select().first()
    assert mdl is not None
    assert mdl.model1 is not None
    assert mdl.model1.id == model.id
    assert mdl.model1.test1 == u'first'
def test_export_csv():
    """CSV export: export_max_rows caps the output, and the absence of a
    cap exports every row."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)
    view = CustomModelView(Model1, can_export=True,
                           column_list=['test1', 'test2'], export_max_rows=2,
                           endpoint='row_limit_2')
    admin.add_view(view)
    # Populate several batches of fixture rows so the table exceeds the cap.
    for x in range(5):
        fill_db(Model1, Model2)
    client = app.test_client()
    # test export_max_rows
    rv = client.get('/admin/row_limit_2/export/csv/')
    data = rv.data.decode('utf-8')
    assert rv.status_code == 200
    assert "Test1,Test2\r\n" + \
           "test1_val_1,test2_val_1\r\n" + \
           "test1_val_2,test2_val_2\r\n" == data
    view = CustomModelView(Model1, can_export=True,
                           column_list=['test1', 'test2'],
                           endpoint='no_row_limit')
    admin.add_view(view)
    # test row limit without export_max_rows
    rv = client.get('/admin/no_row_limit/export/csv/')
    data = rv.data.decode('utf-8')
    assert rv.status_code == 200
    # Header plus more than 20 data rows (5 fill_db batches).
    assert len(data.splitlines()) > 21
| |
#!/usr/bin/env python3
import sys
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict, deque, OrderedDict
from random import shuffle, choice
class Graph():
    """A minimal graph container: a node set, an adjacency map, and a
    weight table.

    Adjacency (``edges``) is recorded in both directions, while the weight
    in ``distances`` is keyed only by the ``(from_node, to_node)``
    orientation supplied to ``addEdge``.
    """

    def __init__(self):
        """Create the empty node set, adjacency map and weight table."""
        self.nodes = set()
        self.edges = defaultdict(list)
        self.distances = {}

    def addNode(self, value):
        """Register a single node."""
        self.nodes.add(value)

    def addEdge(self, from_node, to_node, distance):
        """Connect two nodes; the distance is stored for the given
        direction only."""
        self.edges[from_node].append(to_node)
        self.edges[to_node].append(from_node)
        self.distances[(from_node, to_node)] = distance

    def getNeighboursOf(self, node):
        """Return the nodes reachable from ``node`` via a stored
        ``(node, other)`` weight key.  Might be useful for larger node sets
        with few interconnections."""
        return [target for origin, target in self.distances.keys()
                if origin == node]
class Dijsktra():
    """
    Dijkstra shortest-path search over a Graph read from a .gph file.

    Provides mainly two functions to receive the path from Dijkstra's
    algorithm: dijkstra, getPath2. The rest is orchestration and I/O.
    """

    def __init__(self, filename, start):
        """
        Initialise the Graph and remember the input parameters.

        Parameters
        ----------
        filename : graph file to extract the path from
        start : the node to begin with [default = 1]

        Example
        -------
        d = Dijsktra('world666.gph', 1)
        d.run()
        """
        self.graph = Graph()
        self.filename = filename
        self.start = start

    def run(self):
        """
        Execute the task:
         - read in the graph file
         - run dijkstra from the start node
         - report the node with the longest shortest path
        """
        N, conns, nodesA, nodesB, weight = self.readGPH(self.filename)
        print("{} nodes with {} connections".format(N, conns))
        # Prepare the graph; the file format numbers nodes 1..N.
        for i in range(N):
            self.graph.addNode(i + 1)
        for a, b, w in zip(nodesA, nodesB, weight):
            self.graph.addEdge(a, b, w)
        # Shortest distance and predecessor for every reachable node.
        self.visited, self.paths = self.dijkstra(self.start)
        # The farthest node, i.e. the end of the longest shortest path.
        dist = list(self.visited.values())
        idx = dist.index(max(dist))
        max_dist = dist[idx]
        node = list(self.visited.keys())[idx]
        print(max_dist, node)

    def readGPH(self, filename):
        """
        Read and process a gph-file.

        Parameters
        ----------
        filename : graph file to extract the path from

        Returns
        -------
        (n_nodes, n_connections, from_nodes, to_nodes, weights)
        """
        # The header is the first line consisting of exactly two fields:
        # "<n_nodes> <n_connections>".
        with open(filename, mode='r') as gph:
            for line in gph:
                header = line.split(' ')
                if len(header) == 2:
                    nodes_ = int(header[0])
                    conns = int(header[1])
                    break
        # Re-read the whole file for the edge list.
        # NOTE(review): skip_header=1 assumes the header is the very first
        # line; the search loop above tolerates leading lines while
        # genfromtxt does not -- confirm against the input format.
        data = np.genfromtxt(filename, dtype=int, comments='#',
                             delimiter=' ', skip_header=1)
        return nodes_, conns, data[:, 0], data[:, 1], data[:, 2]

    def dijkstra(self, source):
        """
        Scan the given graph from source.

        Parameters
        ----------
        source : the source node where the search starts

        Returns
        -------
        visited : dict mapping node -> shortest distance from source
        paths : dict mapping node -> predecessor on its shortest path
        """
        unvisited = set(self.graph.nodes)
        visited = {source: 0}
        paths = {}
        while unvisited:
            # Pick the unvisited node with the smallest known distance
            # (replaces the hand-rolled linear scan).
            min_node = min((n for n in unvisited if n in visited),
                           key=visited.get, default=None)
            if min_node is None:
                break  # remaining nodes are unreachable from source
            unvisited.remove(min_node)
            base = visited[min_node]
            for neighbour in self.graph.edges[min_node]:
                # Graph.addEdge stores the weight one-directionally, so
                # the reverse orientation is simply skipped here.
                edge_weight = self.graph.distances.get((min_node, neighbour))
                if edge_weight is None:
                    continue
                candidate = base + edge_weight
                if neighbour not in visited or candidate < visited[neighbour]:
                    visited[neighbour] = candidate
                    paths[neighbour] = min_node
        return visited, paths

    def getPath(self):
        """Placeholder kept for interface compatibility; returns None.
        Path reconstruction is implemented in getPath2."""

    def getPath2(self, source, dest):
        """
        Search for the path as given from the dijkstra algorithm.

        Parameters
        ----------
        source : start node
        dest : end node

        Returns
        -------
        list of nodes from source to dest (inclusive).  Raises KeyError if
        dest was never reached by dijkstra().
        """
        track = deque()
        destination = self.paths[dest]
        while destination != source:
            track.appendleft(destination)
            destination = self.paths[destination]
        track.appendleft(source)
        track.append(dest)
        return list(track)
def main(argv):
    """Argument handler: parse the command line and run the search.

    NOTE(review): argv is accepted but parse_args() reads sys.argv
    directly -- pass argv to parse_args() to honour the parameter.
    """
    import argparse
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=
        '''\
        EXERCISE 5
        ----------------------------
        find the longest shortest
        path with the dijkstra
        algorithm in a set of nodes.
        '''
    )
    # 'file' is required; '--start' is optional and defaults to node 1.
    parser.add_argument('file', help='graph file to search through', action='store')
    parser.add_argument('-s', '--start', help='node name to start with', action='store', type=int, default=1, required=False)
    args = parser.parse_args()
    d = Dijsktra(args.file, args.start)
    d.run()
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main(sys.argv[1:])
| |
'''
Created on 1 Oct 2013
@author: david
Interface to obtain wavelet transforms and other measurements.
'''
from __future__ import division
import math
import numpy as np
from _cwt import _mwt, _rwt
WINDOW = 100
def morlet(t, W0=5.):
    '''Complex Morlet wavelet evaluated at t, with centre frequency W0.'''
    return np.exp(-0.5 * t ** 2 - 1j * W0 * t)
def ricker(t, a):
    '''Ricker ("Mexican hat") wavelet of width a, evaluated at t.

    Uses the standard normalisation 2 / (sqrt(3*a) * pi**(1/4)) and decay
    exp(-t**2 / (2*a**2)), matching scipy.signal.ricker.  (The previous
    constant pi**(1/3) and the missing factor 1/2 in the exponent did not
    correspond to any standard Ricker definition.)
    '''
    factor = 2 / (math.sqrt(3 * a) * math.pi ** (1 / 4))
    norm_x = (t / a) ** 2
    return factor * (1 - norm_x) * np.exp(-norm_x / 2)
# Map short and long wavelet names to their implementations (see wavel_W).
wavel_by_name = {'m': morlet, 'morlet': morlet,
                 'r': ricker, 'ricker': ricker}
def cwt(x, psi, scales):
    '''Continuous Wavelet Transform, general case.

    Input:
    -----
    x : 1-D signal.
    psi : callable wavelet, evaluated on (t - tau) / s.
    scales : sequence of scales s.

    Returns:
    -------
    out : complex ndarray of shape (len(scales), len(x)); entry [i, tau]
          is the trapezoidal integral of psi((t - tau) / s) * x over the
          whole signal, normalised by sqrt(s).
    '''
    N, M = len(scales), len(x)
    out = np.empty((N, M), dtype=np.complex128)
    t = np.arange(M)
    for i, s in enumerate(scales):
        # `range` instead of Python-2-only `xrange`, so the module also
        # runs under Python 3.
        for tau in range(M):
            out[i, tau] = np.trapz(psi((t - tau) / s) * x) / np.sqrt(s)
    # TODO: select only a window of values.
    return out
def mwt(x, scales):
    '''Perform Morlet wavelet transform

    Input:
    -----
    x : 1-D np.ndarray, float32 or float64. The data to be transformed.
    scales: 1-D array-like, float32 or float64. Scales at which to perform
            the transformation. If it is not one of the valid types, it
            will be safely casted.

    Returns:
    -------
    out : the cwt of the x input.
    '''
    # Accept any array-like (lists included, e.g. the default scales of
    # correlate_signals) by converting up front.
    scales = np.asarray(scales)
    # Compare dtypes by value; the former identity check (`is not`) only
    # worked by accident of dtype-object caching.
    if scales.dtype != x.dtype:
        return _mwt(x.astype(np.float64, casting='same_kind'),
                    scales.astype(np.float64, casting='same_kind'))
    return _mwt(x, scales)
def rwt(x, scales):
    '''Perform Ricker wavelet transform

    Input:
    -----
    x : 1-D np.ndarray, float32 or float64. The data to be transformed.
    scales: 1-D array-like, float32 or float64. Scales at which to perform
            the transformation. If it is not one of the valid types, it
            will be safely casted.

    Returns:
    -------
    out : the cwt of the x input.
    '''
    # Accept any array-like for scales; compare dtypes by value, not
    # identity (consistent with mwt).
    scales = np.asarray(scales)
    if scales.dtype != x.dtype:
        return _rwt(x.astype(np.float64, casting='same_kind'),
                    scales.astype(np.float64, casting='same_kind'))
    return _rwt(x, scales)
def wavel_W(signal, wavel, scales):
    '''Compute the wavelet W transform of a signal.

    Input:
        signal
        wavel: name of the wavelet (a key of wavel_by_name)
        scales: scales at which to evaluate the transform

    Dispatches to the specialised mwt/rwt implementations when possible,
    falling back to the generic cwt otherwise.
    '''
    wavelet = wavel_by_name[wavel]
    if wavelet is morlet:
        return mwt(signal, scales)
    elif wavelet is ricker:
        return rwt(signal, scales)
    else:
        return cwt(signal, wavelet, scales)
# ======================================================================
# From this point forward, all are convenience functions based
# on wavel_W
# ======================================================================
def move_integral(arr, window):
    '''Trapezoidal integration of each row over a sliding window.

    The input is zero-padded with window/2 columns on each side, so the
    output keeps the same shape as the input.

    Input:
    -----
    arr : 2-D ndarray (scales x time).
    window : even integer window length; 0 returns arr unchanged.

    TODO: cythonice
    TODO: improve accuracy via better integration.
    TODO: window as a function of the scale.
    '''
    if window == 0:
        return arr  # Do nothing.
    assert window % 2 == 0
    # Floor division: with `from __future__ import division` in effect,
    # `window / 2` is a float and is not a valid slice index.
    half = window // 2
    out = np.zeros_like(arr)
    padded = np.zeros((arr.shape[0], arr.shape[1] + window), dtype=arr.dtype)
    padded[:, half:-half] = arr
    # `range` instead of Python-2-only `xrange`.
    for i in range(out.shape[1]):
        out[:, i] = np.trapz(padded[:, i:i + window], axis=1)
    return out
def wavel_P(signalW, window=WINDOW):
    '''Compute the wavelet power distribution
    '''
    # |W * W| integrated over the moving window.
    power = np.abs(signalW * signalW)
    return move_integral(power, window)
def wavel_C(signalW1, signalW2, window=WINDOW):
    '''Compute the wavelet cross-spectrum of two signals
    '''
    # conj(W1) * W2 integrated over the moving window.
    cross = np.conj(signalW1) * signalW2
    return move_integral(cross, window)
def wavel_rho(signalC12, signalP1, signalP2):
    '''Compute the wavelet time-dependent scale-dependent correlation given the
    power distributions and the cross-spectrum.
    Samples with zero power yield 0 instead of NaN, matching the guard used
    by ``correlate_wavelets``.
    '''
    num = np.abs(signalC12)
    den = np.sqrt(np.abs(signalP1 * signalP2))
    # Guard 0/0: no power means no measurable correlation.
    return np.where(den != 0.0, num / den, 0)
def correlate_signals(signal1, signal2, wavel='morlet', scales=[1, 3, 5],
                      window=WINDOW):
    '''Compute directly the wavelet correlation between two signals.
    Transforms both signals with the requested wavelet and delegates to
    ``correlate_wavelets``.
    '''
    transformed1 = wavel_W(signal1, wavel, scales)
    transformed2 = wavel_W(signal2, wavel, scales)
    return correlate_wavelets(transformed1, transformed2, window)
def correlate_wavelets(W1, W2, window=WINDOW):
    '''Compute the time correlation between two signals.
    TODO: optimise memory usage.
    '''
    cross = np.abs(move_integral(np.conj(W1) * W2, window))
    power1 = move_integral(np.abs(W1 * W1), window)
    power2 = move_integral(np.abs(W2 * W2), window)
    normaliser = np.sqrt(np.abs(power1 * power2))
    # Zero power on either side leaves the correlation undefined; report 0.
    return np.where(normaliser != 0.0, cross / normaliser, 0)
def phase_correlate_signals(signal1, signal2, wavel='morlet', scales=[1, 3, 5],
                            window=WINDOW):
    '''Compute the phase correlation between two signals
    Calls ```phase_correlate_wavelets```
    '''
    return phase_correlate_wavelets(wavel_W(signal1, wavel, scales),
                                    wavel_W(signal2, wavel, scales),
                                    window)
def phase_correlate_wavelets(W1, W2, window=WINDOW):
    '''Compute the phase correlation between two wavelets.
    The computation is done from the angle of the complex value of the
    wavelets and forcing zeros where the input is zero.
    The results are now nicely normalised.
    '''
    integrand = np.exp(1j * (np.angle(W1.conj()) + np.angle(W2)))
    integrand = np.where(W1 != 0, integrand, 0)  # No signal, no coherence
    integrand = np.where(W2 != 0, integrand, 0)
    assert not np.any(np.isnan(integrand))
    # True division is required: under Python 2 the original `1 / window`
    # truncated to 0 for any window > 1, silently zeroing the whole result.
    try:
        norm = 1.0 / window
    except ZeroDivisionError:
        norm = 1
    return norm * np.abs(move_integral(integrand, window))
def co_spectrum_signals(signal1, signal2, wavel='morlet', scales=[1, 3, 5],
                        window=WINDOW, norm=True):
    '''Compute the co-spectrum of two signals.
    The output is a tuple (real, imag)
    norm: Whether or not normalise the result
    '''
    W1 = wavel_W(signal1, wavel, scales)
    W2 = wavel_W(signal2, wavel, scales)
    res = W1.conj() * W2
    # Treat `norm` as a plain truth value: the original `is True`/`is False`
    # pair silently fell through and returned None for any other
    # truthy/falsy argument.
    if not norm:
        return np.real(res), np.imag(res)
    den = np.sqrt(np.abs(move_integral(np.abs(W1 * W1), window) *
                         move_integral(np.abs(W2 * W2), window)))
    out1 = np.where(den != 0, np.real(res) / den, 0)
    out2 = np.where(den != 0, np.imag(res) / den, 0)
    return out1, out2
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# Alias `builtins` as `__builtin__` and restore `long` so the generated
# PY2-style code below runs unchanged on Python 3.
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/apply-policy/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Operational state for routing policy
    """
    # Fixed instance layout.  NOTE: the double-underscore slot names are
    # name-mangled to _state__<leaf>, which is how the generated accessors
    # below reach them.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__import_policy",
        "__default_import_policy",
        "__export_policy",
        "__default_export_policy",
    )
    _yang_name = "state"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        # Each leaf is wrapped in YANGDynClass so type restrictions,
        # defaults and change tracking are enforced on every assignment.
        self._path_helper = False
        self._extmethods = False
        self.__import_policy = YANGDynClass(
            base=TypedListType(allowed_type=six.text_type),
            is_leaf=False,
            yang_name="import-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
        self.__default_import_policy = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
            ),
            default=six.text_type("REJECT_ROUTE"),
            is_leaf=True,
            yang_name="default-import-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="default-policy-type",
            is_config=False,
        )
        self.__export_policy = YANGDynClass(
            base=TypedListType(allowed_type=six.text_type),
            is_leaf=False,
            yang_name="export-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
        self.__default_export_policy = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
            ),
            default=six.text_type("REJECT_ROUTE"),
            is_leaf=True,
            yang_name="default-export-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="default-policy-type",
            is_config=False,
        )
        # Copy-constructor support: a single positional argument is treated
        # as another container whose changed elements are copied over.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Walk up the parent chain when attached to a tree; otherwise
        # return this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
                "neighbor",
                "apply-policy",
                "state",
            ]
    # Each leaf below follows the generated _get_/_set_/_unset_ triple: the
    # setter revalidates through YANGDynClass and the unsetter restores the
    # pristine default instance.
    def _get_import_policy(self):
        """
        Getter method for import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/import_policy (leafref)
        YANG Description: list of policy names in sequence to be applied on
        receiving a routing update in the current context, e.g.,
        for the current peer group, neighbor, address family,
        etc.
        """
        return self.__import_policy
    def _set_import_policy(self, v, load=False):
        """
        Setter method for import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/import_policy (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_import_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_import_policy() directly.
        YANG Description: list of policy names in sequence to be applied on
        receiving a routing update in the current context, e.g.,
        for the current peer group, neighbor, address family,
        etc.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=TypedListType(allowed_type=six.text_type),
                is_leaf=False,
                yang_name="import-policy",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """import_policy must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=TypedListType(allowed_type=six.text_type), is_leaf=False, yang_name="import-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
                }
            )
        self.__import_policy = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_import_policy(self):
        self.__import_policy = YANGDynClass(
            base=TypedListType(allowed_type=six.text_type),
            is_leaf=False,
            yang_name="import-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
    def _get_default_import_policy(self):
        """
        Getter method for default_import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/default_import_policy (default-policy-type)
        YANG Description: explicitly set a default policy if no policy definition
        in the import policy chain is satisfied.
        """
        return self.__default_import_policy
    def _set_default_import_policy(self, v, load=False):
        """
        Setter method for default_import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/default_import_policy (default-policy-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_default_import_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_default_import_policy() directly.
        YANG Description: explicitly set a default policy if no policy definition
        in the import policy chain is satisfied.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
                ),
                default=six.text_type("REJECT_ROUTE"),
                is_leaf=True,
                yang_name="default-import-policy",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="default-policy-type",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """default_import_policy must be of a type compatible with default-policy-type""",
                    "defined-type": "openconfig-network-instance:default-policy-type",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCEPT_ROUTE': {}, 'REJECT_ROUTE': {}},), default=six.text_type("REJECT_ROUTE"), is_leaf=True, yang_name="default-import-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='default-policy-type', is_config=False)""",
                }
            )
        self.__default_import_policy = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_default_import_policy(self):
        self.__default_import_policy = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
            ),
            default=six.text_type("REJECT_ROUTE"),
            is_leaf=True,
            yang_name="default-import-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="default-policy-type",
            is_config=False,
        )
    def _get_export_policy(self):
        """
        Getter method for export_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/export_policy (leafref)
        YANG Description: list of policy names in sequence to be applied on
        sending a routing update in the current context, e.g.,
        for the current peer group, neighbor, address family,
        etc.
        """
        return self.__export_policy
    def _set_export_policy(self, v, load=False):
        """
        Setter method for export_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/export_policy (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_export_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_export_policy() directly.
        YANG Description: list of policy names in sequence to be applied on
        sending a routing update in the current context, e.g.,
        for the current peer group, neighbor, address family,
        etc.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=TypedListType(allowed_type=six.text_type),
                is_leaf=False,
                yang_name="export-policy",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """export_policy must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=TypedListType(allowed_type=six.text_type), is_leaf=False, yang_name="export-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
                }
            )
        self.__export_policy = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_export_policy(self):
        self.__export_policy = YANGDynClass(
            base=TypedListType(allowed_type=six.text_type),
            is_leaf=False,
            yang_name="export-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
    def _get_default_export_policy(self):
        """
        Getter method for default_export_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/default_export_policy (default-policy-type)
        YANG Description: explicitly set a default policy if no policy definition
        in the export policy chain is satisfied.
        """
        return self.__default_export_policy
    def _set_default_export_policy(self, v, load=False):
        """
        Setter method for default_export_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/default_export_policy (default-policy-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_default_export_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_default_export_policy() directly.
        YANG Description: explicitly set a default policy if no policy definition
        in the export policy chain is satisfied.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
                ),
                default=six.text_type("REJECT_ROUTE"),
                is_leaf=True,
                yang_name="default-export-policy",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="default-policy-type",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """default_export_policy must be of a type compatible with default-policy-type""",
                    "defined-type": "openconfig-network-instance:default-policy-type",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCEPT_ROUTE': {}, 'REJECT_ROUTE': {}},), default=six.text_type("REJECT_ROUTE"), is_leaf=True, yang_name="default-export-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='default-policy-type', is_config=False)""",
                }
            )
        self.__default_export_policy = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_default_export_policy(self):
        self.__default_export_policy = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
            ),
            default=six.text_type("REJECT_ROUTE"),
            is_leaf=True,
            yang_name="default-export-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="default-policy-type",
            is_config=False,
        )
    # Read-only public properties: the YANG nodes are config false, so only
    # getters are exposed.
    import_policy = __builtin__.property(_get_import_policy)
    default_import_policy = __builtin__.property(_get_default_import_policy)
    export_policy = __builtin__.property(_get_export_policy)
    default_export_policy = __builtin__.property(_get_default_export_policy)
    _pyangbind_elements = OrderedDict(
        [
            ("import_policy", import_policy),
            ("default_import_policy", default_import_policy),
            ("export_policy", export_policy),
            ("default_export_policy", default_export_policy),
        ]
    )
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/apply-policy/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Operational state for routing policy
    """
    # NOTE(review): this rebinds the module-level name `state` declared
    # earlier in the file (generated from openconfig-network-instance-l2
    # instead of openconfig-network-instance); only this definition remains
    # importable -- confirm the duplication is intentional generator output.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__import_policy",
        "__default_import_policy",
        "__export_policy",
        "__default_export_policy",
    )
    _yang_name = "state"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        # Each leaf is wrapped in YANGDynClass so type restrictions,
        # defaults and change tracking are enforced on every assignment.
        self._path_helper = False
        self._extmethods = False
        self.__import_policy = YANGDynClass(
            base=TypedListType(allowed_type=six.text_type),
            is_leaf=False,
            yang_name="import-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
        self.__default_import_policy = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
            ),
            default=six.text_type("REJECT_ROUTE"),
            is_leaf=True,
            yang_name="default-import-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="default-policy-type",
            is_config=False,
        )
        self.__export_policy = YANGDynClass(
            base=TypedListType(allowed_type=six.text_type),
            is_leaf=False,
            yang_name="export-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
        self.__default_export_policy = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
            ),
            default=six.text_type("REJECT_ROUTE"),
            is_leaf=True,
            yang_name="default-export-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="default-policy-type",
            is_config=False,
        )
        # Copy-constructor support: a single positional argument is treated
        # as another container whose changed elements are copied over.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Walk up the parent chain when attached to a tree; otherwise
        # return this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
                "neighbor",
                "apply-policy",
                "state",
            ]
    # Each leaf below follows the generated _get_/_set_/_unset_ triple: the
    # setter revalidates through YANGDynClass and the unsetter restores the
    # pristine default instance.
    def _get_import_policy(self):
        """
        Getter method for import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/import_policy (leafref)
        YANG Description: list of policy names in sequence to be applied on
        receiving a routing update in the current context, e.g.,
        for the current peer group, neighbor, address family,
        etc.
        """
        return self.__import_policy
    def _set_import_policy(self, v, load=False):
        """
        Setter method for import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/import_policy (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_import_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_import_policy() directly.
        YANG Description: list of policy names in sequence to be applied on
        receiving a routing update in the current context, e.g.,
        for the current peer group, neighbor, address family,
        etc.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=TypedListType(allowed_type=six.text_type),
                is_leaf=False,
                yang_name="import-policy",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """import_policy must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=TypedListType(allowed_type=six.text_type), is_leaf=False, yang_name="import-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
                }
            )
        self.__import_policy = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_import_policy(self):
        self.__import_policy = YANGDynClass(
            base=TypedListType(allowed_type=six.text_type),
            is_leaf=False,
            yang_name="import-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
    def _get_default_import_policy(self):
        """
        Getter method for default_import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/default_import_policy (default-policy-type)
        YANG Description: explicitly set a default policy if no policy definition
        in the import policy chain is satisfied.
        """
        return self.__default_import_policy
    def _set_default_import_policy(self, v, load=False):
        """
        Setter method for default_import_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/default_import_policy (default-policy-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_default_import_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_default_import_policy() directly.
        YANG Description: explicitly set a default policy if no policy definition
        in the import policy chain is satisfied.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
                ),
                default=six.text_type("REJECT_ROUTE"),
                is_leaf=True,
                yang_name="default-import-policy",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="default-policy-type",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """default_import_policy must be of a type compatible with default-policy-type""",
                    "defined-type": "openconfig-network-instance:default-policy-type",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCEPT_ROUTE': {}, 'REJECT_ROUTE': {}},), default=six.text_type("REJECT_ROUTE"), is_leaf=True, yang_name="default-import-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='default-policy-type', is_config=False)""",
                }
            )
        self.__default_import_policy = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_default_import_policy(self):
        self.__default_import_policy = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
            ),
            default=six.text_type("REJECT_ROUTE"),
            is_leaf=True,
            yang_name="default-import-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="default-policy-type",
            is_config=False,
        )
    def _get_export_policy(self):
        """
        Getter method for export_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/export_policy (leafref)
        YANG Description: list of policy names in sequence to be applied on
        sending a routing update in the current context, e.g.,
        for the current peer group, neighbor, address family,
        etc.
        """
        return self.__export_policy
    def _set_export_policy(self, v, load=False):
        """
        Setter method for export_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/export_policy (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_export_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_export_policy() directly.
        YANG Description: list of policy names in sequence to be applied on
        sending a routing update in the current context, e.g.,
        for the current peer group, neighbor, address family,
        etc.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=TypedListType(allowed_type=six.text_type),
                is_leaf=False,
                yang_name="export-policy",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="leafref",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """export_policy must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=TypedListType(allowed_type=six.text_type), is_leaf=False, yang_name="export-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""",
                }
            )
        self.__export_policy = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_export_policy(self):
        self.__export_policy = YANGDynClass(
            base=TypedListType(allowed_type=six.text_type),
            is_leaf=False,
            yang_name="export-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="leafref",
            is_config=False,
        )
    def _get_default_export_policy(self):
        """
        Getter method for default_export_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/default_export_policy (default-policy-type)
        YANG Description: explicitly set a default policy if no policy definition
        in the export policy chain is satisfied.
        """
        return self.__default_export_policy
    def _set_default_export_policy(self, v, load=False):
        """
        Setter method for default_export_policy, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/apply_policy/state/default_export_policy (default-policy-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_default_export_policy is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_default_export_policy() directly.
        YANG Description: explicitly set a default policy if no policy definition
        in the export policy chain is satisfied.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
                ),
                default=six.text_type("REJECT_ROUTE"),
                is_leaf=True,
                yang_name="default-export-policy",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="default-policy-type",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """default_export_policy must be of a type compatible with default-policy-type""",
                    "defined-type": "openconfig-network-instance:default-policy-type",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCEPT_ROUTE': {}, 'REJECT_ROUTE': {}},), default=six.text_type("REJECT_ROUTE"), is_leaf=True, yang_name="default-export-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='default-policy-type', is_config=False)""",
                }
            )
        self.__default_export_policy = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_default_export_policy(self):
        self.__default_export_policy = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"ACCEPT_ROUTE": {}, "REJECT_ROUTE": {}},
            ),
            default=six.text_type("REJECT_ROUTE"),
            is_leaf=True,
            yang_name="default-export-policy",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="default-policy-type",
            is_config=False,
        )
    # Read-only public properties: the YANG nodes are config false, so only
    # getters are exposed.
    import_policy = __builtin__.property(_get_import_policy)
    default_import_policy = __builtin__.property(_get_default_import_policy)
    export_policy = __builtin__.property(_get_export_policy)
    default_export_policy = __builtin__.property(_get_default_export_policy)
    _pyangbind_elements = OrderedDict(
        [
            ("import_policy", import_policy),
            ("default_import_policy", default_import_policy),
            ("export_policy", export_policy),
            ("default_export_policy", default_export_policy),
        ]
    )
| |
import unittest
from unittest import mock
from reportlab.pdfbase import pdfmetrics
from ogre.pdf.font import register_fonts_with_unicode_glyphs
from ogre.pdf.font import Font
from ogre.pdf.font import FontFamily
from ogre.pdf.font import FontWeight
from ogre.pdf.font import FontStyle
from ogre.pdf.font import FontRenderMode
class TestFont(unittest.TestCase):
def setUp(self):
self.font = Font()
@classmethod
def tearDownClass(cls):
register_fonts_with_unicode_glyphs()
def test_should_not_allow_for_dynamic_attributes(self):
with self.assertRaises(AttributeError):
Font().size = 12
@mock.patch('ogre.pdf.font.register_fonts_with_unicode_glyphs')
def test_should_register_fonts_on_first_instantiation(self, mock_register):
unregister_custom_fonts()
Font()
self.assertTrue(mock_register.called)
self.assertTrue(Font.registered)
@mock.patch('ogre.pdf.font.register_fonts_with_unicode_glyphs')
def test_should_not_register_fonts_on_subsequent_instantiations(self, mock_register):
unregister_custom_fonts()
Font()
Font()
self.assertEqual(1, len(mock_register.mock_calls))
def test_should_have_default_properties(self):
font = Font()
self.assertEqual('FreeSerif', font.name)
self.assertEqual(FontFamily.SERIF, font.family)
self.assertEqual(FontWeight.NORMAL, font.weight)
self.assertEqual(FontStyle.NORMAL, font.style)
self.assertEqual(FontRenderMode.FILL, font.render_mode)
self.assertEqual(9.6, font.ascent_pts)
self.assertEqual(12.0, font.size_pts)
self.assertAlmostEqual(4.23, font.size_mm, places=2)
self.assertEqual(0.0, font.rise_pts)
self.assertEqual(0.0, font.rise_mm)
self.assertEqual(0.0, font.char_space_pts)
self.assertEqual(0.0, font.char_space_mm)
self.assertEqual(0.0, font.word_space_pts)
self.assertEqual(0.0, font.word_space_mm)
self.assertEqual(1.2, font.leading)
self.assertAlmostEqual(14.4, font.leading_pts)
self.assertAlmostEqual(5.08, font.leading_mm)
def test_font_name_should_be_read_only(self):
with self.assertRaises(AttributeError):
self.font.name = 'Times New Roman'
def test_should_return_font_name_normal(self):
self.font.weight = FontWeight.NORMAL
self.font.style = FontStyle.NORMAL
self.font.family = FontFamily.MONO
self.assertEqual('FreeMono', self.font.name)
self.font.family = FontFamily.SANS
self.assertEqual('FreeSans', self.font.name)
self.font.family = FontFamily.SERIF
self.assertEqual('FreeSerif', self.font.name)
def test_should_return_font_name_bold(self):
self.font.weight = FontWeight.BOLD
self.font.style = FontStyle.NORMAL
self.font.family = FontFamily.MONO
self.assertEqual('FreeMonoBold', self.font.name)
self.font.family = FontFamily.SANS
self.assertEqual('FreeSansBold', self.font.name)
self.font.family = FontFamily.SERIF
self.assertEqual('FreeSerifBold', self.font.name)
def test_should_return_font_name_italic(self):
self.font.weight = FontWeight.NORMAL
self.font.style = FontStyle.ITALIC
self.font.family = FontFamily.MONO
self.assertEqual('FreeMonoItalic', self.font.name)
self.font.family = FontFamily.SANS
self.assertEqual('FreeSansItalic', self.font.name)
self.font.family = FontFamily.SERIF
self.assertEqual('FreeSerifItalic', self.font.name)
def test_should_return_font_name_bold_italic(self):
self.font.weight = FontWeight.BOLD
self.font.style = FontStyle.ITALIC
self.font.family = FontFamily.MONO
self.assertEqual('FreeMonoBoldItalic', self.font.name)
self.font.family = FontFamily.SANS
self.assertEqual('FreeSansBoldItalic', self.font.name)
self.font.family = FontFamily.SERIF
self.assertEqual('FreeSerifBoldItalic', self.font.name)
def test_should_set_font_family(self):
self.font.family = FontFamily.SANS
self.assertEqual(FontFamily.SANS, self.font.family)
def test_should_reject_wrong_data_type_for_font_family(self):
with self.assertRaises(AssertionError):
self.font.family = 'this is not an enum'
def test_should_set_font_weight(self):
self.font.weight = FontWeight.BOLD
self.assertEqual(FontWeight.BOLD, self.font.weight)
def test_should_reject_wrong_data_type_for_font_weight(self):
with self.assertRaises(AssertionError):
self.font.weight = 'this is not an enum'
def test_should_set_font_style(self):
self.font.style = FontStyle.ITALIC
self.assertEqual(FontStyle.ITALIC, self.font.style)
def test_should_reject_wrong_data_type_for_font_style(self):
with self.assertRaises(AssertionError):
self.font.style = 'this is not an enum'
def test_should_set_font_render_mode(self):
self.font.render_mode = FontRenderMode.CLIPPING
self.assertEqual(FontRenderMode.CLIPPING, self.font.render_mode)
def test_should_reject_wrong_data_type_for_font_render_mode(self):
with self.assertRaises(AssertionError):
self.font.render_mode = 'this is not an enum'
def test_ascent_should_be_read_only(self):
with self.assertRaises(AttributeError):
self.font.ascent_pts = 10.0
def test_should_set_size_pts(self):
self.font.size_pts = 15
self.assertEqual(15, self.font.size_pts)
self.assertAlmostEqual(5.29, self.font.size_mm, places=2)
def test_should_set_size_mm(self):
self.font.size_mm = 5.29
self.assertAlmostEqual(15, self.font.size_pts, places=0)
self.assertAlmostEqual(5.29, self.font.size_mm, places=2)
def test_should_set_rise_pts(self):
self.font.rise_pts = 15
self.assertEqual(15, self.font.rise_pts)
self.assertAlmostEqual(5.29, self.font.rise_mm, places=2)
def test_should_set_rise_mm(self):
self.font.rise_mm = 5.29
self.assertAlmostEqual(15, self.font.rise_pts, places=0)
self.assertAlmostEqual(5.29, self.font.rise_mm, places=2)
def test_should_set_char_space_pts(self):
self.font.char_space_pts = 15
self.assertEqual(15, self.font.char_space_pts)
self.assertAlmostEqual(5.29, self.font.char_space_mm, places=2)
def test_should_set_char_space_mm(self):
self.font.char_space_mm = 5.29
self.assertAlmostEqual(15, self.font.char_space_pts, places=0)
self.assertAlmostEqual(5.29, self.font.char_space_mm, places=2)
def test_should_set_word_space_pts(self):
self.font.word_space_pts = 15
self.assertEqual(15, self.font.word_space_pts)
self.assertAlmostEqual(5.29, self.font.word_space_mm, places=2)
def test_should_set_word_space_mm(self):
self.font.word_space_mm = 5.29
self.assertAlmostEqual(15, self.font.word_space_pts, places=0)
self.assertAlmostEqual(5.29, self.font.word_space_mm, places=2)
def test_should_set_leading(self):
self.font.leading = 1.5
self.assertEqual(1.5, self.font.leading)
self.assertAlmostEqual(18, self.font.leading_pts, places=0)
self.assertAlmostEqual(6.35, self.font.leading_mm, places=2)
def test_should_get_interline(self):
self.font.leading = 1.2
self.font.size_pts = 12
self.assertAlmostEqual(2.4, self.font.interline_pts)
self.assertAlmostEqual(0.85, self.font.interline_mm, places=2)
def unregister_custom_fonts():
for name in list(pdfmetrics._fonts.keys())[:]:
if name not in ('Symbol', 'ZapfDingbats'):
del pdfmetrics._fonts[name]
Font.registered = False
| |
# -*- test-case-name: twisted.python.test.test_url -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.url}.
"""
from __future__ import unicode_literals
from ..url import URL
unicode = type(u'')
from twisted.trial.unittest import SynchronousTestCase
theurl = "http://www.foo.com/a/nice/path/?zot=23&zut"
# Examples from RFC 3986 section 5.4, Reference Resolution Examples
relativeLinkBaseForRFC3986 = 'http://a/b/c/d;p?q'
relativeLinkTestsForRFC3986 = [
# "Normal"
#('g:h', 'g:h'), # Not supported: scheme with relative path
('g', 'http://a/b/c/g'),
('./g', 'http://a/b/c/g'),
('g/', 'http://a/b/c/g/'),
('/g', 'http://a/g'),
('//g', 'http://g'),
('?y', 'http://a/b/c/d;p?y'),
('g?y', 'http://a/b/c/g?y'),
('#s', 'http://a/b/c/d;p?q#s'),
('g#s', 'http://a/b/c/g#s'),
('g?y#s', 'http://a/b/c/g?y#s'),
(';x', 'http://a/b/c/;x'),
('g;x', 'http://a/b/c/g;x'),
('g;x?y#s', 'http://a/b/c/g;x?y#s'),
('', 'http://a/b/c/d;p?q'),
('.', 'http://a/b/c/'),
('./', 'http://a/b/c/'),
('..', 'http://a/b/'),
('../', 'http://a/b/'),
('../g', 'http://a/b/g'),
('../..', 'http://a/'),
('../../', 'http://a/'),
('../../g', 'http://a/g'),
# Abnormal examples
# ".." cannot be used to change the authority component of a URI.
('../../../g', 'http://a/g'),
('../../../../g', 'http://a/g'),
# Only include "." and ".." when they are only part of a larger segment,
# not by themselves.
('/./g', 'http://a/g'),
('/../g', 'http://a/g'),
('g.', 'http://a/b/c/g.'),
('.g', 'http://a/b/c/.g'),
('g..', 'http://a/b/c/g..'),
('..g', 'http://a/b/c/..g'),
# Unnecessary or nonsensical forms of "." and "..".
('./../g', 'http://a/b/g'),
('./g/.', 'http://a/b/c/g/'),
('g/./h', 'http://a/b/c/g/h'),
('g/../h', 'http://a/b/c/h'),
('g;x=1/./y', 'http://a/b/c/g;x=1/y'),
('g;x=1/../y', 'http://a/b/c/y'),
# Separating the reference's query and fragment components from the path.
('g?y/./x', 'http://a/b/c/g?y/./x'),
('g?y/../x', 'http://a/b/c/g?y/../x'),
('g#s/./x', 'http://a/b/c/g#s/./x'),
('g#s/../x', 'http://a/b/c/g#s/../x'),
# Not supported: scheme with relative path
#("http:g", "http:g"), # strict
#("http:g", "http://a/b/c/g"), # non-strict
]
_percentenc = lambda s: ''.join('%%%02X' % ord(c) for c in s)
class TestURL(SynchronousTestCase):
"""
Tests for L{URL}.
"""
def assertUnicoded(self, u):
"""
The given L{URL}'s components should be L{unicode}.
@param u: The L{URL} to test.
"""
self.assertTrue(isinstance(u.scheme, unicode)
or u.scheme is None, repr(u))
self.assertTrue(isinstance(u.host, unicode)
or u.host is None, repr(u))
for seg in u.path:
self.assertIsInstance(seg, unicode, repr(u))
for (k, v) in u.query:
self.assertIsInstance(k, unicode, repr(u))
self.assertTrue(v is None or isinstance(v, unicode), repr(u))
self.assertIsInstance(u.fragment, unicode, repr(u))
def assertURL(self, u, scheme, host, path, query,
fragment, port, userinfo=u''):
"""
The given L{URL} should have the given components.
@param u: The actual L{URL} to examine.
@param scheme: The expected scheme.
@param host: The expected host.
@param path: The expected path.
@param query: The expected query.
@param fragment: The expected fragment.
@param port: The expected port.
@param userinfo: The expected userinfo.
"""
actual = (u.scheme, u.host, u.path, u.query,
u.fragment, u.port, u.userinfo)
expected = (scheme, host, tuple(path), tuple(query),
fragment, port, u.userinfo)
self.assertEqual(actual, expected)
def test_initDefaults(self):
"""
L{URL} should have appropriate default values.
"""
def check(u):
self.assertUnicoded(u)
self.assertURL(u, u'http', u'', [], [], u'', 80, u'')
check(URL(u'http', u''))
check(URL(u'http', u'', [], []))
check(URL(u'http', u'', [], [], u''))
def test_init(self):
"""
L{URL} should accept L{unicode} parameters.
"""
u = URL(u's', u'h', [u'p'], [(u'k', u'v'), (u'k', None)], u'f')
self.assertUnicoded(u)
self.assertURL(u, u's', u'h', [u'p'], [(u'k', u'v'), (u'k', None)],
u'f', None)
self.assertURL(URL(u'http', u'\xe0', [u'\xe9'],
[(u'\u03bb', u'\u03c0')], u'\u22a5'),
u'http', u'\xe0', [u'\xe9'],
[(u'\u03bb', u'\u03c0')], u'\u22a5', 80)
def test_initPercent(self):
"""
L{URL} should accept (and not interpret) percent characters.
"""
u = URL(u's', u'%68', [u'%70'], [(u'%6B', u'%76'), (u'%6B', None)],
u'%66')
self.assertUnicoded(u)
self.assertURL(u,
u's', u'%68', [u'%70'],
[(u'%6B', u'%76'), (u'%6B', None)],
u'%66', None)
def test_repr(self):
"""
L{URL.__repr__} will display the canonical form of the URL, wrapped in
a L{URL.fromText} invocation, so that it is C{eval}-able but still easy
to read.
"""
self.assertEqual(
repr(URL(scheme=u'http', host=u'foo', path=[u'bar'],
query=[(u'baz', None), (u'k', u'v')],
fragment=u'frob')),
"URL.from_text(%s)" % (repr(u"http://foo/bar?baz&k=v#frob"),)
)
def test_fromText(self):
"""
Round-tripping L{URL.fromText} with C{str} results in an equivalent
URL.
"""
urlpath = URL.fromText(theurl)
self.assertEqual(theurl, urlpath.asText())
def test_roundtrip(self):
"""
L{URL.asText} should invert L{URL.fromText}.
"""
tests = (
"http://localhost",
"http://localhost/",
"http://localhost/foo",
"http://localhost/foo/",
"http://localhost/foo!!bar/",
"http://localhost/foo%20bar/",
"http://localhost/foo%2Fbar/",
"http://localhost/foo?n",
"http://localhost/foo?n=v",
"http://localhost/foo?n=/a/b",
"http://example.com/foo!@$bar?b!@z=123",
"http://localhost/asd?a=asd%20sdf/345",
"http://(%2525)/(%2525)?(%2525)&(%2525)=(%2525)#(%2525)",
"http://(%C3%A9)/(%C3%A9)?(%C3%A9)&(%C3%A9)=(%C3%A9)#(%C3%A9)",
)
for test in tests:
result = URL.fromText(test).asText()
self.assertEqual(test, result)
def test_equality(self):
"""
Two URLs decoded using L{URL.fromText} will be equal (C{==}) if they
decoded same URL string, and unequal (C{!=}) if they decoded different
strings.
"""
urlpath = URL.fromText(theurl)
self.assertEqual(urlpath, URL.fromText(theurl))
self.assertNotEqual(
urlpath,
URL.fromText('ftp://www.anotherinvaliddomain.com/'
'foo/bar/baz/?zot=21&zut')
)
def test_fragmentEquality(self):
"""
An URL created with the empty string for a fragment compares equal
to an URL created with an unspecified fragment.
"""
self.assertEqual(URL(fragment=u''), URL())
self.assertEqual(URL.fromText(u"http://localhost/#"),
URL.fromText(u"http://localhost/"))
def test_child(self):
"""
L{URL.child} appends a new path segment, but does not affect the query
or fragment.
"""
urlpath = URL.fromText(theurl)
self.assertEqual("http://www.foo.com/a/nice/path/gong?zot=23&zut",
urlpath.child(u'gong').asText())
self.assertEqual("http://www.foo.com/a/nice/path/gong%2F?zot=23&zut",
urlpath.child(u'gong/').asText())
self.assertEqual(
"http://www.foo.com/a/nice/path/gong%2Fdouble?zot=23&zut",
urlpath.child(u'gong/double').asText()
)
self.assertEqual(
"http://www.foo.com/a/nice/path/gong%2Fdouble%2F?zot=23&zut",
urlpath.child(u'gong/double/').asText()
)
def test_multiChild(self):
"""
L{URL.child} receives multiple segments as C{*args} and appends each in
turn.
"""
self.assertEqual(URL.fromText('http://example.com/a/b')
.child('c', 'd', 'e').asText(),
'http://example.com/a/b/c/d/e')
def test_childInitRoot(self):
"""
L{URL.child} of a L{URL} without a path produces a L{URL} with a single
path segment.
"""
childURL = URL(host=u"www.foo.com").child(u"c")
self.assertTrue(childURL.rooted)
self.assertEqual("http://www.foo.com/c", childURL.asText())
def test_sibling(self):
"""
L{URL.sibling} of a L{URL} replaces the last path segment, but does not
affect the query or fragment.
"""
urlpath = URL.fromText(theurl)
self.assertEqual(
"http://www.foo.com/a/nice/path/sister?zot=23&zut",
urlpath.sibling(u'sister').asText()
)
# Use an url without trailing '/' to check child removal.
theurl2 = "http://www.foo.com/a/nice/path?zot=23&zut"
urlpath = URL.fromText(theurl2)
self.assertEqual(
"http://www.foo.com/a/nice/sister?zot=23&zut",
urlpath.sibling(u'sister').asText()
)
def test_click(self):
"""
L{URL.click} interprets the given string as a relative URI-reference
and returns a new L{URL} interpreting C{self} as the base absolute URI.
"""
urlpath = URL.fromText(theurl)
# A null uri should be valid (return here).
self.assertEqual("http://www.foo.com/a/nice/path/?zot=23&zut",
urlpath.click("").asText())
# A simple relative path remove the query.
self.assertEqual("http://www.foo.com/a/nice/path/click",
urlpath.click("click").asText())
# An absolute path replace path and query.
self.assertEqual("http://www.foo.com/click",
urlpath.click("/click").asText())
# Replace just the query.
self.assertEqual("http://www.foo.com/a/nice/path/?burp",
urlpath.click("?burp").asText())
# One full url to another should not generate '//' between authority.
# and path
self.assertNotIn("//foobar",
urlpath.click('http://www.foo.com/foobar').asText())
# From a url with no query clicking a url with a query, the query
# should be handled properly.
u = URL.fromText('http://www.foo.com/me/noquery')
self.assertEqual('http://www.foo.com/me/17?spam=158',
u.click('/me/17?spam=158').asText())
# Check that everything from the path onward is removed when the click
# link has no path.
u = URL.fromText('http://localhost/foo?abc=def')
self.assertEqual(u.click('http://www.python.org').asText(),
'http://www.python.org')
def test_clickRFC3986(self):
"""
L{URL.click} should correctly resolve the examples in RFC 3986.
"""
base = URL.fromText(relativeLinkBaseForRFC3986)
for (ref, expected) in relativeLinkTestsForRFC3986:
self.assertEqual(base.click(ref).asText(), expected)
def test_clickSchemeRelPath(self):
"""
L{URL.click} should not accept schemes with relative paths.
"""
base = URL.fromText(relativeLinkBaseForRFC3986)
self.assertRaises(NotImplementedError, base.click, 'g:h')
self.assertRaises(NotImplementedError, base.click, 'http:h')
def test_cloneUnchanged(self):
"""
Verify that L{URL.replace} doesn't change any of the arguments it
is passed.
"""
urlpath = URL.fromText('https://x:1/y?z=1#A')
self.assertEqual(
urlpath.replace(urlpath.scheme,
urlpath.host,
urlpath.path,
urlpath.query,
urlpath.fragment,
urlpath.port),
urlpath)
self.assertEqual(
urlpath.replace(),
urlpath)
def test_clickCollapse(self):
"""
L{URL.click} collapses C{.} and C{..} according to RFC 3986 section
5.2.4.
"""
tests = [
['http://localhost/', '.', 'http://localhost/'],
['http://localhost/', '..', 'http://localhost/'],
['http://localhost/a/b/c', '.', 'http://localhost/a/b/'],
['http://localhost/a/b/c', '..', 'http://localhost/a/'],
['http://localhost/a/b/c', './d/e', 'http://localhost/a/b/d/e'],
['http://localhost/a/b/c', '../d/e', 'http://localhost/a/d/e'],
['http://localhost/a/b/c', '/./d/e', 'http://localhost/d/e'],
['http://localhost/a/b/c', '/../d/e', 'http://localhost/d/e'],
['http://localhost/a/b/c/', '../../d/e/',
'http://localhost/a/d/e/'],
['http://localhost/a/./c', '../d/e', 'http://localhost/d/e'],
['http://localhost/a/./c/', '../d/e', 'http://localhost/a/d/e'],
['http://localhost/a/b/c/d', './e/../f/../g',
'http://localhost/a/b/c/g'],
['http://localhost/a/b/c', 'd//e', 'http://localhost/a/b/d//e'],
]
for start, click, expected in tests:
actual = URL.fromText(start).click(click).asText()
self.assertEqual(
actual,
expected,
"{start}.click({click}) => {actual} not {expected}".format(
start=start,
click=repr(click),
actual=actual,
expected=expected,
)
)
def test_queryAdd(self):
"""
L{URL.add} adds query parameters.
"""
self.assertEqual(
"http://www.foo.com/a/nice/path/?foo=bar",
URL.fromText("http://www.foo.com/a/nice/path/")
.add(u"foo", u"bar").asText())
self.assertEqual(
"http://www.foo.com/?foo=bar",
URL(host=u"www.foo.com").add(u"foo", u"bar")
.asText())
urlpath = URL.fromText(theurl)
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&burp",
urlpath.add(u"burp").asText())
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&burp=xxx",
urlpath.add(u"burp", u"xxx").asText())
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&burp=xxx&zing",
urlpath.add(u"burp", u"xxx").add(u"zing").asText())
# Note the inversion!
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&zing&burp=xxx",
urlpath.add(u"zing").add(u"burp", u"xxx").asText())
# Note the two values for the same name.
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&burp=xxx&zot=32",
urlpath.add(u"burp", u"xxx").add(u"zot", u'32')
.asText())
def test_querySet(self):
"""
L{URL.set} replaces query parameters by name.
"""
urlpath = URL.fromText(theurl)
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=32&zut",
urlpath.set(u"zot", u'32').asText())
# Replace name without value with name/value and vice-versa.
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot&zut=itworked",
urlpath.set(u"zot").set(u"zut", u"itworked").asText()
)
# Q: what happens when the query has two values and we replace?
# A: we replace both values with a single one
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=32&zut",
urlpath.add(u"zot", u"xxx").set(u"zot", u'32').asText()
)
def test_queryRemove(self):
"""
L{URL.remove} removes all instances of a query parameter.
"""
url = URL.fromText(u"https://example.com/a/b/?foo=1&bar=2&foo=3")
self.assertEqual(
url.remove(u"foo"),
URL.fromText(u"https://example.com/a/b/?bar=2")
)
def test_parseEqualSignInParamValue(self):
"""
Every C{=}-sign after the first in a query parameter is simply included
in the value of the parameter.
"""
u = URL.fromText('http://localhost/?=x=x=x')
self.assertEqual(u.get(u''), ['x=x=x'])
self.assertEqual(u.asText(), 'http://localhost/?=x%3Dx%3Dx')
u = URL.fromText('http://localhost/?foo=x=x=x&bar=y')
self.assertEqual(u.query, (('foo', 'x=x=x'),
('bar', 'y')))
self.assertEqual(u.asText(), 'http://localhost/?foo=x%3Dx%3Dx&bar=y')
def test_empty(self):
"""
An empty L{URL} should serialize as the empty string.
"""
self.assertEqual(URL().asText(), u'')
def test_justQueryText(self):
"""
An L{URL} with query text should serialize as just query text.
"""
u = URL(query=[(u"hello", u"world")])
self.assertEqual(u.asText(), u'?hello=world')
def test_identicalEqual(self):
"""
L{URL} compares equal to itself.
"""
u = URL.fromText('http://localhost/')
self.assertEqual(u, u)
def test_similarEqual(self):
"""
URLs with equivalent components should compare equal.
"""
u1 = URL.fromText('http://localhost/')
u2 = URL.fromText('http://localhost/')
self.assertEqual(u1, u2)
def test_differentNotEqual(self):
"""
L{URL}s that refer to different resources are both unequal (C{!=}) and
also not equal (not C{==}).
"""
u1 = URL.fromText('http://localhost/a')
u2 = URL.fromText('http://localhost/b')
self.assertFalse(u1 == u2, "%r != %r" % (u1, u2))
self.assertNotEqual(u1, u2)
def test_otherTypesNotEqual(self):
"""
L{URL} is not equal (C{==}) to other types.
"""
u = URL.fromText('http://localhost/')
self.assertFalse(u == 42, "URL must not equal a number.")
self.assertFalse(u == object(), "URL must not equal an object.")
self.assertNotEqual(u, 42)
self.assertNotEqual(u, object())
def test_identicalNotUnequal(self):
"""
Identical L{URL}s are not unequal (C{!=}) to each other.
"""
u = URL.fromText('http://localhost/')
self.assertFalse(u != u, "%r == itself" % u)
def test_similarNotUnequal(self):
"""
Structurally similar L{URL}s are not unequal (C{!=}) to each other.
"""
u1 = URL.fromText('http://localhost/')
u2 = URL.fromText('http://localhost/')
self.assertFalse(u1 != u2, "%r == %r" % (u1, u2))
def test_differentUnequal(self):
"""
Structurally different L{URL}s are unequal (C{!=}) to each other.
"""
u1 = URL.fromText('http://localhost/a')
u2 = URL.fromText('http://localhost/b')
self.assertTrue(u1 != u2, "%r == %r" % (u1, u2))
def test_otherTypesUnequal(self):
"""
L{URL} is unequal (C{!=}) to other types.
"""
u = URL.fromText('http://localhost/')
self.assertTrue(u != 42, "URL must differ from a number.")
self.assertTrue(u != object(), "URL must be differ from an object.")
def test_asURI(self):
"""
L{URL.asURI} produces an URI which converts any URI unicode encoding
into pure US-ASCII and returns a new L{URL}.
"""
unicodey = ('http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
'\N{LATIN SMALL LETTER E}\N{COMBINING ACUTE ACCENT}'
'?\N{LATIN SMALL LETTER A}\N{COMBINING ACUTE ACCENT}='
'\N{LATIN SMALL LETTER I}\N{COMBINING ACUTE ACCENT}'
'#\N{LATIN SMALL LETTER U}\N{COMBINING ACUTE ACCENT}')
iri = URL.fromText(unicodey)
uri = iri.asURI()
self.assertEqual(iri.host, '\N{LATIN SMALL LETTER E WITH ACUTE}.com')
self.assertEqual(iri.path[0],
'\N{LATIN SMALL LETTER E}\N{COMBINING ACUTE ACCENT}')
self.assertEqual(iri.asText(), unicodey)
expectedURI = 'http://xn--9ca.com/%C3%A9?%C3%A1=%C3%AD#%C3%BA'
actualURI = uri.asText()
self.assertEqual(actualURI, expectedURI,
'%r != %r' % (actualURI, expectedURI))
def test_asIRI(self):
"""
L{URL.asIRI} decodes any percent-encoded text in the URI, making it
more suitable for reading by humans, and returns a new L{URL}.
"""
asciiish = 'http://xn--9ca.com/%C3%A9?%C3%A1=%C3%AD#%C3%BA'
uri = URL.fromText(asciiish)
iri = uri.asIRI()
self.assertEqual(uri.host, 'xn--9ca.com')
self.assertEqual(uri.path[0], '%C3%A9')
self.assertEqual(uri.asText(), asciiish)
expectedIRI = ('http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
'\N{LATIN SMALL LETTER E WITH ACUTE}'
'?\N{LATIN SMALL LETTER A WITH ACUTE}='
'\N{LATIN SMALL LETTER I WITH ACUTE}'
'#\N{LATIN SMALL LETTER U WITH ACUTE}')
actualIRI = iri.asText()
self.assertEqual(actualIRI, expectedIRI,
'%r != %r' % (actualIRI, expectedIRI))
def test_badUTF8AsIRI(self):
"""
Bad UTF-8 in a path segment, query parameter, or fragment results in
that portion of the URI remaining percent-encoded in the IRI.
"""
urlWithBinary = 'http://xn--9ca.com/%00%FF/%C3%A9'
uri = URL.fromText(urlWithBinary)
iri = uri.asIRI()
expectedIRI = ('http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
'%00%FF/'
'\N{LATIN SMALL LETTER E WITH ACUTE}')
actualIRI = iri.asText()
self.assertEqual(actualIRI, expectedIRI,
'%r != %r' % (actualIRI, expectedIRI))
def test_alreadyIRIAsIRI(self):
"""
A L{URL} composed of non-ASCII text will result in non-ASCII text.
"""
unicodey = ('http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
'\N{LATIN SMALL LETTER E}\N{COMBINING ACUTE ACCENT}'
'?\N{LATIN SMALL LETTER A}\N{COMBINING ACUTE ACCENT}='
'\N{LATIN SMALL LETTER I}\N{COMBINING ACUTE ACCENT}'
'#\N{LATIN SMALL LETTER U}\N{COMBINING ACUTE ACCENT}')
iri = URL.fromText(unicodey)
alsoIRI = iri.asIRI()
self.assertEqual(alsoIRI.asText(), unicodey)
def test_alreadyURIAsURI(self):
"""
A L{URL} composed of encoded text will remain encoded.
"""
expectedURI = 'http://xn--9ca.com/%C3%A9?%C3%A1=%C3%AD#%C3%BA'
uri = URL.fromText(expectedURI)
actualURI = uri.asURI().asText()
self.assertEqual(actualURI, expectedURI)
def test_userinfo(self):
"""
L{URL.fromText} will parse the C{userinfo} portion of the URI
separately from the host and port.
"""
url = URL.fromText(
'http://someuser:somepassword@example.com/some-segment@ignore'
)
self.assertEqual(url.authority(True),
'someuser:somepassword@example.com')
self.assertEqual(url.authority(False), 'someuser:@example.com')
self.assertEqual(url.userinfo, 'someuser:somepassword')
self.assertEqual(url.user, 'someuser')
self.assertEqual(url.asText(),
'http://someuser:@example.com/some-segment@ignore')
self.assertEqual(
url.replace(userinfo=u"someuser").asText(),
'http://someuser@example.com/some-segment@ignore'
)
def test_portText(self):
"""
L{URL.fromText} parses custom port numbers as integers.
"""
portURL = URL.fromText(u"http://www.example.com:8080/")
self.assertEqual(portURL.port, 8080)
self.assertEqual(portURL.asText(), u"http://www.example.com:8080/")
def test_mailto(self):
"""
Although L{URL} instances are mainly for dealing with HTTP, other
schemes (such as C{mailto:}) should work as well. For example,
L{URL.fromText}/L{URL.asText} round-trips cleanly for a C{mailto:} URL
representing an email address.
"""
self.assertEqual(URL.fromText(u"mailto:user@example.com").asText(),
u"mailto:user@example.com")
def test_queryIterable(self):
"""
When a L{URL} is created with a C{query} argument, the C{query}
argument is converted into an N-tuple of 2-tuples.
"""
url = URL(query=[[u'alpha', u'beta']])
self.assertEqual(url.query, ((u'alpha', u'beta'),))
def test_pathIterable(self):
"""
When a L{URL} is created with a C{path} argument, the C{path} is
converted into a tuple.
"""
url = URL(path=[u'hello', u'world'])
self.assertEqual(url.path, (u'hello', u'world'))
def test_invalidArguments(self):
"""
Passing an argument of the wrong type to any of the constructor
arguments of L{URL} will raise a descriptive L{TypeError}.
L{URL} typechecks very aggressively to ensure that its constitutent
parts are all properly immutable and to prevent confusing errors when
bad data crops up in a method call long after the code that called the
constructor is off the stack.
"""
class Unexpected(object):
def __str__(self):
return "wrong"
def __repr__(self):
return "<unexpected>"
defaultExpectation = "unicode" if bytes is str else "str"
def assertRaised(raised, expectation, name):
self.assertEqual(str(raised.exception),
"expected {} for {}, got {}".format(
expectation,
name, "<unexpected>"))
def check(param, expectation=defaultExpectation):
with self.assertRaises(TypeError) as raised:
URL(**{param: Unexpected()})
assertRaised(raised, expectation, param)
check("scheme")
check("host")
check("fragment")
check("rooted", "bool")
check("userinfo")
check("port", "int or NoneType")
with self.assertRaises(TypeError) as raised:
URL(path=[Unexpected(),])
assertRaised(raised, defaultExpectation, "path segment")
with self.assertRaises(TypeError) as raised:
URL(query=[(u"name", Unexpected()),])
assertRaised(raised, defaultExpectation + " or NoneType",
"query parameter value")
with self.assertRaises(TypeError) as raised:
URL(query=[(Unexpected(), u"value"),])
assertRaised(raised, defaultExpectation, "query parameter name")
# No custom error message for this one, just want to make sure
# non-2-tuples don't get through.
with self.assertRaises(TypeError):
URL(query=[Unexpected()])
with self.assertRaises(ValueError):
URL(query=[(u'k', u'v', u'vv')])
with self.assertRaises(ValueError):
URL(query=[(u'k',)])
url = URL.fromText("https://valid.example.com/")
with self.assertRaises(TypeError) as raised:
url.child(Unexpected())
assertRaised(raised, defaultExpectation, "path segment")
with self.assertRaises(TypeError) as raised:
url.sibling(Unexpected())
assertRaised(raised, defaultExpectation, "path segment")
with self.assertRaises(TypeError) as raised:
url.click(Unexpected())
assertRaised(raised, defaultExpectation, "relative URL")
def test_technicallyTextIsIterableBut(self):
"""
Technically, L{str} (or L{unicode}, as appropriate) is iterable, but
C{URL(path="foo")} resulting in C{URL.fromText("f/o/o")} is never what
you want.
"""
with self.assertRaises(TypeError) as raised:
URL(path=u'foo')
self.assertEqual(
str(raised.exception),
"expected iterable of text for path, not: {}"
.format(repr(u'foo'))
)
class URLDeprecationTests(SynchronousTestCase):
"""
L{twisted.python.constants} is deprecated.
"""
def test_urlDeprecation(self):
"""
L{twisted.python.constants} is deprecated since Twisted 17.5.0.
"""
from twisted.python import url
url
warningsShown = self.flushWarnings([self.test_urlDeprecation])
self.assertEqual(1, len(warningsShown))
self.assertEqual(
("twisted.python.url was deprecated in Twisted 17.5.0:"
" Please use hyperlink from PyPI instead."),
warningsShown[0]['message'])
| |
"""
desispec.io.frame
=================
I/O routines for Frame objects
"""
import os.path
import time
import numpy as np
import scipy, scipy.sparse
from astropy.io import fits
from astropy.table import Table
import warnings
from desiutil.depend import add_dependencies
from desiutil.log import get_logger
from ..frame import Frame
from .fibermap import read_fibermap
from .meta import findfile, get_nights, get_exposures
from .util import fitsheader, native_endian, makepath
from . import iotime
def write_frame(outfile, frame, header=None, fibermap=None, units=None):
    """Write a frame fits file and returns path to file written.

    Args:
        outfile: full path to output file, or tuple (night, expid, channel)
        frame: desispec.frame.Frame object with wave, flux, ivar...

    Optional:
        header: astropy.io.fits.Header or dict to override frame.header
        fibermap: table to store as FIBERMAP HDU
        units: value for the FLUX HDU BUNIT keyword; overrides any BUNIT
            already present in the header (a warning is logged on conflict)

    Returns:
        full filepath of output file that was written

    Raises:
        IOError: if the frame fails its own vet() consistency check

    Note:
        to create a Frame object to pass into write_frame,
        frame = Frame(wave, flux, ivar, resolution_data)
    """
    log = get_logger()
    outfile = makepath(outfile, 'frame')

    #- Ignore some known and harmless units warnings
    import warnings
    warnings.filterwarnings('ignore', message="'.*nanomaggies.* did not parse as fits unit.*")
    warnings.filterwarnings('ignore', message=r".*'10\*\*6 arcsec.* did not parse as fits unit.*")

    # Header precedence: an explicit `header` argument wins over frame.meta
    if header is not None:
        hdr = fitsheader(header)
    else:
        hdr = fitsheader(frame.meta)

    add_dependencies(hdr)

    # Vet: refuse to write a frame that fails its internal consistency check
    diagnosis = frame.vet()
    if diagnosis != 0:
        raise IOError("Frame did not pass simple vetting test. diagnosis={:d}".format(diagnosis))

    hdus = fits.HDUList()
    # FLUX is the primary HDU; pixel data downcast to 32-bit floats
    x = fits.PrimaryHDU(frame.flux.astype('f4'), header=hdr)
    x.header['EXTNAME'] = 'FLUX'
    if units is not None:
        units = str(units)
        if 'BUNIT' in hdr and hdr['BUNIT'] != units:
            log.warning('BUNIT {bunit} != units {units}; using {units}'.format(
                bunit=hdr['BUNIT'], units=units))
        x.header['BUNIT'] = units
    hdus.append(x)

    hdus.append( fits.ImageHDU(frame.ivar.astype('f4'), name='IVAR') )
    # hdus.append( fits.CompImageHDU(frame.mask, name='MASK') )
    hdus.append( fits.ImageHDU(frame.mask, name='MASK') )
    hdus.append( fits.ImageHDU(frame.wave.astype('f8'), name='WAVELENGTH') )
    hdus[-1].header['BUNIT'] = 'Angstrom'
    # Full resolution matrix if present, otherwise fall back to the qproc
    # quick-resolution Gaussian sigmas (stored as YSIGMA with NDIAG keyword)
    if frame.resolution_data is not None:
        hdus.append( fits.ImageHDU(frame.resolution_data.astype('f4'), name='RESOLUTION' ) )
    elif frame.wsigma is not None:
        log.debug("Using ysigma from qproc")
        qrimg=fits.ImageHDU(frame.wsigma.astype('f4'), name='YSIGMA' )
        qrimg.header["NDIAG"] =frame.ndiag
        hdus.append(qrimg)
    # Fiber information, in order of preference: the explicit fibermap
    # argument, the frame's own fibermap, or at least a FIBERMIN keyword
    if fibermap is not None:
        fibermap = Table(fibermap)
        fibermap.meta['EXTNAME'] = 'FIBERMAP'
        add_dependencies(fibermap.meta)
        hdus.append( fits.convenience.table_to_hdu(fibermap) )
    elif frame.fibermap is not None:
        fibermap = Table(frame.fibermap)
        fibermap.meta['EXTNAME'] = 'FIBERMAP'
        hdus.append( fits.convenience.table_to_hdu(fibermap) )
    elif frame.spectrograph is not None:
        x.header['FIBERMIN'] = 500*frame.spectrograph  # Hard-coded (as in desispec.frame)
    else:
        log.error("You are likely writing a frame without sufficient fiber info")

    if frame.chi2pix is not None:
        hdus.append( fits.ImageHDU(frame.chi2pix.astype('f4'), name='CHI2PIX' ) )

    if frame.scores is not None :
        scores_tbl = Table(frame.scores)
        scores_tbl.meta['EXTNAME'] = 'SCORES'
        hdus.append( fits.convenience.table_to_hdu(scores_tbl) )
        if frame.scores_comments is not None : # add comments in header
            hdu=hdus['SCORES']
            # Attach the per-column comment to the matching TTYPEn keyword
            for i in range(1,999):
                key = 'TTYPE'+str(i)
                if key in hdu.header:
                    value = hdu.header[key]
                    if value in frame.scores_comments.keys() :
                        hdu.header[key] = (value, frame.scores_comments[value])

    t0 = time.time()
    # Write to a temp file then rename, so readers never see a partial file
    hdus.writeto(outfile+'.tmp', overwrite=True, checksum=True)
    os.rename(outfile+'.tmp', outfile)
    duration = time.time() - t0
    log.info(iotime.format('write', outfile, duration))

    return outfile
def read_meta_frame(filename, extname=0):
    """Return the meta information (header) of a Frame file.

    Args:
        filename: path to a frame FITS file
        extname: int or str, optional; HDU from which to take the header

    Returns:
        astropy.io.fits.Header of the requested HDU
    """
    with fits.open(filename, uint=True, memmap=False) as hdulist:
        header = hdulist[extname].header
    return header
def read_frame(filename, nspec=None, skip_resolution=False):
    """Reads a frame fits file and returns its data.

    Args:
        filename: path to a file, or (night, expid, camera) tuple where
            night = string YEARMMDD
            expid = integer exposure ID
            camera = b0, r1, .. z9
        nspec: int, optional; keep only the first nspec spectra
        skip_resolution: bool, optional
            Speed up read time (>5x) by avoiding the Resolution matrix

    Returns:
        desispec.Frame object with attributes wave, flux, ivar, etc.

    Raises:
        FileNotFoundError: if the file does not exist
    """
    log = get_logger()
    #- check if filename is (night, expid, camera) tuple instead
    if not isinstance(filename, str):
        night, expid, camera = filename
        filename = findfile('frame', night, expid, camera)

    if not os.path.isfile(filename):
        # bugfix: original message was "cannot open"+filename (missing space)
        raise FileNotFoundError("cannot open "+filename)

    t0 = time.time()
    fx = fits.open(filename, uint=True, memmap=False)
    hdr = fx[0].header
    # Promote to native-endian float64 arrays for downstream numpy work
    flux = native_endian(fx['FLUX'].data.astype('f8'))
    ivar = native_endian(fx['IVAR'].data.astype('f8'))
    wave = native_endian(fx['WAVELENGTH'].data.astype('f8'))

    if 'MASK' in fx:
        mask = native_endian(fx['MASK'].data)
    else:
        mask = None  #- let the Frame object create the default mask

    # Optional HDUs default to None unless present in the file
    resolution_data = None
    qwsigma = None
    qndiag = None
    fibermap = None
    chi2pix = None
    scores = None
    scores_comments = None

    if skip_resolution:
        pass
    elif 'RESOLUTION' in fx:
        resolution_data = native_endian(fx['RESOLUTION'].data.astype('f8'))
    elif 'QUICKRESOLUTION' in fx:
        # qproc-style quick resolution: per-fiber sigmas plus NDIAG keyword
        qr = fx['QUICKRESOLUTION'].header
        qndiag = qr['NDIAG']
        qwsigma = native_endian(fx['QUICKRESOLUTION'].data.astype('f4'))

    if 'FIBERMAP' in fx:
        fibermap = read_fibermap(filename)
    else:
        fibermap = None

    if 'CHI2PIX' in fx:
        chi2pix = native_endian(fx['CHI2PIX'].data.astype('f8'))
    else:
        chi2pix = None

    if 'SCORES' in fx:
        scores = fx['SCORES'].data
        # Header carries the per-column comments (TTYPEn keyword comments)
        scores_comments = dict()
        head = fx['SCORES'].header
        for i in range(1, len(scores.columns)+1):
            k = 'TTYPE'+str(i)
            scores_comments[head[k]] = head.comments[k]
    else:
        scores = None
        scores_comments = None

    fx.close()
    duration = time.time() - t0
    log.info(iotime.format('read', filename, duration))

    if nspec is not None:
        # Trim all per-spectrum arrays to the first nspec rows
        flux = flux[0:nspec]
        ivar = ivar[0:nspec]
        if resolution_data is not None:
            resolution_data = resolution_data[0:nspec]
        elif qwsigma is not None:
            # bugfix: qwsigma is None when neither RESOLUTION nor
            # QUICKRESOLUTION was read (e.g. skip_resolution=True); the
            # original unconditional slice raised TypeError in that case.
            qwsigma = qwsigma[0:nspec]
        if chi2pix is not None:
            chi2pix = chi2pix[0:nspec]
        if mask is not None:
            mask = mask[0:nspec]

    frame = Frame(wave, flux, ivar, mask, resolution_data, meta=hdr, fibermap=fibermap, chi2pix=chi2pix,
                  scores=scores, scores_comments=scores_comments,
                  wsigma=qwsigma, ndiag=qndiag, suppress_res_warning=skip_resolution)

    # This Frame came from a file, so record its origin
    frame.filename = os.path.abspath(filename)

    # Vet -- warn (do not raise) on inconsistency, matching prior behavior
    diagnosis = frame.vet()
    if diagnosis != 0:
        warnings.warn("Frame did not pass simple vetting test. diagnosis={:d}".format(diagnosis))
        log.error("Frame did not pass simple vetting test. diagnosis={:d}".format(diagnosis))

    return frame
def search_for_framefile(frame_file, specprod_dir=None):
    """Search for an input frame_file in the desispec redux hierarchy.

    Args:
        frame_file: str, file name of the form <root>-<camera>-<expid>.fits
        specprod_dir: str, optional production directory

    Returns:
        mfile: str, full path to frame_file if found

    Raises:
        FileNotFoundError: if no matching file exists for any night
    """
    log = get_logger()
    # Parse "<root>-<camera>-<expid>.<ext>" into its components
    path, ifile = os.path.split(frame_file)
    splits = ifile.split('-')
    root = splits[0]
    camera = splits[1]
    fexposure = int(splits[2].split('.')[0])

    # Scan every night of the production for a matching exposure
    nights = get_nights(specprod_dir=specprod_dir)
    for night in nights:
        for exposure in get_exposures(night, specprod_dir=specprod_dir):
            if exposure == fexposure:
                mfile = findfile(root, camera=camera, night=night,
                                 expid=exposure, specprod_dir=specprod_dir)
                if os.path.isfile(mfile):
                    return mfile
                else:
                    log.error("Expected file {:s} not found..".format(mfile))
    # bugfix: previously fell through and silently returned None although the
    # docstring promised an error; raise as documented.
    raise FileNotFoundError(
        "{:s} not found in the redux hierarchy".format(frame_file))
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class WorkersCumulativeStatisticsList(ListResource):
    """List resource for the cumulative statistics of a Workspace's Workers."""

    def __init__(self, version, workspace_sid):
        """
        Initialize the WorkersCumulativeStatisticsList.

        :param Version version: Version that contains the resource
        :param workspace_sid: The SID of the Workspace that contains the Workers

        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsList
        """
        super(WorkersCumulativeStatisticsList, self).__init__(version)

        # Path solution used when constructing the context
        self._solution = {'workspace_sid': workspace_sid, }

    def get(self):
        """
        Construct a WorkersCumulativeStatisticsContext for this Workspace.

        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsContext
        """
        return WorkersCumulativeStatisticsContext(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
        )

    def __call__(self):
        """
        Construct a WorkersCumulativeStatisticsContext (alias of :meth:`get`).

        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsContext
        """
        return self.get()

    def __repr__(self):
        """
        Provide a friendly representation.

        :rtype: str
        """
        return '<Twilio.Taskrouter.V1.WorkersCumulativeStatisticsList>'
class WorkersCumulativeStatisticsPage(Page):
    """Single page of WorkersCumulativeStatistics API results."""

    def __init__(self, version, response, solution):
        """
        Initialize the WorkersCumulativeStatisticsPage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: path solution carrying the workspace_sid

        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsPage
        """
        super(WorkersCumulativeStatisticsPage, self).__init__(version, response)

        # Path solution forwarded to the instances built from this page
        self._solution = solution

    def get_instance(self, payload):
        """
        Build a WorkersCumulativeStatisticsInstance from an API payload.

        :param dict payload: Payload response from the API

        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsInstance
        """
        return WorkersCumulativeStatisticsInstance(
            self._version,
            payload,
            workspace_sid=self._solution['workspace_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation.

        :rtype: str
        """
        return '<Twilio.Taskrouter.V1.WorkersCumulativeStatisticsPage>'
class WorkersCumulativeStatisticsContext(InstanceContext):
    """Context for fetching a Workspace's cumulative Worker statistics."""

    def __init__(self, version, workspace_sid):
        """
        Initialize the WorkersCumulativeStatisticsContext.

        :param Version version: Version that contains the resource
        :param workspace_sid: The SID of the Workspace with the resource to fetch

        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsContext
        """
        super(WorkersCumulativeStatisticsContext, self).__init__(version)

        # Path solution and the fully resolved request URI
        self._solution = {'workspace_sid': workspace_sid, }
        self._uri = '/Workspaces/{workspace_sid}/Workers/CumulativeStatistics'.format(**self._solution)

    def fetch(self, end_date=values.unset, minutes=values.unset,
              start_date=values.unset, task_channel=values.unset):
        """
        Fetch a WorkersCumulativeStatisticsInstance.

        :param datetime end_date: Only calculate statistics from on or before this date
        :param unicode minutes: Only calculate statistics since this many minutes in the past
        :param datetime start_date: Only calculate statistics from on or after this date
        :param unicode task_channel: Only calculate cumulative statistics on this TaskChannel

        :returns: Fetched WorkersCumulativeStatisticsInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsInstance
        """
        query = {
            'EndDate': serialize.iso8601_datetime(end_date),
            'Minutes': minutes,
            'StartDate': serialize.iso8601_datetime(start_date),
            'TaskChannel': task_channel,
        }

        payload = self._version.fetch(
            'GET',
            self._uri,
            params=values.of(query),
        )

        return WorkersCumulativeStatisticsInstance(
            self._version,
            payload,
            workspace_sid=self._solution['workspace_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Taskrouter.V1.WorkersCumulativeStatisticsContext {}>'.format(details)
class WorkersCumulativeStatisticsInstance(InstanceResource):
    """Cumulative statistics for the Workers of a Workspace."""

    # Payload keys holding integer counters that need deserializing.
    _INT_KEYS = (
        'reservations_created',
        'reservations_accepted',
        'reservations_rejected',
        'reservations_timed_out',
        'reservations_canceled',
        'reservations_rescinded',
    )

    def __init__(self, version, payload, workspace_sid):
        """
        Initialize the WorkersCumulativeStatisticsInstance.

        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsInstance
        """
        super(WorkersCumulativeStatisticsInstance, self).__init__(version)

        # Marshaled properties
        self._properties = {
            'account_sid': payload.get('account_sid'),
            'start_time': deserialize.iso8601_datetime(payload.get('start_time')),
            'end_time': deserialize.iso8601_datetime(payload.get('end_time')),
            'activity_durations': payload.get('activity_durations'),
            'workspace_sid': payload.get('workspace_sid'),
            'url': payload.get('url'),
        }
        for key in self._INT_KEYS:
            self._properties[key] = deserialize.integer(payload.get(key))

        # Context is built lazily by the _proxy property.
        self._context = None
        self._solution = {'workspace_sid': workspace_sid, }

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance; all instance actions
        are proxied to this context. Built once, then cached.

        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsContext
        """
        if self._context is None:
            self._context = WorkersCumulativeStatisticsContext(
                self._version,
                workspace_sid=self._solution['workspace_sid'],
            )
        return self._context

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def start_time(self):
        """
        :returns: The beginning of the interval during which these statistics were calculated
        :rtype: datetime
        """
        return self._properties['start_time']

    @property
    def end_time(self):
        """
        :returns: The end of the interval during which these statistics were calculated
        :rtype: datetime
        """
        return self._properties['end_time']

    @property
    def activity_durations(self):
        """
        :returns: The minimum, average, maximum, and total time that Workers spent in each Activity
        :rtype: dict
        """
        return self._properties['activity_durations']

    @property
    def reservations_created(self):
        """
        :returns: The total number of Reservations that were created
        :rtype: unicode
        """
        return self._properties['reservations_created']

    @property
    def reservations_accepted(self):
        """
        :returns: The total number of Reservations that were accepted
        :rtype: unicode
        """
        return self._properties['reservations_accepted']

    @property
    def reservations_rejected(self):
        """
        :returns: The total number of Reservations that were rejected
        :rtype: unicode
        """
        return self._properties['reservations_rejected']

    @property
    def reservations_timed_out(self):
        """
        :returns: The total number of Reservations that were timed out
        :rtype: unicode
        """
        return self._properties['reservations_timed_out']

    @property
    def reservations_canceled(self):
        """
        :returns: The total number of Reservations that were canceled
        :rtype: unicode
        """
        return self._properties['reservations_canceled']

    @property
    def reservations_rescinded(self):
        """
        :returns: The total number of Reservations that were rescinded
        :rtype: unicode
        """
        return self._properties['reservations_rescinded']

    @property
    def workspace_sid(self):
        """
        :returns: The SID of the Workspace that contains the Workers
        :rtype: unicode
        """
        return self._properties['workspace_sid']

    @property
    def url(self):
        """
        :returns: The absolute URL of the Workers statistics resource
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self, end_date=values.unset, minutes=values.unset,
              start_date=values.unset, task_channel=values.unset):
        """
        Fetch a WorkersCumulativeStatisticsInstance.

        :param datetime end_date: Only calculate statistics from on or before this date
        :param unicode minutes: Only calculate statistics since this many minutes in the past
        :param datetime start_date: Only calculate statistics from on or after this date
        :param unicode task_channel: Only calculate cumulative statistics on this TaskChannel

        :returns: Fetched WorkersCumulativeStatisticsInstance
        :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsInstance
        """
        return self._proxy.fetch(
            end_date=end_date,
            minutes=minutes,
            start_date=start_date,
            task_channel=task_channel,
        )

    def __repr__(self):
        """
        Provide a friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Taskrouter.V1.WorkersCumulativeStatisticsInstance {}>'.format(details)
| |
import abc
import errno
import os
import platform
import socket
import threading
import time
import traceback
import urlparse
import mozprocess
__all__ = ["SeleniumServer", "ChromeDriverServer",
"GeckoDriverServer", "ServoDriverServer",
"WebDriverServer"]
class WebDriverServer(object):
    """Abstract base class that manages a WebDriver HTTP server subprocess.

    Subclasses implement make_command() to produce the command line for the
    particular server binary.
    """
    # NOTE: Python 2 ABC declaration (this module still imports urlparse).
    __metaclass__ = abc.ABCMeta

    default_base_path = "/"
    _used_ports = set()

    def __init__(self, logger, binary, host="127.0.0.1", port=None,
                 base_path="", env=None, args=None):
        """
        :param logger: structured logger used for process output and status
        :param binary: path to the server executable (required)
        :param host: interface the server listens on
        :param port: fixed port, or None to pick a free one lazily
        :param base_path: URL prefix; "" means use default_base_path
        :param env: environment for the subprocess (defaults to a copy of
            os.environ)
        :param args: extra command-line arguments appended by make_command()
        """
        if binary is None:
            raise ValueError("WebDriver server binary must be given "
                             "to --webdriver-binary argument")

        self.logger = logger
        self.binary = binary
        self.host = host
        if base_path == "":
            self.base_path = self.default_base_path
        else:
            self.base_path = base_path
        self.env = os.environ.copy() if env is None else env

        self._port = port
        self._cmd = None
        self._args = args if args is not None else []
        self._proc = None

    @abc.abstractmethod
    def make_command(self):
        """Returns the full command for starting the server process as a list."""

    def start(self, block=False):
        """Start the server; stop it cleanly on KeyboardInterrupt."""
        try:
            self._run(block)
        except KeyboardInterrupt:
            self.stop()

    def _run(self, block):
        # Launch the subprocess and wait until its HTTP endpoint responds.
        self._cmd = self.make_command()
        self._proc = mozprocess.ProcessHandler(
            self._cmd,
            processOutputLine=self.on_output,
            env=self.env,
            storeOutput=False)

        try:
            self._proc.run()
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise IOError(
                    "WebDriver HTTP server executable not found: %s" % self.binary)
            raise

        self.logger.debug(
            "Waiting for server to become accessible: %s" % self.url)
        try:
            wait_for_service((self.host, self.port))
        except Exception:
            # bugfix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit before re-raising.
            self.logger.error(
                "WebDriver HTTP server was not accessible "
                "within the timeout:\n%s" % traceback.format_exc())
            raise

        if block:
            self._proc.wait()

    def stop(self, force=False):
        """Kill the server process if it is running.

        NOTE(review): `force` is currently unused; kept for interface
        compatibility with callers that pass it.
        """
        if self.is_alive:
            return self._proc.kill()
        return not self.is_alive

    @property
    def is_alive(self):
        # "proc" attribute only exists after a successful run(); poll() is
        # None while the process is still running.
        return hasattr(self._proc, "proc") and self._proc.poll() is None

    def on_output(self, line):
        """Forward one line of subprocess output to the structured logger."""
        self.logger.process_output(self.pid,
                                   line.decode("utf8", "replace"),
                                   command=" ".join(self._cmd))

    @property
    def pid(self):
        if self._proc is not None:
            return self._proc.pid

    @property
    def url(self):
        return "http://%s:%i%s" % (self.host, self.port, self.base_path)

    @property
    def port(self):
        # Lazily allocate a free port the first time it is needed.
        if self._port is None:
            self._port = self._find_next_free_port()
        return self._port

    @staticmethod
    def _find_next_free_port():
        # Track ports already handed out so concurrent servers don't collide.
        port = get_free_port(4444, exclude=WebDriverServer._used_ports)
        WebDriverServer._used_ports.add(port)
        return port
class SeleniumServer(WebDriverServer):
    """Selenium standalone server launched via `java -jar`."""

    default_base_path = "/wd/hub"

    def make_command(self):
        """Build the java invocation for the Selenium jar."""
        cmd = ["java", "-jar", self.binary, "-port", str(self.port)]
        return cmd + self._args
class ChromeDriverServer(WebDriverServer):
    """chromedriver wrapper; serves under /wd/hub by default."""

    default_base_path = "/wd/hub"

    def __init__(self, logger, binary="chromedriver", port=None,
                 base_path="", args=None):
        WebDriverServer.__init__(
            self, logger, binary, port=port, base_path=base_path, args=args)

    def make_command(self):
        # An empty string is appended when no url-base is set, matching the
        # original command shape.
        url_base = cmd_arg("url-base", self.base_path) if self.base_path else ""
        return [self.binary, cmd_arg("port", str(self.port)), url_base] + self._args
class EdgeDriverServer(WebDriverServer):
    """Microsoft WebDriver (Edge) wrapper."""

    def __init__(self, logger, binary="MicrosoftWebDriver.exe", port=None,
                 base_path="", host="localhost", args=None):
        # base_path is accepted for signature parity but not forwarded.
        WebDriverServer.__init__(
            self, logger, binary, host=host, port=port, args=args)

    def make_command(self):
        return [self.binary, "--port=%s" % str(self.port)] + self._args
class GeckoDriverServer(WebDriverServer):
    """geckodriver wrapper; enables Rust backtraces in the child process."""

    def __init__(self, logger, marionette_port=2828, binary="geckodriver",
                 host="127.0.0.1", port=None, args=None):
        env = os.environ.copy()
        env["RUST_BACKTRACE"] = "1"
        WebDriverServer.__init__(self, logger, binary, host=host, port=port,
                                 env=env, args=args)
        self.marionette_port = marionette_port

    def make_command(self):
        cmd = [self.binary]
        cmd += ["--marionette-port", str(self.marionette_port)]
        cmd += ["--host", self.host]
        cmd += ["--port", str(self.port)]
        return cmd + self._args
class ServoDriverServer(WebDriverServer):
    """servo wrapper running headless with its built-in webdriver support."""

    def __init__(self, logger, binary="servo", binary_args=None,
                 host="127.0.0.1", port=None):
        env = os.environ.copy()
        env["RUST_BACKTRACE"] = "1"
        WebDriverServer.__init__(self, logger, binary, host=host, port=port,
                                 env=env)
        self.binary_args = binary_args

    def make_command(self):
        command = [self.binary,
                   "--webdriver", str(self.port),
                   "--hard-fail",
                   "--headless"]
        command += self._args
        if self.binary_args:
            command += self.binary_args
        return command
def cmd_arg(name, value=None):
    """Format a command-line flag: '-name[=value]' on Windows,
    '--name[=value]' elsewhere."""
    flag = ("-" if platform.system() == "Windows" else "--") + name
    return flag if value is None else flag + "=" + value
def get_free_port(start_port, exclude=None):
    """Return the first port >= start_port that can be bound locally.

    :param start_port: Integer port number at which to start testing.
    :param exclude: Set of port numbers to skip unconditionally."""
    candidate = start_port
    while True:
        if not (exclude and candidate in exclude):
            probe = socket.socket()
            try:
                probe.bind(("127.0.0.1", candidate))
            except socket.error:
                pass  # in use; fall through and try the next port
            else:
                return candidate
            finally:
                probe.close()
        candidate += 1
def wait_for_service(addr, timeout=15):
    """Waits until network service given as a tuple of (host, port) becomes
    available or the `timeout` duration is reached, at which point
    ``socket.error`` is raised.

    :param addr: (host, port) tuple of the service to wait for
    :param timeout: seconds to keep retrying before giving up
    :returns: True once a connection succeeds
    """
    end = time.time() + timeout
    while end > time.time():
        so = socket.socket()
        try:
            so.connect(addr)
        except socket.timeout:
            pass
        except socket.error as e:
            # bugfix: was `e[0]` (Python 2 idiom) which raises TypeError on
            # Python 3; e.errno works on both.
            if e.errno != errno.ECONNREFUSED:
                raise
        else:
            return True
        finally:
            so.close()
        # Connection refused / timed out: back off briefly and retry.
        time.sleep(0.5)
    raise socket.error("Service is unavailable: %s:%i" % addr)
| |
#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.org/
#
# Author: Todd Giles (todd@lightshowpi.org)
#
# Initial commands implemented by Chris Usey (chris.usey@gmail.com)
"""Command definition file.
Enabled commands must be defined in the configuration file. Each command must also have a
matching function defined in this file with a name in the form 'def cmd_command_name(user, args)'.
For example, the command help would have a matching function definition for 'def cmd_help(user,
args)'. The user argument will include the cell number of the user who made the request (if the
command is received via sms) and the 'args' argument is a string containing all the text in the
command sent (e.g. an sms message) after the command name itself has already been stripped.
So following the 'help' example, if a user with the cell phone number 1 (123) 456-7890 texted
'help me please!!!' then the function cmd_help will be called with arguments user = '+11234567890:',
and args = ' me please!!!'.
The function should return a string that will then be sent back to the user in some fashion (e.g.
in a return text message if via sms). Return an empty string if you'd rather no message be sent in
response to the command.
To install the command, simply instantiate an instance of Command for the command you've created
(see examples below). Note that new commands may be defined in their own files if desired (i.e. no
need to define all commands in this file).
"""
import logging
import math
import re
import subprocess
# The base command class. The class keeps track of all commands instantiated, so to install a new
# command, simply instantiate a new instance of it.
class Command(object):
    """The base command class

    This class keeps track of all commands instantiated, so to install
    a new command, simply instantiate a new instance of that command.
    """
    # Registry of installed commands, keyed by lower-cased command name.
    commands = {}

    def __init__(self, name, func):
        """
        :param name: command name; must appear in the configured command list
        :param func: handler called as func(user, args)
        :raises ValueError: if the command is not declared in the config file
        """
        self.name = name.lower()
        if self.name not in _CMD_NAMES:
            raise ValueError(name + ' command not defined in configuration file')
        if self.name in Command.commands:
            # bugfix: message previously lacked the space before "command";
            # also logging.warn is a deprecated alias of logging.warning.
            logging.warning(name + ' command is defined more than once, using last definition')
        self.func = func
        Command.commands[self.name] = self

    def execute(self, user, args):
        """
        Execute this command for the specified user with given arguments,
        returning a message to be sent to the user after the command has
        finished

        :param user: A specified user
        :type user: str
        :param args: argument string passed to the handler
        :type args: str
        :return: response to an executed command
        :rtype: str
        """
        return self.func(user, args)
def execute(command, user):
    """
    Attempt to execute a command for the specified user with given
    arguments, returning a message to be sent to the user after
    the command has finished

    :param command: full command text as received (name plus arguments)
    :type command: str
    :param user: string, specified user
    :type user: str
    :return: response to an executed command
    :rtype: str
    :raises: :ValueError: if command does not exist
    """
    # Determine the name of the command and arguments from the full
    # command (taking into account aliases).
    name = ''
    args = ''
    for command_name in _CMD_NAMES:
        # Direct match: the message starts with the command name
        # (case-insensitive)
        if bool(re.match(command_name, command, re.I)):
            name = command_name
            args = command[len(command_name):]
        else:
            # Otherwise try each configured alias for this command
            try:
                for command_alias in cm.sms.get(command_name + '_aliases'):
                    if bool(re.match(command_alias, command, re.I)):
                        name = command_name
                        args = command[len(command_alias):]
                        break
            except KeyError:
                pass  # No aliases defined, that's fine - keep looking
        if name:
            break
    # If no command found, assume we're executing the default command
    if not name:
        name = cm.sms.default_command
        args = command
    # Verify this command is installed
    if name not in Command.commands:
        raise ValueError(name
                         + ' command must be installed by calling Command(\''
                         + name + '\', <handler>)')
    # Verify the user has permission to execute this command
    if not cm.has_permission(user, name):
        return cm.sms.unauthorized_response.format(cmd=name, user=user)
    # Check to see if the command issued should be throttled
    if cm.is_throttle_exceeded(name, user):
        return cm.sms.throttle_limit_reached_response.format(cmd=name, user=user)
    # Execute the command
    return Command.commands[name].execute(user, args.strip())
def cmd_help(*args):
    """
    Returns a list of available commands for the requesting user.

    :param args: [specified user, arguments for command]
    :type args: list
    :return: list of available commands that the current user has permission to execute
    :rtype: str
    """
    user = args[0]
    lines = ["Commands:\n"]
    for name in _CMD_NAMES:
        if not cm.has_permission(user, name):
            continue
        description = cm.sms.get(name + '_description')
        if description:
            lines.append(description + "\n")
    return "".join(lines)
def cmd_list(*args):
    """Lists all the songs from the current playlist.

    The playlist is paginated; each page can span several sms messages
    (per_sms songs per message).

    :param args: [specified user, arguments for command]
    :type args: list
    :return: list of songs (one string per sms message to send)
    :rtype: list
    """
    per_sms = cm.sms.list_songs_per_sms
    per_page = cm.sms.list_songs_per_page
    # NOTE(review): page count is derived from per_sms while the slicing
    # below uses per_page -- confirm these are intended to differ.
    pages = int(math.ceil(float(len(cm.playlist)) / per_sms))
    page = 1
    if len(args) > 1 and args[1].isdigit():
        page = int(args[1])
    if page < 1 or page > pages:
        return 'page # must be between 1 and ' + str(pages)
    response = ['Vote by texting the song #:\n']
    if page == 1:
        response[0] += '(Showing 1-' + str(per_page) + ' of ' + str(len(cm.playlist)) + ')\n'
    i_sms = 0
    i_song = per_page * (page-1)
    # Walk this page's slice of the playlist, starting a new sms entry
    # every per_sms songs; song[0] is the song's display name.
    for song in cm.playlist[per_page * (page - 1):per_page * page]:
        if i_sms > len(response) - 1:
            response.append('')
        response[i_sms] += str(1 + i_song) + ': ' + song[0] + '\n'
        i_song += 1
        if i_song % per_sms == 0:
            i_sms += 1
    if page < pages:
        response[len(response) - 1] += '(Text "list ' + str(1 + page) + '" for more songs)'
    return response
def cmd_play(*args):
    """Interrupts whatever is going on, and plays the requested song.

    :param args: [specified user, arguments for command]
    :type args: list
    :return: play song response
    :rtype: str
    """
    request = args[1]
    if not request or not request.isdigit():
        # No (or non-numeric) song number: just skip to the next show.
        cm.update_state('play_now', -1)
        return 'Skipping straight ahead to the next show!'
    song = int(request)
    if not (1 <= song <= len(cm.playlist)):
        return 'Sorry, the song you requested ' + request + ' is out of range :('
    cm.update_state('play_now', song)
    return '"' + cm.playlist[song - 1][0] + '" coming right up!'
def cmd_volume(*args):
    """Changes the system volume.

    :param args: [specified user, arguments for command]
    :type args: list
    :return: volume request result
    :rtype: str
    """
    request = args[1]
    # Sanitize the input before passing anything to the volume script.
    if '-' in request:
        sanitized_cmd = '-'
    elif '+' in request:
        sanitized_cmd = '+'
    elif request.isdigit():
        level = int(request)
        if not (0 <= level <= 100):
            return 'volume must be between 0 and 100'
        sanitized_cmd = str(level)
    else:
        return cm.sms.volume_description

    # Execute the sanitized command and report the outcome.
    vol_script = cm.home_dir + '/bin/vol'
    proc = subprocess.Popen(vol_script + ' ' + sanitized_cmd,
                            shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, error = proc.communicate()
    if error:
        logging.warn('volume request failed: ' + str(error))
        return 'volume request failed'
    return 'volume = ' + str(output)
def cmd_vote(*args):
    """Casts a vote for the next song to be played

    :param args: [specified user, arguments for command]
    :type args: list
    :return: vote confirmation, unknown-command response, or None
    :rtype: str or None
    """
    user = args[0]
    args = args[1]
    if args.isdigit():
        song_num = int(args)
        # 'Me' (the local/console user) is not allowed to vote
        if user != 'Me' and 0 < song_num <= len(cm.playlist):
            song = cm.playlist[song_num - 1]
            # song[2] holds the set of users who voted for this song
            song[2].add(user)
            logging.info('Song requested: ' + str(song))
            return 'Thank you for requesting "' + song[0] \
                   + '", we\'ll notify you when it starts!'
        # NOTE(review): an out-of-range digit vote (or a vote from 'Me')
        # falls through and returns None -- confirm no reply is intended.
    else:
        return cm.sms.unknown_command_response
def start(config):
    """Install the built-in SMS command handlers using the given config."""
    global cm, _CMD_NAMES
    cm = config
    _CMD_NAMES = cm.sms.commands

    # Register each handler; Command() validates against _CMD_NAMES.
    for name, handler in (('help', cmd_help),
                          ('list', cmd_list),
                          ('play', cmd_play),
                          ('volume', cmd_volume),
                          ('vote', cmd_vote)):
        Command(name, handler)
# This module is import-only; there is no standalone behavior to run.
if __name__ == "__main__":
    pass
| |
#!/usr/bin/env python
""" systimes() user and system timer implementations for use by
pybench.
This module implements various different strategies for measuring
performance timings. It tries to choose the best available method
based on the platforma and available tools.
On Windows, it is recommended to have the Mark Hammond win32
package installed. Alternatively, the Thomas Heller ctypes
packages can also be used.
On Unix systems, the standard resource module provides the highest
resolution timings. Unfortunately, it is not available on all Unix
platforms.
If no supported timing methods based on process time can be found,
the module reverts to the highest resolution wall-clock timer
instead. The system time part will then always be 0.0.
The module exports one public API:
def systimes():
Return the current timer values for measuring user and system
time as tuple of seconds (user_time, system_time).
Copyright (c) 2006, Marc-Andre Lemburg (mal@egenix.com). See the
documentation for further information on copyrights, or contact
the author. All Rights Reserved.
"""
import time, sys, struct
#
# Note: Please keep this module compatible to Python 1.5.2.
#
# TODOs:
#
# * Add ctypes wrapper for new clock_gettime() real-time POSIX APIs;
# these will then provide nano-second resolution where available.
#
# * Add a function that returns the resolution of systimes()
# values, ie. systimesres().
#
### Choose an implementation

# Marker strings naming each timing strategy.  SYSTIMES_IMPLEMENTATION is
# compared against these by identity ("is"), so the text is purely
# informational (it is printed by the __main__ smoke test).
SYSTIMES_IMPLEMENTATION = None
# Fixed typo: was 'cytpes GetProcessTimes() wrapper'
USE_CTYPES_GETPROCESSTIMES = 'ctypes GetProcessTimes() wrapper'
USE_WIN32PROCESS_GETPROCESSTIMES = 'win32process.GetProcessTimes()'
USE_RESOURCE_GETRUSAGE = 'resource.getrusage()'
USE_PROCESS_TIME_CLOCK = 'time.clock() (process time)'
USE_WALL_TIME_CLOCK = 'time.clock() (wall-clock)'
USE_WALL_TIME_TIME = 'time.time() (wall-clock)'
# Platform-specific strategy selection, run at import time.  Order
# matters: SYSTIMES_IMPLEMENTATION may still be None afterwards, in
# which case the fall-back probe below runs.
if sys.platform[:3] == 'win':
    # Windows platform
    try:
        import win32process
    except ImportError:
        try:
            import ctypes
        except ImportError:
            # Use the wall-clock implementation time.clock(), since this
            # is the highest resolution clock available on Windows
            SYSTIMES_IMPLEMENTATION = USE_WALL_TIME_CLOCK
        else:
            SYSTIMES_IMPLEMENTATION = USE_CTYPES_GETPROCESSTIMES
    else:
        SYSTIMES_IMPLEMENTATION = USE_WIN32PROCESS_GETPROCESSTIMES
else:
    # All other platforms
    try:
        import resource
    except ImportError:
        pass
    else:
        SYSTIMES_IMPLEMENTATION = USE_RESOURCE_GETRUSAGE
# Fall-back solution
# NOTE(review): time.clock() was deprecated in Python 3.3 and removed in
# 3.12; this module deliberately targets very old Pythons (see the
# compatibility note in the header), so it is kept as-is.
if SYSTIMES_IMPLEMENTATION is None:
    # Check whether we can use time.clock() as approximation
    # for systimes()
    start = time.clock()
    time.sleep(0.1)
    stop = time.clock()
    # If the clock barely advanced across a 0.1s sleep, it is counting
    # process time rather than wall-clock time.
    if stop - start < 0.001:
        # Looks like time.clock() is usable (and measures process
        # time)
        SYSTIMES_IMPLEMENTATION = USE_PROCESS_TIME_CLOCK
    else:
        # Use wall-clock implementation time.time() since this provides
        # the highest resolution clock on most systems
        SYSTIMES_IMPLEMENTATION = USE_WALL_TIME_TIME
### Implementations
def getrusage_systimes():
    """Return (user_time, system_time) seconds via resource.getrusage()."""
    usage = resource.getrusage(resource.RUSAGE_SELF)
    # ru_utime and ru_stime are the first two fields of the rusage struct.
    return (usage.ru_utime, usage.ru_stime)
def process_time_clock_systimes():
    # On platforms where this strategy is selected, time.clock() measures
    # process time; system time is not separately available, so report 0.
    # NOTE(review): time.clock() was removed in Python 3.12 -- legacy
    # module kept compatible with very old Pythons (see header note).
    return (time.clock(), 0.0)
def wall_clock_clock_systimes():
    # Wall-clock fallback using time.clock() (Windows, no win32/ctypes);
    # the "system time" component is always 0.0 in wall-clock modes.
    # NOTE(review): time.clock() was removed in Python 3.12 -- legacy.
    return (time.clock(), 0.0)
def wall_clock_time_systimes():
    """Wall-clock fallback: report time.time() as user time, 0.0 as system."""
    now = time.time()
    return (now, 0.0)
# Number of clock ticks per second for the values returned
# by GetProcessTimes() on Windows.
#
# Note: Ticks returned by GetProcessTimes() are 100ns intervals on
# Windows XP. However, the process times are only updated with every
# clock tick and the frequency of these is somewhat lower: depending
# on the OS version between 10ms and 15ms. Even worse, the process
# time seems to be allocated to process currently running when the
# clock interrupt arrives, ie. it is possible that the current time
# slice gets accounted to a different process.
WIN32_PROCESS_TIMES_TICKS_PER_SECOND = 1e7
def win32process_getprocesstimes_systimes():
    # GetProcessTimes() reports 100ns ticks (see comment block above);
    # convert both components to seconds.  Windows-only: requires the
    # win32process package imported at module load.
    d = win32process.GetProcessTimes(win32process.GetCurrentProcess())
    return (d['UserTime'] / WIN32_PROCESS_TIMES_TICKS_PER_SECOND,
            d['KernelTime'] / WIN32_PROCESS_TIMES_TICKS_PER_SECOND)
def ctypes_getprocesstimes_systimes():
    # Call Win32 GetProcessTimes() directly via ctypes.  Times come back
    # as 64-bit tick counts (100ns units) written into the byref output
    # parameters; convert user/kernel to seconds.
    creationtime = ctypes.c_ulonglong()
    exittime = ctypes.c_ulonglong()
    kerneltime = ctypes.c_ulonglong()
    usertime = ctypes.c_ulonglong()
    rc = ctypes.windll.kernel32.GetProcessTimes(
        ctypes.windll.kernel32.GetCurrentProcess(),
        ctypes.byref(creationtime),
        ctypes.byref(exittime),
        ctypes.byref(kerneltime),
        ctypes.byref(usertime))
    if not rc:
        # NOTE(review): TypeError is an odd exception for an API failure;
        # kept for backwards compatibility with existing callers.
        raise TypeError('GetProcessTimes() returned an error')
    return (usertime.value / WIN32_PROCESS_TIMES_TICKS_PER_SECOND,
            kerneltime.value / WIN32_PROCESS_TIMES_TICKS_PER_SECOND)
# Select the default for the systimes() function
# (identity comparison against the marker strings chosen above)
if SYSTIMES_IMPLEMENTATION is USE_RESOURCE_GETRUSAGE:
    systimes = getrusage_systimes
elif SYSTIMES_IMPLEMENTATION is USE_PROCESS_TIME_CLOCK:
    systimes = process_time_clock_systimes
elif SYSTIMES_IMPLEMENTATION is USE_WALL_TIME_CLOCK:
    systimes = wall_clock_clock_systimes
elif SYSTIMES_IMPLEMENTATION is USE_WALL_TIME_TIME:
    systimes = wall_clock_time_systimes
elif SYSTIMES_IMPLEMENTATION is USE_WIN32PROCESS_GETPROCESSTIMES:
    systimes = win32process_getprocesstimes_systimes
elif SYSTIMES_IMPLEMENTATION is USE_CTYPES_GETPROCESSTIMES:
    systimes = ctypes_getprocesstimes_systimes
else:
    # Unreachable in practice: the fall-back probe always sets one of
    # the wall-clock strategies.
    raise TypeError('no suitable systimes() implementation found')
def processtime():
    """ Return the total time spent on the process.

        This is the sum of user and system time as returned by
        systimes().

    """
    return sum(systimes())
### Testing
def some_workload():
    # CPU-bound busy loop used to accumulate measurable process time.
    # Python 2 syntax (0L / xrange) is intentional -- this module is kept
    # compatible with very old Pythons (see header note).
    x = 0L
    for i in xrange(10000000L):
        x = x + 1L
def test_workload():
    # Print systimes() readings taken around a CPU-bound workload; the
    # user-time difference should be substantial.  Python 2 print
    # statements are intentional (legacy compatibility).
    print 'Testing systimes() under load conditions'
    t0 = systimes()
    some_workload()
    t1 = systimes()
    print 'before:', t0
    print 'after:', t1
    print 'differences:', (t1[0] - t0[0], t1[1] - t0[1])
    print
def test_idle():
    # Print systimes() readings taken around a 1s sleep; for a process-
    # time strategy the difference should be near zero, for a wall-clock
    # strategy it should be about one second.
    print 'Testing systimes() under idle conditions'
    t0 = systimes()
    time.sleep(1)
    t1 = systimes()
    print 'before:', t0
    print 'after:', t1
    print 'differences:', (t1[0] - t0[0], t1[1] - t0[1])
    print
if __name__ == '__main__':
    # Smoke test: report which strategy was selected, then exercise it
    # under CPU load and while sleeping.
    print 'Using %s as timer' % SYSTIMES_IMPLEMENTATION
    print
    test_workload()
    test_idle()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova import context
from nova import db
from nova import exception
from nova.objects import instance
from nova.objects import pci_device
from nova.tests.objects import test_objects
# Minimal input dict used to create PciDevice objects in these tests.
dev_dict = {
    'compute_node_id': 1,
    'address': 'a',
    'product_id': 'p',
    'vendor_id': 'v',
    'status': 'available'}
# Fake DB row shaped like the pci_devices table, returned by stubbed
# db.pci_device_get_* calls below.
fake_db_dev = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 1,
    'compute_node_id': 1,
    'address': 'a',
    'vendor_id': 'v',
    'product_id': 'p',
    'dev_type': 't',
    'status': 'available',
    'dev_id': 'i',
    'label': 'l',
    'instance_uuid': None,
    'extra_info': '{}',
    }
# Second fake DB row (distinct id/address/vendor/product) used by the
# list tests.
fake_db_dev_1 = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 2,
    'compute_node_id': 1,
    'address': 'a1',
    'vendor_id': 'v1',
    'product_id': 'p1',
    'dev_type': 't',
    'status': 'available',
    'dev_id': 'i',
    'label': 'l',
    'instance_uuid': None,
    'extra_info': '{}',
    }
class _TestPciDeviceObject(object):
    """PciDevice object test cases.

    Mixed into the _LocalTest/_RemoteTest concrete classes below; relies
    on the mox (self.mox) and stubs (self.stubs) helpers those fixtures
    provide.
    """
    def _create_fake_instance(self):
        # Minimal Instance carrying only the fields the PCI code reads.
        self.inst = instance.Instance()
        self.inst.uuid = 'fake-inst-uuid'
        self.inst.pci_devices = pci_device.PciDeviceList()
    def _create_fake_pci_device(self):
        # Stub the DB layer so get_by_dev_addr returns fake_db_dev.
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
        db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
        self.mox.ReplayAll()
        self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
    def test_create_pci_device(self):
        self.pci_device = pci_device.PciDevice.create(dev_dict)
        self.assertEqual(self.pci_device.product_id, 'p')
        self.assertEqual(self.pci_device.obj_what_changed(),
                         set(['compute_node_id', 'product_id', 'vendor_id',
                              'status', 'address', 'extra_info']))
    def test_pci_device_extra_info(self):
        self.dev_dict = copy.copy(dev_dict)
        self.dev_dict['k1'] = 'v1'
        self.dev_dict['k2'] = 'v2'
        self.pci_device = pci_device.PciDevice.create(self.dev_dict)
        extra_value = self.pci_device.extra_info
        self.assertEqual(extra_value.get('k1'), 'v1')
        self.assertEqual(set(extra_value.keys()), set(('k1', 'k2')))
        self.assertEqual(self.pci_device.obj_what_changed(),
                         set(['compute_node_id', 'address', 'product_id',
                              'vendor_id', 'status', 'extra_info']))
    def test_update_device(self):
        self.pci_device = pci_device.PciDevice.create(dev_dict)
        self.pci_device.obj_reset_changes()
        changes = {'product_id': 'p2', 'vendor_id': 'v2'}
        self.pci_device.update_device(changes)
        self.assertEqual(self.pci_device.vendor_id, 'v2')
        self.assertEqual(self.pci_device.obj_what_changed(),
                         set(['vendor_id', 'product_id']))
    def test_update_device_same_value(self):
        self.pci_device = pci_device.PciDevice.create(dev_dict)
        self.pci_device.obj_reset_changes()
        changes = {'product_id': 'p', 'vendor_id': 'v2'}
        self.pci_device.update_device(changes)
        self.assertEqual(self.pci_device.product_id, 'p')
        self.assertEqual(self.pci_device.vendor_id, 'v2')
        # update_device reports product_id as changed even though the
        # value is unchanged (asserted below).
        self.assertEqual(self.pci_device.obj_what_changed(),
                         set(['vendor_id', 'product_id']))
    def test_get_by_dev_addr(self):
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
        db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
        self.mox.ReplayAll()
        self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
        self.assertEqual(self.pci_device.product_id, 'p')
        self.assertEqual(self.pci_device.obj_what_changed(), set())
        self.assertRemotes()
    def test_get_by_dev_id(self):
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(db, 'pci_device_get_by_id')
        db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev)
        self.mox.ReplayAll()
        self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1)
        self.assertEqual(self.pci_device.product_id, 'p')
        self.assertEqual(self.pci_device.obj_what_changed(), set())
        self.assertRemotes()
    def test_claim_device(self):
        self._create_fake_instance()
        self.pci_device = pci_device.PciDevice.create(dev_dict)
        self.pci_device.claim(self.inst)
        self.assertEqual(self.pci_device.status, 'claimed')
        self.assertEqual(self.pci_device.instance_uuid,
                         'fake-inst-uuid')
        # claim() does not attach the device to the instance list.
        self.assertEqual(len(self.inst.pci_devices), 0)
    def test_claim_device_fail(self):
        self._create_fake_instance()
        self._create_fake_pci_device()
        self.pci_device.status = 'allocated'
        self.assertRaises(exception.PciDeviceInvalidStatus,
                          self.pci_device.claim, self.inst)
    def test_allocate_device(self):
        self._create_fake_instance()
        self._create_fake_pci_device()
        self.pci_device.claim(self.inst)
        self.pci_device.allocate(self.inst)
        self.assertEqual(self.pci_device.status, 'allocated')
        self.assertEqual(self.pci_device.instance_uuid, 'fake-inst-uuid')
        self.assertEqual(len(self.inst.pci_devices), 1)
        self.assertEqual(self.inst.pci_devices[0]['vendor_id'], 'v')
        self.assertEqual(self.inst.pci_devices[0]['status'], 'allocated')
    # NOTE(review): "allocacte" below is a typo for "allocate"; renaming
    # would change the published test ids, so the names are left as-is.
    def test_allocacte_device_fail_status(self):
        self._create_fake_instance()
        self._create_fake_pci_device()
        self.pci_device.status = 'removed'
        self.assertRaises(exception.PciDeviceInvalidStatus,
                          self.pci_device.allocate,
                          self.inst)
    def test_allocacte_device_fail_owner(self):
        self._create_fake_instance()
        self._create_fake_pci_device()
        inst_2 = instance.Instance()
        inst_2.uuid = 'fake-inst-uuid-2'
        self.pci_device.claim(self.inst)
        self.assertRaises(exception.PciDeviceInvalidOwner,
                          self.pci_device.allocate, inst_2)
    def test_free_claimed_device(self):
        self._create_fake_instance()
        self._create_fake_pci_device()
        self.pci_device.claim(self.inst)
        self.pci_device.free(self.inst)
        self.assertEqual(self.pci_device.status, 'available')
        self.assertEqual(self.pci_device.instance_uuid, None)
    def test_free_allocated_device(self):
        self._create_fake_instance()
        self._create_fake_pci_device()
        self.pci_device.claim(self.inst)
        self.pci_device.allocate(self.inst)
        self.assertEqual(len(self.inst.pci_devices), 1)
        self.pci_device.free(self.inst)
        self.assertEqual(len(self.inst.pci_devices), 0)
        self.assertEqual(self.pci_device.status, 'available')
        self.assertEqual(self.pci_device.instance_uuid, None)
    def test_free_device_fail(self):
        self._create_fake_pci_device()
        self.pci_device.status = 'removed'
        self.assertRaises(exception.PciDeviceInvalidStatus,
                          self.pci_device.free)
    def test_remove_device(self):
        self._create_fake_pci_device()
        self.pci_device.remove()
        self.assertEqual(self.pci_device.status, 'removed')
        self.assertEqual(self.pci_device.instance_uuid, None)
    def test_remove_device_fail(self):
        self._create_fake_instance()
        self._create_fake_pci_device()
        self.pci_device.claim(self.inst)
        self.assertRaises(exception.PciDeviceInvalidStatus,
                          self.pci_device.remove)
    def test_save(self):
        ctxt = context.get_admin_context()
        self._create_fake_pci_device()
        return_dev = dict(fake_db_dev, status='available',
                          instance_uuid='fake-uuid-3')
        self.pci_device.status = 'allocated'
        self.pci_device.instance_uuid = 'fake-uuid-2'
        expected_updates = dict(status='allocated',
                                instance_uuid='fake-uuid-2')
        self.mox.StubOutWithMock(db, 'pci_device_update')
        db.pci_device_update(ctxt, 1, 'a',
                             expected_updates).AndReturn(return_dev)
        self.mox.ReplayAll()
        self.pci_device.save(ctxt)
        # save() refreshes the object from the row the DB call returned.
        self.assertEqual(self.pci_device.status, 'available')
        self.assertEqual(self.pci_device.instance_uuid,
                         'fake-uuid-3')
        self.assertRemotes()
    def test_save_no_extra_info(self):
        return_dev = dict(fake_db_dev, status='available',
                          instance_uuid='fake-uuid-3')
        def _fake_update(ctxt, node_id, addr, updates):
            self.extra_info = updates.get('extra_info')
            return return_dev
        ctxt = context.get_admin_context()
        self.stubs.Set(db, 'pci_device_update', _fake_update)
        self.pci_device = pci_device.PciDevice.create(dev_dict)
        self.pci_device.save(ctxt)
        self.assertEqual(self.extra_info, '{}')
    def test_save_removed(self):
        ctxt = context.get_admin_context()
        self._create_fake_pci_device()
        self.pci_device.status = 'removed'
        self.mox.StubOutWithMock(db, 'pci_device_destroy')
        db.pci_device_destroy(ctxt, 1, 'a')
        self.mox.ReplayAll()
        self.pci_device.save(ctxt)
        self.assertEqual(self.pci_device.status, 'deleted')
        self.assertRemotes()
    def test_save_deleted(self):
        def _fake_destroy(ctxt, node_id, addr):
            self.called = True
        def _fake_update(ctxt, node_id, addr, updates):
            self.called = True
        ctxt = context.get_admin_context()
        self.stubs.Set(db, 'pci_device_destroy', _fake_destroy)
        self.stubs.Set(db, 'pci_device_update', _fake_update)
        self._create_fake_pci_device()
        self.pci_device.status = 'deleted'
        self.called = False
        # Saving an already-deleted device must touch neither DB call.
        self.pci_device.save(ctxt)
        self.assertEqual(self.called, False)
class TestPciDeviceObject(test_objects._LocalTest,
                          _TestPciDeviceObject):
    # Runs the shared PciDevice cases against the local object backend.
    pass
class TestPciDeviceObjectRemote(test_objects._RemoteTest,
                                _TestPciDeviceObject):
    # Runs the shared PciDevice cases over the remote (RPC) backend.
    pass
# Pair of fake DB rows consumed by the PciDeviceList tests below.
fake_pci_devs = [fake_db_dev, fake_db_dev_1]
class _TestPciDeviceListObject(object):
    """PciDeviceList test cases (local and remote variants below)."""
    def test_get_by_compute_node(self):
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(db, 'pci_device_get_all_by_node')
        db.pci_device_get_all_by_node(ctxt, 1).AndReturn(fake_pci_devs)
        self.mox.ReplayAll()
        devs = pci_device.PciDeviceList.get_by_compute_node(ctxt, 1)
        for i in range(len(fake_pci_devs)):
            self.assertIsInstance(devs[i], pci_device.PciDevice)
            self.assertEqual(fake_pci_devs[i]['vendor_id'], devs[i].vendor_id)
        self.assertRemotes()
    def test_get_by_instance_uuid(self):
        ctxt = context.get_admin_context()
        fake_db_1 = dict(fake_db_dev, address='a1',
                         status='allocated', instance_uuid='1')
        fake_db_2 = dict(fake_db_dev, address='a2',
                         status='allocated', instance_uuid='1')
        self.mox.StubOutWithMock(db, 'pci_device_get_all_by_instance_uuid')
        db.pci_device_get_all_by_instance_uuid(ctxt, '1').AndReturn(
            [fake_db_1, fake_db_2])
        self.mox.ReplayAll()
        devs = pci_device.PciDeviceList.get_by_instance_uuid(ctxt, '1')
        self.assertEqual(len(devs), 2)
        for i in range(len(fake_pci_devs)):
            self.assertIsInstance(devs[i], pci_device.PciDevice)
        self.assertEqual(devs[0].vendor_id, 'v')
        self.assertEqual(devs[1].vendor_id, 'v')
        self.assertRemotes()
class TestPciDeviceListObject(test_objects._LocalTest,
                              _TestPciDeviceListObject):
    # Runs the shared list cases against the local object backend.
    pass
class TestPciDeviceListObjectRemote(test_objects._RemoteTest,
                                    _TestPciDeviceListObject):
    # Runs the shared list cases over the remote (RPC) backend.
    pass
| |
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
import random
import threading
import time
import uuid
import yaml
from guild import opref as opreflib
from guild import util
from guild import yaml_util
class Run(object):
    """A Guild run rooted at a run directory.

    Run state (opref, attrs, status marker files) lives under the run's
    ``.guild`` subdirectory.
    """

    __properties__ = [
        "id",
        "path",
        "short_id",
        "opref",
        "pid",
        "status",
        "timestamp",
    ]

    def __init__(self, id, path):
        self.id = id
        self.path = path
        self._guild_dir = os.path.join(self.path, ".guild")
        self._opref = None
        # Cache pid/timestamp lookups for one second to avoid re-reading
        # files on every access.
        self._props = util.PropertyCache(
            [
                ("timestamp", None, self._get_timestamp, 1.0),
                ("pid", None, self._get_pid, 1.0),
            ]
        )

    @property
    def short_id(self):
        """First 8 characters of the run id."""
        return self.id[:8]

    @property
    def dir(self):
        """Alias for path attr."""
        return self.path

    @property
    def opref(self):
        """Parsed operation ref, lazily read from the opref file.

        Raises opreflib.OpRefError if the stored opref cannot be parsed.
        """
        if not self._opref:
            encoded = self._read_opref()
            if encoded:
                try:
                    self._opref = opreflib.OpRef.parse(encoded)
                except opreflib.OpRefError as e:
                    raise opreflib.OpRefError(
                        "invalid opref for run %r (%s): %s" % (self.id, self.path, e)
                    )
        return self._opref

    def _read_opref(self):
        return util.try_read(self._opref_path())

    def _opref_path(self):
        return self.guild_path("opref")

    def write_opref(self, opref):
        self.write_encoded_opref(str(opref))

    def write_encoded_opref(self, encoded):
        with open(self._opref_path(), "w") as f:
            f.write(encoded)
        # Invalidate the cached parse so the next access re-reads.
        self._opref = None

    def reset_opref(self):
        self._opref = None

    @property
    def pid(self):
        return self._props.get("pid")

    def _get_pid(self):
        """Return the pid from the LOCK file, or None if unavailable."""
        lockfile = self.guild_path("LOCK")
        try:
            # Fix: close the lock file (was leaked via a bare open()).
            with open(lockfile, "r") as f:
                raw = f.read(10)
        except (IOError, ValueError):
            return None
        try:
            return int(raw)
        except ValueError:
            return None

    @property
    def status(self):
        """Run status derived from marker files, else local process state."""
        if os.path.exists(self.guild_path("LOCK.remote")):
            return "running"
        elif os.path.exists(self.guild_path("PENDING")):
            return "pending"
        elif os.path.exists(self.guild_path("STAGED")):
            return "staged"
        else:
            return self._local_status()

    @property
    def remote(self):
        remote_lock_path = self.guild_path("LOCK.remote")
        return util.try_read(remote_lock_path, apply=str.strip)

    @property
    def timestamp(self):
        return self._props.get("timestamp")

    def _get_timestamp(self):
        # Prefer the started attr; fall back to initialized, then None.
        return util.find_apply(
            [
                lambda: self.get("started"),
                lambda: self.get("initialized"),
                lambda: None,
            ]
        )

    @property
    def batch_proto(self):
        """Return the proto Run for a batch run, or None."""
        proto_dir = self.guild_path("proto")
        proto_opref_path = os.path.join(proto_dir, ".guild", "opref")
        if os.path.exists(proto_opref_path):
            return for_dir(proto_dir)
        return None

    def _local_status(self):
        exit_status = self.get("exit_status")
        if exit_status is not None:
            return _status_for_exit_status(exit_status)
        local_pid = self._get_pid()
        if local_pid is not None and util.pid_exists(local_pid):
            return "running"
        return "error"

    def get(self, name, default=None):
        """Return attr `name`, or `default` when missing or None."""
        try:
            val = self[name]
        except KeyError:
            return default
        else:
            return val if val is not None else default

    def attr_names(self):
        return sorted(util.safe_listdir(self._attrs_dir()))

    def has_attr(self, name):
        return os.path.exists(self._attr_path(name))

    def iter_attrs(self):
        for name in self.attr_names():
            try:
                yield name, self[name]
            except KeyError:
                pass

    def __getitem__(self, name):
        try:
            f = open(self._attr_path(name), "r")
        except IOError:
            raise KeyError(name)
        # Fix: close the attr file after reading (was leaked).
        with f:
            return yaml.safe_load(f)

    def _attr_path(self, name):
        return os.path.join(self._attrs_dir(), name)

    def _attrs_dir(self):
        return os.path.join(self._guild_dir, "attrs")

    def __repr__(self):
        return "<%s.%s '%s'>" % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.id,
        )

    def init_skel(self):
        """Create the minimal .guild skeleton for a new run."""
        util.ensure_dir(self.guild_path("attrs"))
        if not self.has_attr("initialized"):
            self.write_attr("id", self.id)
            self.write_attr("initialized", timestamp())

    def guild_path(self, *subpath):
        # Fix: `subpath` is a varargs tuple and can never be None; test
        # for the empty tuple so guild_path() returns the .guild dir
        # directly (os.path.join with a single arg yielded the same
        # result, but via a dead branch).
        if not subpath:
            return self._guild_dir
        return os.path.join(*((self._guild_dir,) + tuple(subpath)))

    def write_attr(self, name, val, raw=False):
        """Write attr `name`; encodes `val` as YAML unless `raw`."""
        if not raw:
            val = yaml_util.encode_yaml(val)
        with open(self._attr_path(name), "w") as f:
            f.write(val)
            f.write(os.linesep)
            # Fix: removed redundant f.close() - the with block closes f.

    def del_attr(self, name):
        # Best effort - missing attr is not an error.
        try:
            os.remove(self._attr_path(name))
        except OSError:
            pass

    def iter_files(self, all_files=False, follow_links=False):
        """Yield paths under the run dir, skipping .guild unless all_files."""
        for root, dirs, files in os.walk(self.path, followlinks=follow_links):
            if not all_files and root == self.path:
                try:
                    dirs.remove(".guild")
                except ValueError:
                    pass
            for name in dirs:
                yield os.path.join(root, name)
            for name in files:
                yield os.path.join(root, name)

    def iter_guild_files(self, subpath):
        """Yield paths under .guild/<subpath>, relative to that dir."""
        guild_path = self.guild_path(subpath)
        if os.path.exists(guild_path):
            for root, dirs, files in os.walk(guild_path):
                rel_root = os.path.relpath(root, guild_path)
                if rel_root == ".":
                    rel_root = ""
                for name in dirs:
                    yield os.path.join(rel_root, name)
                for name in files:
                    yield os.path.join(rel_root, name)
def _status_for_exit_status(exit_status):
assert exit_status is not None, exit_status
if exit_status == 0:
return "completed"
elif exit_status < 0:
return "terminated"
else:
return "error"
# Last issued timestamp, guarded by a lock so concurrent callers always
# see strictly increasing values.
__last_ts = None
__last_ts_lock = threading.Lock()

def timestamp():
    """Return an integer (microseconds) for use as a run timestamp.

    Ensures that subsequent calls return increasing values.
    """
    global __last_ts
    now_us = int(time.time() * 1000000)
    with __last_ts_lock:
        if __last_ts is not None and now_us <= __last_ts:
            # Clock did not advance since the last call - bump by one.
            now_us = __last_ts + 1
        __last_ts = now_us
    return now_us
def timestamp_seconds(ts):
    """Return seconds as a float from a `timestamp` value (microseconds)."""
    seconds = ts / 1000000
    return float(seconds)
def mkid():
    """Return a fresh random run id (32 hex characters)."""
    new_id = uuid.uuid4()
    return new_id.hex
def for_dir(run_dir, id=None):
    """Return a Run for `run_dir`, deriving the id from the dir name."""
    run_id = id or os.path.basename(run_dir)
    return Run(run_id, run_dir)
def random_seed():
    """Return a random seed in [0, 2**32]."""
    return random.randint(0, 2 ** 32)
| |
"""Test various algorithmic properties of selectables."""
from sqlalchemy.testing import eq_, assert_raises, \
assert_raises_message, is_
from sqlalchemy import *
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \
AssertsExecutionResults
from sqlalchemy import testing
from sqlalchemy.sql import util as sql_util, visitors
from sqlalchemy import exc
from sqlalchemy.sql import table, column, null
from sqlalchemy import util
# Shared schema used across the selectable tests: table2.col2 references
# table1.col1, and `keyed` maps column keys (colx/coly) onto different
# physical column names (x/y).
metadata = MetaData()
table1 = Table('table1', metadata,
               Column('col1', Integer, primary_key=True),
               Column('col2', String(20)),
               Column('col3', Integer),
               Column('colx', Integer),
               )
table2 = Table('table2', metadata,
               Column('col1', Integer, primary_key=True),
               Column('col2', Integer, ForeignKey('table1.col1')),
               Column('col3', String(20)),
               Column('coly', Integer),
               )
keyed = Table('keyed', metadata,
              Column('x', Integer, key='colx'),
              Column('y', Integer, key='coly'),
              Column('z', Integer),
              )
class SelectableTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL):
__dialect__ = 'default'
    def test_indirect_correspondence_on_labels(self):
        """Correspondence picks the nearest proxy when a column repeats."""
        # this test depends upon 'distance' to
        # get the right result
        # same column three times
        s = select([table1.c.col1.label('c2'), table1.c.col1,
                   table1.c.col1.label('c1')])
        # this tests the same thing as
        # test_direct_correspondence_on_labels below -
        # that the presence of label() affects the 'distance'
        assert s.corresponding_column(table1.c.col1) is s.c.col1
        assert s.corresponding_column(s.c.col1) is s.c.col1
        assert s.corresponding_column(s.c.c1) is s.c.c1
    def test_labeled_subquery_twice(self):
        """A scalar-select label keeps its proxy set across two selects."""
        scalar_select = select([table1.c.col1]).label('foo')
        s1 = select([scalar_select])
        s2 = select([scalar_select, scalar_select])
        eq_(
            s1.c.foo.proxy_set,
            set([s1.c.foo, scalar_select, scalar_select.element])
        )
        eq_(
            s2.c.foo.proxy_set,
            set([s2.c.foo, scalar_select, scalar_select.element])
        )
        assert s1.corresponding_column(scalar_select) is s1.c.foo
        assert s2.corresponding_column(scalar_select) is s2.c.foo
    def test_label_grouped_still_corresponds(self):
        """self_group() on a label does not break correspondence."""
        label = select([table1.c.col1]).label('foo')
        label2 = label.self_group()
        s1 = select([label])
        s2 = select([label2])
        assert s1.corresponding_column(label) is s1.c.foo
        assert s2.corresponding_column(label) is s2.c.foo
    def test_direct_correspondence_on_labels(self):
        """Labels are part of the proxy set through aliases and unions."""
        # this test depends on labels being part
        # of the proxy set to get the right result
        l1, l2 = table1.c.col1.label('foo'), table1.c.col1.label('bar')
        sel = select([l1, l2])
        sel2 = sel.alias()
        assert sel2.corresponding_column(l1) is sel2.c.foo
        assert sel2.corresponding_column(l2) is sel2.c.bar
        sel2 = select([table1.c.col1.label('foo'), table1.c.col2.label('bar')])
        sel3 = sel.union(sel2).alias()
        assert sel3.corresponding_column(l1) is sel3.c.foo
        assert sel3.corresponding_column(l2) is sel3.c.bar
    def test_keyed_gen(self):
        """Column keys (not names) drive the .c collection of a select."""
        s = select([keyed])
        eq_(s.c.colx.key, 'colx')
        eq_(s.c.colx.name, 'x')
        assert s.corresponding_column(keyed.c.colx) is s.c.colx
        assert s.corresponding_column(keyed.c.coly) is s.c.coly
        assert s.corresponding_column(keyed.c.z) is s.c.z
        sel2 = s.alias()
        assert sel2.corresponding_column(keyed.c.colx) is sel2.c.colx
        assert sel2.corresponding_column(keyed.c.coly) is sel2.c.coly
        assert sel2.corresponding_column(keyed.c.z) is sel2.c.z
    def test_keyed_label_gen(self):
        """apply_labels() prefixes keyed columns with the table name."""
        s = select([keyed]).apply_labels()
        assert s.corresponding_column(keyed.c.colx) is s.c.keyed_colx
        assert s.corresponding_column(keyed.c.coly) is s.c.keyed_coly
        assert s.corresponding_column(keyed.c.z) is s.c.keyed_z
        sel2 = s.alias()
        assert sel2.corresponding_column(keyed.c.colx) is sel2.c.keyed_colx
        assert sel2.corresponding_column(keyed.c.coly) is sel2.c.keyed_coly
        assert sel2.corresponding_column(keyed.c.z) is sel2.c.keyed_z
    def test_keyed_c_collection_upper(self):
        """Table.c is keyed by Column key= for uppercase Table/Column."""
        c = Column('foo', Integer, key='bar')
        t = Table('t', MetaData(), c)
        is_(t.c.bar, c)
    def test_keyed_c_collection_lower(self):
        """table.c is keyed by .key for lowercase table/column."""
        c = column('foo')
        c.key = 'bar'
        t = table('t', c)
        is_(t.c.bar, c)
    def test_clone_c_proxy_key_upper(self):
        """Cloning a select preserves key-based proxying (upper case)."""
        c = Column('foo', Integer, key='bar')
        t = Table('t', MetaData(), c)
        s = select([t])._clone()
        assert c in s.c.bar.proxy_set
    def test_clone_c_proxy_key_lower(self):
        """Cloning a select preserves key-based proxying (lower case)."""
        c = column('foo')
        c.key = 'bar'
        t = table('t', c)
        s = select([t])._clone()
        assert c in s.c.bar.proxy_set
    def test_distance_on_aliases(self):
        """Table vs alias correspondence is stable regardless of order."""
        a1 = table1.alias('a1')
        for s in (select([a1, table1], use_labels=True),
                  select([table1, a1], use_labels=True)):
            assert s.corresponding_column(table1.c.col1) \
                is s.c.table1_col1
            assert s.corresponding_column(a1.c.col1) is s.c.a1_col1
    def test_join_against_self(self):
        """A table joined to a select of itself still corresponds."""
        jj = select([table1.c.col1.label('bar_col1')])
        jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1)
        # test column directly agaisnt itself
        assert jjj.corresponding_column(jjj.c.table1_col1) \
            is jjj.c.table1_col1
        assert jjj.corresponding_column(jj.c.bar_col1) is jjj.c.bar_col1
        # test alias of the join
        j2 = jjj.alias('foo')
        assert j2.corresponding_column(table1.c.col1) \
            is j2.c.table1_col1
    def test_clone_append_column(self):
        """append_column() works on a cloned select's .c collection."""
        sel = select([literal_column('1').label('a')])
        eq_(sel.c.keys(), ['a'])
        cloned = visitors.ReplacingCloningVisitor().traverse(sel)
        cloned.append_column(literal_column('2').label('b'))
        cloned.append_column(func.foo())
        eq_(cloned.c.keys(), ['a', 'b', 'foo()'])
    def test_append_column_after_replace_selectable(self):
        """append_column() still works after replace_selectable()."""
        basesel = select([literal_column('1').label('a')])
        tojoin = select([
            literal_column('1').label('a'),
            literal_column('2').label('b')
        ])
        basefrom = basesel.alias('basefrom')
        joinfrom = tojoin.alias('joinfrom')
        sel = select([basefrom.c.a])
        replaced = sel.replace_selectable(
            basefrom,
            basefrom.join(joinfrom, basefrom.c.a == joinfrom.c.a)
        )
        self.assert_compile(
            replaced,
            "SELECT basefrom.a FROM (SELECT 1 AS a) AS basefrom "
            "JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom "
            "ON basefrom.a = joinfrom.a"
        )
        replaced.append_column(joinfrom.c.b)
        self.assert_compile(
            replaced,
            "SELECT basefrom.a, joinfrom.b FROM (SELECT 1 AS a) AS basefrom "
            "JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom "
            "ON basefrom.a = joinfrom.a"
        )
    def test_against_cloned_non_table(self):
        """Correspondence survives repeated clone traversals."""
        # test that corresponding column digs across
        # clone boundaries with anonymous labeled elements
        col = func.count().label('foo')
        sel = select([col])
        sel2 = visitors.ReplacingCloningVisitor().traverse(sel)
        assert sel2.corresponding_column(col) is sel2.c.foo
        sel3 = visitors.ReplacingCloningVisitor().traverse(sel2)
        assert sel3.corresponding_column(col) is sel3.c.foo
    def test_with_only_generative(self):
        """with_only_columns() against a scalar select compiles correctly."""
        s1 = table1.select().as_scalar()
        self.assert_compile(
            s1.with_only_columns([s1]),
            "SELECT (SELECT table1.col1, table1.col2, "
            "table1.col3, table1.colx FROM table1) AS anon_1"
        )
    def test_type_coerce_preserve_subq(self):
        """A type_coerce()'d type survives into subquery .c collections."""
        class MyType(TypeDecorator):
            impl = Integer
        stmt = select([type_coerce(column('x'), MyType).label('foo')])
        stmt2 = stmt.select()
        assert isinstance(stmt._raw_columns[0].type, MyType)
        assert isinstance(stmt.c.foo.type, MyType)
        assert isinstance(stmt2.c.foo.type, MyType)
    def test_select_on_table(self):
        """require_embedded limits correspondence to embedded columns."""
        sel = select([table1, table2], use_labels=True)
        assert sel.corresponding_column(table1.c.col1) \
            is sel.c.table1_col1
        assert sel.corresponding_column(table1.c.col1,
                require_embedded=True) is sel.c.table1_col1
        assert table1.corresponding_column(sel.c.table1_col1) \
            is table1.c.col1
        # the select's column is not embedded in the bare table
        assert table1.corresponding_column(sel.c.table1_col1,
                require_embedded=True) is None
    def test_join_against_join(self):
        """Correspondence through a join built against another join."""
        j = outerjoin(table1, table2, table1.c.col1 == table2.c.col2)
        jj = select([table1.c.col1.label('bar_col1')],
                    from_obj=[j]).alias('foo')
        jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1)
        assert jjj.corresponding_column(jjj.c.table1_col1) \
            is jjj.c.table1_col1
        j2 = jjj.alias('foo')
        assert j2.corresponding_column(jjj.c.table1_col1) \
            is j2.c.table1_col1
        assert jjj.corresponding_column(jj.c.bar_col1) is jj.c.bar_col1
    def test_table_alias(self):
        """join() infers the FK onclause between an alias and a table."""
        a = table1.alias('a')
        j = join(a, table2)
        criterion = a.c.col1 == table2.c.col2
        self.assert_(criterion.compare(j.onclause))
    def test_union(self):
        """Columns correspond through a union to their source tables."""
        # tests that we can correspond a column in a Select statement
        # with a certain Table, against a column in a Union where one of
        # its underlying Selects matches to that same Table
        u = select([table1.c.col1, table1.c.col2, table1.c.col3,
                   table1.c.colx, null().label('coly'
                   )]).union(select([table2.c.col1, table2.c.col2,
                   table2.c.col3, null().label('colx'),
                   table2.c.coly]))
        s1 = table1.select(use_labels=True)
        s2 = table2.select(use_labels=True)
        assert u.corresponding_column(s1.c.table1_col2) is u.c.col2
        assert u.corresponding_column(s2.c.table2_col2) is u.c.col2
    def test_union_precedence(self):
        """Earlier selects in a union win conflicting correspondences."""
        # conflicting column correspondence should be resolved based on
        # the order of the select()s in the union
        s1 = select([table1.c.col1, table1.c.col2])
        s2 = select([table1.c.col2, table1.c.col1])
        s3 = select([table1.c.col3, table1.c.colx])
        s4 = select([table1.c.colx, table1.c.col3])
        u1 = union(s1, s2)
        assert u1.corresponding_column(table1.c.col1) is u1.c.col1
        assert u1.corresponding_column(table1.c.col2) is u1.c.col2
        u1 = union(s1, s2, s3, s4)
        assert u1.corresponding_column(table1.c.col1) is u1.c.col1
        assert u1.corresponding_column(table1.c.col2) is u1.c.col2
        # s3/s4 map colx/col3 onto the first two positional columns
        assert u1.corresponding_column(table1.c.colx) is u1.c.col2
        assert u1.corresponding_column(table1.c.col3) is u1.c.col1
    def test_singular_union(self):
        """A one-element union still exposes a usable .c collection."""
        u = union(select([table1.c.col1, table1.c.col2,
                  table1.c.col3]), select([table1.c.col1,
                  table1.c.col2, table1.c.col3]))
        u = union(select([table1.c.col1, table1.c.col2, table1.c.col3]))
        assert u.c.col1 is not None
        assert u.c.col2 is not None
        assert u.c.col3 is not None
    def test_alias_union(self):
        """Like test_union, but through an alias of the union."""
        # same as testunion, except its an alias of the union
        u = select([table1.c.col1, table1.c.col2, table1.c.col3,
                   table1.c.colx, null().label('coly'
                   )]).union(select([table2.c.col1, table2.c.col2,
                   table2.c.col3, null().label('colx'),
                   table2.c.coly])).alias('analias')
        s1 = table1.select(use_labels=True)
        s2 = table2.select(use_labels=True)
        assert u.corresponding_column(s1.c.table1_col2) is u.c.col2
        assert u.corresponding_column(s2.c.table2_col2) is u.c.col2
        assert u.corresponding_column(s2.c.table2_coly) is u.c.coly
        assert s2.corresponding_column(u.c.coly) is s2.c.table2_coly
    def test_select_union(self):
        """Like test_alias_union, but off a Select wrapping the union."""
        # like testaliasunion, but off a Select off the union.
        u = select([table1.c.col1, table1.c.col2, table1.c.col3,
                   table1.c.colx, null().label('coly'
                   )]).union(select([table2.c.col1, table2.c.col2,
                   table2.c.col3, null().label('colx'),
                   table2.c.coly])).alias('analias')
        s = select([u])
        s1 = table1.select(use_labels=True)
        s2 = table2.select(use_labels=True)
        assert s.corresponding_column(s1.c.table1_col2) is s.c.col2
        assert s.corresponding_column(s2.c.table2_col2) is s.c.col2
def test_union_against_join(self):
    # same as testunion, except its an alias of the union
    """Column correspondence between an aliased union and a join of the
    two base tables, in both directions.
    """
    u = select([table1.c.col1, table1.c.col2, table1.c.col3,
               table1.c.colx, null().label('coly'
               )]).union(select([table2.c.col1, table2.c.col2,
               table2.c.col3, null().label('colx'),
               table2.c.coly])).alias('analias')
    j1 = table1.join(table2)
    assert u.corresponding_column(j1.c.table1_colx) is u.c.colx
    assert j1.corresponding_column(u.c.colx) is j1.c.table1_colx
def test_join(self):
    """Joining a two-table join against an alias of one of its tables
    derives the onclause from the foreign-key relationship
    (a.c.table1_col1 == b.c.col2).

    Fix: the original used Python-2-only ``print str(...)`` statements,
    a syntax error under Python 3; converted to ``print(...)`` calls,
    which behave identically on Python 2 and still force compilation of
    the selectables.
    """
    a = join(table1, table2)
    print(str(a.select(use_labels=True)))
    b = table2.alias('b')
    j = join(a, b)
    print(str(j))
    criterion = a.c.table1_col1 == b.c.col2
    self.assert_(criterion.compare(j.onclause))
def test_select_alias(self):
    """The onclause of a join against an aliased select of table1 is
    derived as aliased.col1 == table2.col2.
    """
    aliased = table1.select().alias('a')
    joined = join(aliased, table2)
    expected = aliased.c.col1 == table2.c.col2
    self.assert_(expected.compare(joined.onclause))
def test_select_labels(self):
    """The onclause of a join against a use_labels select of table1 is
    derived from the labeled column table1_col1.
    """
    labeled = table1.select(use_labels=True)
    joined = join(labeled, table2)
    expected = labeled.c.table1_col1 == table2.c.col2
    self.assert_(expected.compare(joined.onclause))
def test_scalar_cloned_comparator(self):
    """A cloned scalar select still builds comparison expressions whose
    left side is the clone itself, not the original.
    """
    sel = select([table1.c.col1]).as_scalar()
    expr = sel == table1.c.col1
    sel2 = visitors.ReplacingCloningVisitor().traverse(sel)
    expr2 = sel2 == table1.c.col1
    is_(expr2.left, sel2)
def test_column_labels(self):
    """Joining against a select of explicitly labeled columns derives
    the onclause from the label name acol1.
    """
    labeled_sel = select([
        table1.c.col1.label('acol1'),
        table1.c.col2.label('acol2'),
        table1.c.col3.label('acol3'),
    ])
    joined = join(labeled_sel, table2)
    expected = labeled_sel.c.acol1 == table2.c.col2
    self.assert_(expected.compare(joined.onclause))
def test_labeled_select_correspoinding(self):
    """corresponding_column resolves a labeled scalar select to its
    label entry in the enclosing select's .c collection.

    NOTE(review): method name contains a typo ('correspoinding');
    kept as-is to avoid renaming the test.
    """
    l1 = select([func.max(table1.c.col1)]).label('foo')
    s = select([l1])
    eq_(s.corresponding_column(l1), s.c.foo)
    s = select([table1.c.col1, l1])
    eq_(s.corresponding_column(l1), s.c.foo)
def test_select_alias_labels(self):
    """Joining table1 against an aliased, labeled select of table2
    derives the onclause via the labeled column table2_col2.
    """
    aliased = table2.select(use_labels=True).alias('a')
    joined = join(aliased, table1)
    expected = table1.c.col1 == aliased.c.table2_col2
    self.assert_(expected.compare(joined.onclause))
def test_table_joined_to_select_of_table(self):
    """A select of a join between a table and a derived select of that
    table corresponds columns back to both join sides.
    """
    metadata = MetaData()
    a = Table('a', metadata,
              Column('id', Integer, primary_key=True))
    j2 = select([a.c.id.label('aid')]).alias('bar')
    j3 = a.join(j2, j2.c.aid == a.c.id)
    j4 = select([j3]).alias('foo')
    assert j4.corresponding_column(j2.c.aid) is j4.c.aid
    assert j4.corresponding_column(a.c.id) is j4.c.id
def test_two_metadata_join_raises(self):
    """Joining when a foreign key's target table lives in a different
    MetaData raises NoReferencedTableError.
    """
    m = MetaData()
    m2 = MetaData()
    t1 = Table('t1', m, Column('id', Integer), Column('id2', Integer))
    t2 = Table('t2', m, Column('id', Integer, ForeignKey('t1.id')))
    # t3's FK references 't1', which is not present in m2
    t3 = Table('t3', m2, Column('id', Integer, ForeignKey('t1.id2')))
    s = select([t2, t3], use_labels=True)
    assert_raises(exc.NoReferencedTableError, s.join, t1)
def test_multi_label_chain_naming_col(self):
    # See [ticket:2167] for this one.
    """Chained labels of nested scalar selects each render with their
    own label name at every level of nesting.
    """
    l1 = table1.c.col1.label('a')
    l2 = select([l1]).label('b')
    s = select([l2])
    assert s.c.b is not None
    self.assert_compile(
        s.select(),
        "SELECT b FROM (SELECT (SELECT table1.col1 AS a FROM table1) AS b)"
    )
    s2 = select([s.label('c')])
    self.assert_compile(
        s2.select(),
        "SELECT c FROM (SELECT (SELECT (SELECT table1.col1 AS a FROM table1) AS b) AS c)"
    )
def test_unusual_column_elements_text(self):
    """test that .c excludes text()."""
    s = select([table1.c.col1, text("foo")])
    eq_(
        list(s.c),
        [s.c.col1]
    )
def test_unusual_column_elements_clauselist(self):
    """Test that raw ClauseList is expanded into .c."""
    from sqlalchemy.sql.expression import ClauseList
    s = select([table1.c.col1, ClauseList(table1.c.col2, table1.c.col3)])
    # the ClauseList's two member columns appear individually in .c
    eq_(
        list(s.c),
        [s.c.col1, s.c.col2, s.c.col3]
    )
def test_unusual_column_elements_boolean_clauselist(self):
    """test that BooleanClauseList is placed as single element in .c."""
    c2 = and_(table1.c.col2 == 5, table1.c.col3 == 4)
    s = select([table1.c.col1, c2])
    # unlike a raw ClauseList, the AND expression is one .c entry
    eq_(
        list(s.c),
        [s.c.col1, s.corresponding_column(c2)]
    )
def test_from_list_deferred_constructor(self):
    """Columns attached to a Table *after* being used in a select still
    resolve their FROM objects to that table when later compiled.
    """
    c1 = Column('c1', Integer)
    c2 = Column('c2', Integer)
    # select is built before the columns belong to any table
    s = select([c1])
    t = Table('t', MetaData(), c1, c2)
    eq_(c1._from_objects, [t])
    eq_(c2._from_objects, [t])
    self.assert_compile(select([c1]),
                        "SELECT t.c1 FROM t")
    self.assert_compile(select([c2]),
                        "SELECT t.c2 FROM t")
def test_from_list_deferred_whereclause(self):
    """Same as the constructor case, but the pre-table column is also
    used in a WHERE clause before the Table is created.
    """
    c1 = Column('c1', Integer)
    c2 = Column('c2', Integer)
    s = select([c1]).where(c1 == 5)
    t = Table('t', MetaData(), c1, c2)
    eq_(c1._from_objects, [t])
    eq_(c2._from_objects, [t])
    self.assert_compile(select([c1]),
                        "SELECT t.c1 FROM t")
    self.assert_compile(select([c2]),
                        "SELECT t.c2 FROM t")
def test_from_list_deferred_fromlist(self):
    """A column given an explicit select_from before table attachment
    still picks up its eventual owning table for later selects.
    """
    m = MetaData()
    t1 = Table('t1', m, Column('x', Integer))
    c1 = Column('c1', Integer)
    s = select([c1]).where(c1 == 5).select_from(t1)
    t2 = Table('t2', MetaData(), c1)
    eq_(c1._from_objects, [t2])
    self.assert_compile(select([c1]),
                        "SELECT t2.c1 FROM t2")
def test_from_list_deferred_cloning(self):
    """A ClauseAdapter-traversed select over a pre-table column still
    compiles against the table created afterwards.
    """
    c1 = Column('c1', Integer)
    c2 = Column('c2', Integer)
    s = select([c1])
    s2 = select([c2])
    s3 = sql_util.ClauseAdapter(s).traverse(s2)
    Table('t', MetaData(), c1, c2)
    self.assert_compile(
        s3,
        "SELECT t.c2 FROM t"
    )
def test_from_list_with_columns(self):
    """with_only_columns() re-derives the FROM list from the remaining
    columns, both on a plain select and on a ClauseAdapter clone.
    """
    table1 = table('t1', column('a'))
    table2 = table('t2', column('b'))
    s1 = select([table1.c.a, table2.c.b])
    self.assert_compile(s1,
                        "SELECT t1.a, t2.b FROM t1, t2"
                        )
    # narrowing to table2's column drops t1 from the FROM list
    s2 = s1.with_only_columns([table2.c.b])
    self.assert_compile(s2,
                        "SELECT t2.b FROM t2"
                        )
    s3 = sql_util.ClauseAdapter(table1).traverse(s1)
    self.assert_compile(s3,
                        "SELECT t1.a, t2.b FROM t1, t2"
                        )
    s4 = s3.with_only_columns([table2.c.b])
    self.assert_compile(s4,
                        "SELECT t2.b FROM t2"
                        )
def test_from_list_warning_against_existing(self):
    """A select compiled before its column joins a Table re-compiles
    with the table's FROM clause afterwards.
    """
    c1 = Column('c1', Integer)
    s = select([c1])
    # force a compile.
    self.assert_compile(
        s,
        "SELECT c1"
    )
    Table('t', MetaData(), c1)
    self.assert_compile(
        s,
        "SELECT t.c1 FROM t"
    )
def test_from_list_recovers_after_warning(self):
    """After a warning-emitting late Table attachment, new selects pick
    up the table while the already-compiled select is also updated.
    """
    c1 = Column('c1', Integer)
    c2 = Column('c2', Integer)
    s = select([c1])
    # force a compile.
    eq_(str(s), "SELECT c1")
    @testing.emits_warning()
    def go():
        return Table('t', MetaData(), c1, c2)
    t = go()
    eq_(c1._from_objects, [t])
    eq_(c2._from_objects, [t])
    # 's' has been baked. Can't afford
    # not caching select._froms.
    # hopefully the warning will clue the user
    self.assert_compile(s, "SELECT t.c1 FROM t")
    self.assert_compile(select([c1]), "SELECT t.c1 FROM t")
    self.assert_compile(select([c2]), "SELECT t.c2 FROM t")
def test_label_gen_resets_on_table(self):
    """A column's generated _label is recomputed (prefixed with the
    table name) once the column is attached to a Table.
    """
    c1 = Column('c1', Integer)
    eq_(c1._label, "c1")
    Table('t1', MetaData(), c1)
    eq_(c1._label, "t1_c1")
class RefreshForNewColTest(fixtures.TestBase):
    """Tests for _refresh_for_new_column(): adding a column to a table
    after a join/select/alias over it was built, then refreshing.

    The *_uninit variants refresh before the .c collection was ever
    accessed; the *_init variants access .c first (presumably forcing
    the collection to be built) and then refresh — TODO confirm that
    reading .c is what initializes the collection.
    """

    def test_join_uninit(self):
        """Refresh on a join whose .c was never accessed."""
        a = table('a', column('x'))
        b = table('b', column('y'))
        j = a.join(b, a.c.x == b.c.y)
        q = column('q')
        b.append_column(q)
        j._refresh_for_new_column(q)
        assert j.c.b_q is q

    def test_join_init(self):
        """Refresh on a join after .c was accessed."""
        a = table('a', column('x'))
        b = table('b', column('y'))
        j = a.join(b, a.c.x == b.c.y)
        j.c  # touch the collection before the new column arrives
        q = column('q')
        b.append_column(q)
        j._refresh_for_new_column(q)
        assert j.c.b_q is q

    def test_join_samename_init(self):
        """New column shares a name ('x') with the other join side."""
        a = table('a', column('x'))
        b = table('b', column('y'))
        j = a.join(b, a.c.x == b.c.y)
        j.c
        q = column('x')
        b.append_column(q)
        j._refresh_for_new_column(q)
        assert j.c.b_x is q

    def test_select_samename_init(self):
        """Labeled select picks up a same-named new column via proxying."""
        a = table('a', column('x'))
        b = table('b', column('y'))
        s = select([a, b]).apply_labels()
        s.c
        q = column('x')
        b.append_column(q)
        s._refresh_for_new_column(q)
        assert q in s.c.b_x.proxy_set

    def test_aliased_select_samename_uninit(self):
        """Alias of a labeled select, .c untouched before refresh."""
        a = table('a', column('x'))
        b = table('b', column('y'))
        s = select([a, b]).apply_labels().alias()
        q = column('x')
        b.append_column(q)
        s._refresh_for_new_column(q)
        assert q in s.c.b_x.proxy_set

    def test_aliased_select_samename_init(self):
        """Alias of a labeled select, .c accessed before refresh."""
        a = table('a', column('x'))
        b = table('b', column('y'))
        s = select([a, b]).apply_labels().alias()
        s.c
        q = column('x')
        b.append_column(q)
        s._refresh_for_new_column(q)
        assert q in s.c.b_x.proxy_set

    def test_aliased_select_irrelevant(self):
        """Refreshing with a column from an unrelated table is a no-op."""
        a = table('a', column('x'))
        b = table('b', column('y'))
        c = table('c', column('z'))
        s = select([a, b]).apply_labels().alias()
        s.c
        q = column('x')
        c.append_column(q)  # 'c' is not part of the select
        s._refresh_for_new_column(q)
        assert 'c_x' not in s.c

    def test_aliased_select_no_cols_clause(self):
        """A select naming only a.c.x does not pick up a's new column."""
        a = table('a', column('x'))
        s = select([a.c.x]).apply_labels().alias()
        s.c
        q = column('q')
        a.append_column(q)
        s._refresh_for_new_column(q)
        assert 'a_q' not in s.c

    def test_union_uninit(self):
        """A union whose .c was never accessed picks up the new column."""
        a = table('a', column('x'))
        s1 = select([a])
        s2 = select([a])
        s3 = s1.union(s2)
        q = column('q')
        a.append_column(q)
        s3._refresh_for_new_column(q)
        assert a.c.q in s3.c.q.proxy_set

    def test_union_init_raises(self):
        """Once a union's .c exists, refreshing raises NotImplementedError."""
        a = table('a', column('x'))
        s1 = select([a])
        s2 = select([a])
        s3 = s1.union(s2)
        s3.c
        q = column('q')
        a.append_column(q)
        assert_raises_message(
            NotImplementedError,
            "CompoundSelect constructs don't support addition of "
            "columns to underlying selectables",
            s3._refresh_for_new_column, q
        )

    def test_nested_join_uninit(self):
        """Refresh propagates through a nested (chained) join, .c untouched."""
        a = table('a', column('x'))
        b = table('b', column('y'))
        c = table('c', column('z'))
        j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z)
        q = column('q')
        b.append_column(q)
        j._refresh_for_new_column(q)
        assert j.c.b_q is q

    def test_nested_join_init(self):
        """Refresh propagates through a nested join after .c was accessed."""
        a = table('a', column('x'))
        b = table('b', column('y'))
        c = table('c', column('z'))
        j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z)
        j.c
        q = column('q')
        b.append_column(q)
        j._refresh_for_new_column(q)
        assert j.c.b_q is q
class AnonLabelTest(fixtures.TestBase):
    """Test behaviors fixed by [ticket:2168]."""

    def test_anon_labels_named_column(self):
        """label(None) on a named column yields a new anonymous label."""
        c1 = column('x')
        assert c1.label(None) is not c1
        eq_(str(select([c1.label(None)])), "SELECT x AS x_1")

    def test_anon_labels_literal_column(self):
        """label(None) on a literal_column yields a new anonymous label."""
        c1 = literal_column('x')
        assert c1.label(None) is not c1
        eq_(str(select([c1.label(None)])), "SELECT x AS x_1")

    def test_anon_labels_func(self):
        """Anonymous labeling of a function expression is stable across
        compiles.
        """
        c1 = func.count('*')
        assert c1.label(None) is not c1
        eq_(str(select([c1])), "SELECT count(:param_1) AS count_1")
        # NOTE(review): result unused — presumably compiled here to
        # verify the label name below isn't perturbed by a prior
        # compile; confirm intent
        c2 = select([c1]).compile()
        eq_(str(select([c1.label(None)])), "SELECT count(:param_1) AS count_1")

    def test_named_labels_named_column(self):
        """Explicit label name renders as-is for a plain column."""
        c1 = column('x')
        eq_(str(select([c1.label('y')])), "SELECT x AS y")

    def test_named_labels_literal_column(self):
        """Explicit label name renders as-is for a literal_column."""
        c1 = literal_column('x')
        eq_(str(select([c1.label('y')])), "SELECT x AS y")
class JoinConditionTest(fixtures.TestBase, AssertsExecutionResults):
    """Tests for sql_util.join_condition() and Join's natural onclause
    derivation, including ambiguous and error cases.
    """

    def test_join_condition(self):
        """join_condition() derives FK-based onclauses (optionally via
        a_subset), raises ArgumentError on ambiguous/absent joins, and
        agrees with Join's own natural onclause derivation.
        """
        m = MetaData()
        t1 = Table('t1', m, Column('id', Integer))
        t2 = Table('t2', m, Column('id', Integer), Column('t1id',
                   ForeignKey('t1.id')))
        t3 = Table('t3', m, Column('id', Integer), Column('t1id',
                   ForeignKey('t1.id')), Column('t2id',
                   ForeignKey('t2.id')))
        t4 = Table('t4', m, Column('id', Integer), Column('t2id',
                   ForeignKey('t2.id')))
        t1t2 = t1.join(t2)
        t2t3 = t2.join(t3)
        for (left, right, a_subset, expected) in [
            (t1, t2, None, t1.c.id == t2.c.t1id),
            (t1t2, t3, t2, t1t2.c.t2_id == t3.c.t2id),
            (t2t3, t1, t3, t1.c.id == t3.c.t1id),
            (t2t3, t4, None, t2t3.c.t2_id == t4.c.t2id),
            (t2t3, t4, t3, t2t3.c.t2_id == t4.c.t2id),
            (t2t3.join(t1), t4, None, t2t3.c.t2_id == t4.c.t2id),
            (t2t3.join(t1), t4, t1, t2t3.c.t2_id == t4.c.t2id),
            (t1t2, t2t3, t2, t1t2.c.t2_id == t2t3.c.t3_t2id),
        ]:
            assert expected.compare(sql_util.join_condition(left,
                                    right, a_subset=a_subset))
        # these are ambiguous, or have no joins
        for left, right, a_subset in [
            (t1t2, t3, None),
            (t2t3, t1, None),
            (t1, t4, None),
            (t1t2, t2t3, None),
        ]:
            assert_raises(
                exc.ArgumentError,
                sql_util.join_condition,
                left, right, a_subset=a_subset
            )
        als = t2t3.alias()
        # test join's behavior, including natural
        for left, right, expected in [
            (t1, t2, t1.c.id == t2.c.t1id),
            (t1t2, t3, t1t2.c.t2_id == t3.c.t2id),
            (t2t3, t1, t1.c.id == t3.c.t1id),
            (t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
            (t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
            (t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
            (t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
            (t1t2, als, t1t2.c.t2_id == als.c.t3_t2id)
        ]:
            assert expected.compare(
                left.join(right).onclause
            )
        # TODO: this raises due to right side being "grouped", and no
        # longer has FKs. Did we want to make FromGrouping friendlier
        # ?
        assert_raises_message(exc.ArgumentError,
                              "Perhaps you meant to convert the right "
                              "side to a subquery using alias\(\)\?",
                              t1t2.join, t2t3)
        assert_raises_message(exc.ArgumentError,
                              "Perhaps you meant to convert the right "
                              "side to a subquery using alias\(\)\?",
                              t1t2.join, t2t3.select(use_labels=True))

    def test_join_cond_no_such_unrelated_table(self):
        """FKs pointing at a missing, unrelated table are skipped when
        a usable FK to the join target exists.
        """
        m = MetaData()
        # bounding the "good" column with two "bad" ones is so to
        # try to get coverage to get the "continue" statements
        # in the loop...
        t1 = Table('t1', m,
                   Column('y', Integer, ForeignKey('t22.id')),
                   Column('x', Integer, ForeignKey('t2.id')),
                   Column('q', Integer, ForeignKey('t22.id')),
                   )
        t2 = Table('t2', m, Column('id', Integer))
        assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
        assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)

    def test_join_cond_no_such_unrelated_column(self):
        """An FK to a missing column on an unrelated table is skipped."""
        m = MetaData()
        t1 = Table('t1', m, Column('x', Integer, ForeignKey('t2.id')),
                   Column('y', Integer, ForeignKey('t3.q')))
        t2 = Table('t2', m, Column('id', Integer))
        Table('t3', m, Column('id', Integer))
        assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
        assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)

    def test_join_cond_no_such_related_table(self):
        """A missing *related* table raises NoReferencedTableError in
        either argument order.
        """
        m1 = MetaData()
        m2 = MetaData()
        t1 = Table('t1', m1, Column('x', Integer, ForeignKey('t2.id')))
        t2 = Table('t2', m2, Column('id', Integer))
        assert_raises_message(
            exc.NoReferencedTableError,
            "Foreign key associated with column 't1.x' could not find "
            "table 't2' with which to generate a foreign key to "
            "target column 'id'",
            sql_util.join_condition, t1, t2
        )
        assert_raises_message(
            exc.NoReferencedTableError,
            "Foreign key associated with column 't1.x' could not find "
            "table 't2' with which to generate a foreign key to "
            "target column 'id'",
            sql_util.join_condition, t2, t1
        )

    def test_join_cond_no_such_related_column(self):
        """A missing *related* column raises NoReferencedColumnError in
        either argument order.
        """
        m = MetaData()
        t1 = Table('t1', m, Column('x', Integer, ForeignKey('t2.q')))
        t2 = Table('t2', m, Column('id', Integer))
        assert_raises_message(
            exc.NoReferencedColumnError,
            "Could not create ForeignKey 't2.q' on table 't1': "
            "table 't2' has no column named 'q'",
            sql_util.join_condition, t1, t2
        )
        assert_raises_message(
            exc.NoReferencedColumnError,
            "Could not create ForeignKey 't2.q' on table 't1': "
            "table 't2' has no column named 'q'",
            sql_util.join_condition, t2, t1
        )
class PrimaryKeyTest(fixtures.TestBase, AssertsExecutionResults):
    """Tests that a Join's primary_key collapses redundant FK-chained
    columns down to the root column of the chain.

    Fix: the original contained a Python-2-only ``print list(...)``
    debug statement (a syntax error under Python 3); converted to a
    ``print(...)`` call, which behaves identically on Python 2.
    """

    def test_join_pk_collapse_implicit(self):
        """test that redundant columns in a join get 'collapsed' into a
        minimal primary key, which is the root column along a chain of
        foreign key relationships."""
        meta = MetaData()
        a = Table('a', meta, Column('id', Integer, primary_key=True))
        b = Table('b', meta, Column('id', Integer, ForeignKey('a.id'),
                  primary_key=True))
        c = Table('c', meta, Column('id', Integer, ForeignKey('b.id'),
                  primary_key=True))
        d = Table('d', meta, Column('id', Integer, ForeignKey('c.id'),
                  primary_key=True))
        assert c.c.id.references(b.c.id)
        # references() only reports direct FK targets, not transitive
        assert not d.c.id.references(a.c.id)
        assert list(a.join(b).primary_key) == [a.c.id]
        assert list(b.join(c).primary_key) == [b.c.id]
        assert list(a.join(b).join(c).primary_key) == [a.c.id]
        assert list(b.join(c).join(d).primary_key) == [b.c.id]
        assert list(d.join(c).join(b).primary_key) == [b.c.id]
        assert list(a.join(b).join(c).join(d).primary_key) == [a.c.id]

    def test_join_pk_collapse_explicit(self):
        """test that redundant columns in a join get 'collapsed' into a
        minimal primary key, which is the root column along a chain of
        explicit join conditions."""
        meta = MetaData()
        a = Table('a', meta, Column('id', Integer, primary_key=True),
                  Column('x', Integer))
        b = Table('b', meta, Column('id', Integer, ForeignKey('a.id'),
                  primary_key=True), Column('x', Integer))
        c = Table('c', meta, Column('id', Integer, ForeignKey('b.id'),
                  primary_key=True), Column('x', Integer))
        d = Table('d', meta, Column('id', Integer, ForeignKey('c.id'),
                  primary_key=True), Column('x', Integer))
        print(list(a.join(b, a.c.x == b.c.id).primary_key))
        assert list(a.join(b, a.c.x == b.c.id).primary_key) == [a.c.id]
        assert list(b.join(c, b.c.x == c.c.id).primary_key) == [b.c.id]
        assert list(a.join(b).join(c, c.c.id == b.c.x).primary_key) \
            == [a.c.id]
        assert list(b.join(c, c.c.x == b.c.id).join(d).primary_key) \
            == [b.c.id]
        assert list(b.join(c, c.c.id == b.c.x).join(d).primary_key) \
            == [b.c.id]
        assert list(d.join(b, d.c.id == b.c.id).join(c, b.c.id
                    == c.c.x).primary_key) == [b.c.id]
        assert list(a.join(b).join(c, c.c.id
                    == b.c.x).join(d).primary_key) == [a.c.id]
        assert list(a.join(b, and_(a.c.id == b.c.id, a.c.x
                    == b.c.id)).primary_key) == [a.c.id]

    def test_init_doesnt_blowitaway(self):
        """Accessing foreign_keys doesn't reset the collapsed PK."""
        meta = MetaData()
        a = Table('a', meta,
                  Column('id', Integer, primary_key=True),
                  Column('x', Integer))
        b = Table('b', meta,
                  Column('id', Integer, ForeignKey('a.id'), primary_key=True),
                  Column('x', Integer))
        j = a.join(b)
        assert list(j.primary_key) == [a.c.id]
        j.foreign_keys  # touch lazily-built collection
        assert list(j.primary_key) == [a.c.id]

    def test_non_column_clause(self):
        """Non-column terms in the onclause don't break PK collapsing;
        PK columns not covered by the FK chain are retained.
        """
        meta = MetaData()
        a = Table('a', meta,
                  Column('id', Integer, primary_key=True),
                  Column('x', Integer))
        b = Table('b', meta,
                  Column('id', Integer, ForeignKey('a.id'), primary_key=True),
                  Column('x', Integer, primary_key=True))
        j = a.join(b, and_(a.c.id == b.c.id, b.c.x == 5))
        assert str(j) == "a JOIN b ON a.id = b.id AND b.x = :x_1", str(j)
        assert list(j.primary_key) == [a.c.id, b.c.x]

    def test_onclause_direction(self):
        """PK collapsing is insensitive to the comparison direction in
        an explicit onclause.
        """
        metadata = MetaData()
        employee = Table('Employee', metadata,
                         Column('name', String(100)),
                         Column('id', Integer, primary_key=True),
                         )
        engineer = Table('Engineer', metadata,
                         Column('id', Integer,
                                ForeignKey('Employee.id'), primary_key=True))
        eq_(util.column_set(employee.join(engineer, employee.c.id
            == engineer.c.id).primary_key),
            util.column_set([employee.c.id]))
        eq_(util.column_set(employee.join(engineer, engineer.c.id
            == employee.c.id).primary_key),
            util.column_set([employee.c.id]))
class ReduceTest(fixtures.TestBase, AssertsExecutionResults):
    """Tests for sql_util.reduce_columns() and Select.reduce_columns():
    removing columns that are made redundant by foreign keys or by
    equality criteria.
    """

    def test_reduce(self):
        """Columns FK-chained to earlier columns are dropped from the
        reduced set.
        """
        meta = MetaData()
        t1 = Table('t1', meta,
                   Column('t1id', Integer, primary_key=True),
                   Column('t1data', String(30)))
        t2 = Table('t2', meta,
                   Column('t2id', Integer, ForeignKey('t1.t1id'), primary_key=True),
                   Column('t2data', String(30)))
        t3 = Table('t3', meta,
                   Column('t3id', Integer, ForeignKey('t2.t2id'), primary_key=True),
                   Column('t3data', String(30)))
        eq_(util.column_set(sql_util.reduce_columns([
            t1.c.t1id,
            t1.c.t1data,
            t2.c.t2id,
            t2.c.t2data,
            t3.c.t3id,
            t3.c.t3data,
        ])), util.column_set([t1.c.t1id, t1.c.t1data, t2.c.t2data,
                              t3.c.t3data]))

    def test_reduce_selectable(self):
        """Passing the enclosing selectable lets its WHERE-clause
        equality remove manager_name in favor of engineer_name.
        """
        metadata = MetaData()
        engineers = Table('engineers', metadata,
                          Column('engineer_id', Integer, primary_key=True),
                          Column('engineer_name', String(50)))
        managers = Table('managers', metadata,
                         Column('manager_id', Integer, primary_key=True),
                         Column('manager_name', String(50)))
        s = select([engineers,
                    managers]).where(engineers.c.engineer_name
                                     == managers.c.manager_name)
        eq_(util.column_set(sql_util.reduce_columns(list(s.c), s)),
            util.column_set([s.c.engineer_id, s.c.engineer_name,
                             s.c.manager_id]))

    def test_reduce_generation(self):
        """Select.reduce_columns(): only_synonyms=False also drops
        FK-equivalent columns; the default keeps them.
        """
        m = MetaData()
        t1 = Table('t1', m, Column('x', Integer, primary_key=True),
                   Column('y', Integer))
        t2 = Table('t2', m, Column('z', Integer, ForeignKey('t1.x')),
                   Column('q', Integer))
        s1 = select([t1, t2])
        s2 = s1.reduce_columns(only_synonyms=False)
        eq_(
            set(s2.inner_columns),
            set([t1.c.x, t1.c.y, t2.c.q])
        )
        s2 = s1.reduce_columns()
        eq_(
            set(s2.inner_columns),
            set([t1.c.x, t1.c.y, t2.c.z, t2.c.q])
        )

    def test_reduce_only_synonym_fk(self):
        """only_synonyms=True removes only the same-named FK column."""
        m = MetaData()
        t1 = Table('t1', m, Column('x', Integer, primary_key=True),
                   Column('y', Integer))
        t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x')),
                   Column('q', Integer, ForeignKey('t1.y')))
        s1 = select([t1, t2])
        s1 = s1.reduce_columns(only_synonyms=True)
        eq_(
            set(s1.c),
            set([s1.c.x, s1.c.y, s1.c.q])
        )

    def test_reduce_only_synonym_lineage(self):
        """Among synonym columns, the first appearance in the columns
        clause is retained.
        """
        m = MetaData()
        t1 = Table('t1', m, Column('x', Integer, primary_key=True),
                   Column('y', Integer),
                   Column('z', Integer)
                   )
        # test that the first appearance in the columns clause
        # wins - t1 is first, t1.c.x wins
        s1 = select([t1])
        s2 = select([t1, s1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
        eq_(
            set(s2.reduce_columns().inner_columns),
            set([t1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z])
        )
        # reverse order, s1.c.x wins
        s1 = select([t1])
        s2 = select([s1, t1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z)
        eq_(
            set(s2.reduce_columns().inner_columns),
            set([s1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z])
        )

    def test_reduce_aliased_join(self):
        """reduce_columns() collapses the three person_id columns of an
        aliased outerjoin chain down to the root people_person_id.
        """
        metadata = MetaData()
        people = Table('people', metadata, Column('person_id', Integer,
                       Sequence('person_id_seq', optional=True),
                       primary_key=True), Column('name', String(50)),
                       Column('type', String(30)))
        engineers = Table(
            'engineers',
            metadata,
            Column('person_id', Integer, ForeignKey('people.person_id'
                   ), primary_key=True),
            Column('status', String(30)),
            Column('engineer_name', String(50)),
            Column('primary_language', String(50)),
        )
        managers = Table('managers', metadata, Column('person_id',
                         Integer, ForeignKey('people.person_id'),
                         primary_key=True), Column('status',
                         String(30)), Column('manager_name',
                         String(50)))
        pjoin = \
            people.outerjoin(engineers).outerjoin(managers).\
            select(use_labels=True).alias('pjoin'
                                          )
        eq_(util.column_set(sql_util.reduce_columns([pjoin.c.people_person_id,
            pjoin.c.engineers_person_id, pjoin.c.managers_person_id])),
            util.column_set([pjoin.c.people_person_id]))

    def test_reduce_aliased_union(self):
        """Columns of a polymorphic_union alias are not reduced — no FK
        relationship survives through the union.
        """
        metadata = MetaData()
        item_table = Table('item', metadata, Column('id', Integer,
                           ForeignKey('base_item.id'),
                           primary_key=True), Column('dummy', Integer,
                           default=0))
        base_item_table = Table('base_item', metadata, Column('id',
                                Integer, primary_key=True),
                                Column('child_name', String(255),
                                default=None))
        from sqlalchemy.orm.util import polymorphic_union
        item_join = polymorphic_union({
            'BaseItem':
            base_item_table.select(
                base_item_table.c.child_name
                == 'BaseItem'),
            'Item': base_item_table.join(item_table)},
            None, 'item_join')
        eq_(util.column_set(sql_util.reduce_columns([item_join.c.id,
            item_join.c.dummy, item_join.c.child_name])),
            util.column_set([item_join.c.id, item_join.c.dummy,
                             item_join.c.child_name]))

    def test_reduce_aliased_union_2(self):
        """FK reduction through a union alias works regardless of which
        select in the union carries the real FK column.
        """
        metadata = MetaData()
        page_table = Table('page', metadata, Column('id', Integer,
                           primary_key=True))
        magazine_page_table = Table('magazine_page', metadata,
                                    Column('page_id', Integer,
                                    ForeignKey('page.id'),
                                    primary_key=True))
        classified_page_table = Table('classified_page', metadata,
                                      Column('magazine_page_id', Integer,
                                      ForeignKey('magazine_page.page_id'), primary_key=True))
        # this is essentially the union formed by the ORM's
        # polymorphic_union function. we define two versions with
        # different ordering of selects.
        #
        # the first selectable has the "real" column
        # classified_page.magazine_page_id
        pjoin = union(
            select([
                page_table.c.id,
                magazine_page_table.c.page_id,
                classified_page_table.c.magazine_page_id
            ]).
            select_from(
                page_table.join(magazine_page_table).
                join(classified_page_table)),
            select([
                page_table.c.id,
                magazine_page_table.c.page_id,
                cast(null(), Integer).label('magazine_page_id')
            ]).
            select_from(page_table.join(magazine_page_table))
        ).alias('pjoin')
        eq_(util.column_set(sql_util.reduce_columns([pjoin.c.id,
            pjoin.c.page_id, pjoin.c.magazine_page_id])),
            util.column_set([pjoin.c.id]))
        # the first selectable has a CAST, which is a placeholder for
        # classified_page.magazine_page_id in the second selectable.
        # reduce_columns needs to take into account all foreign keys
        # derived from pjoin.c.magazine_page_id. the UNION construct
        # currently makes the external column look like that of the
        # first selectable only.
        pjoin = union(select([
            page_table.c.id,
            magazine_page_table.c.page_id,
            cast(null(), Integer).label('magazine_page_id')
        ]).
            select_from(page_table.join(magazine_page_table)),
            select([
                page_table.c.id,
                magazine_page_table.c.page_id,
                classified_page_table.c.magazine_page_id
            ]).
            select_from(page_table.join(magazine_page_table).
                        join(classified_page_table))
        ).alias('pjoin')
        eq_(util.column_set(sql_util.reduce_columns([pjoin.c.id,
            pjoin.c.page_id, pjoin.c.magazine_page_id])),
            util.column_set([pjoin.c.id]))
class DerivedTest(fixtures.TestBase, AssertsExecutionResults):
    """Tests for FromClause.is_derived_from() across tables, aliases,
    and selects.
    """

    def test_table(self):
        """A table derives from itself, never from another table."""
        meta = MetaData()
        t1 = Table('t1', meta, Column('c1', Integer, primary_key=True),
                   Column('c2', String(30)))
        t2 = Table('t2', meta, Column('c1', Integer, primary_key=True),
                   Column('c2', String(30)))
        assert t1.is_derived_from(t1)
        assert not t2.is_derived_from(t1)

    def test_alias(self):
        """Derivation through an alias is one-way: the alias derives
        from the table, not the reverse.
        """
        meta = MetaData()
        t1 = Table('t1', meta, Column('c1', Integer, primary_key=True),
                   Column('c2', String(30)))
        t2 = Table('t2', meta, Column('c1', Integer, primary_key=True),
                   Column('c2', String(30)))
        assert t1.alias().is_derived_from(t1)
        assert not t2.alias().is_derived_from(t1)
        assert not t1.is_derived_from(t1.alias())
        assert not t1.is_derived_from(t2.alias())

    def test_select(self):
        """Selects (and their aliases) derive from any table they
        select from.
        """
        meta = MetaData()
        t1 = Table('t1', meta, Column('c1', Integer, primary_key=True),
                   Column('c2', String(30)))
        t2 = Table('t2', meta, Column('c1', Integer, primary_key=True),
                   Column('c2', String(30)))
        assert t1.select().is_derived_from(t1)
        assert not t2.select().is_derived_from(t1)
        assert select([t1, t2]).is_derived_from(t1)
        assert t1.select().alias('foo').is_derived_from(t1)
        assert select([t1, t2]).alias('foo').is_derived_from(t1)
        assert not t2.select().alias('foo').is_derived_from(t1)
class AnnotationsTest(fixtures.TestBase):
def test_hashing(self):
t = table('t', column('x'))
a = t.alias()
s = t.select()
s2 = a.select()
for obj in [
t,
t.c.x,
a,
s,
s2,
t.c.x > 1,
(t.c.x > 1).label(None)
]:
annot = obj._annotate({})
eq_(set([obj]), set([annot]))
def test_compare(self):
t = table('t', column('x'), column('y'))
x_a = t.c.x._annotate({})
assert t.c.x.compare(x_a)
assert x_a.compare(t.c.x)
assert not x_a.compare(t.c.y)
assert not t.c.y.compare(x_a)
assert (t.c.x == 5).compare(x_a == 5)
assert not (t.c.y == 5).compare(x_a == 5)
s = select([t])
x_p = s.c.x
assert not x_a.compare(x_p)
assert not t.c.x.compare(x_p)
x_p_a = x_p._annotate({})
assert x_p_a.compare(x_p)
assert x_p.compare(x_p_a)
assert not x_p_a.compare(x_a)
def test_late_name_add(self):
from sqlalchemy.schema import Column
c1 = Column(Integer)
c1_a = c1._annotate({"foo": "bar"})
c1.name = 'somename'
eq_(c1_a.name, 'somename')
def test_custom_constructions(self):
from sqlalchemy.schema import Column
class MyColumn(Column):
def __init__(self):
Column.__init__(self, 'foo', Integer)
_constructor = Column
t1 = Table('t1', MetaData(), MyColumn())
s1 = t1.select()
assert isinstance(t1.c.foo, MyColumn)
assert isinstance(s1.c.foo, Column)
annot_1 = t1.c.foo._annotate({})
s2 = select([annot_1])
assert isinstance(s2.c.foo, Column)
annot_2 = s1._annotate({})
assert isinstance(annot_2.c.foo, Column)
def test_annotated_corresponding_column(self):
table1 = table('table1', column("col1"))
s1 = select([table1.c.col1])
t1 = s1._annotate({})
t2 = s1
# t1 needs to share the same _make_proxy() columns as t2, even
# though it's annotated. otherwise paths will diverge once they
# are corresponded against "inner" below.
assert t1.c is t2.c
assert t1.c.col1 is t2.c.col1
inner = select([s1])
assert inner.corresponding_column(t2.c.col1,
require_embedded=False) \
is inner.corresponding_column(t2.c.col1,
require_embedded=True) is inner.c.col1
assert inner.corresponding_column(t1.c.col1,
require_embedded=False) \
is inner.corresponding_column(t1.c.col1,
require_embedded=True) is inner.c.col1
def test_annotated_visit(self):
table1 = table('table1', column("col1"), column("col2"))
bin = table1.c.col1 == bindparam('foo', value=None)
assert str(bin) == "table1.col1 = :foo"
def visit_binary(b):
b.right = table1.c.col2
b2 = visitors.cloned_traverse(bin, {}, {'binary': visit_binary})
assert str(b2) == "table1.col1 = table1.col2"
b3 = visitors.cloned_traverse(bin._annotate({}), {}, {'binary':
visit_binary})
assert str(b3) == 'table1.col1 = table1.col2'
def visit_binary(b):
b.left = bindparam('bar')
b4 = visitors.cloned_traverse(b2, {}, {'binary': visit_binary})
assert str(b4) == ":bar = table1.col2"
b5 = visitors.cloned_traverse(b3, {}, {'binary': visit_binary})
assert str(b5) == ":bar = table1.col2"
def test_annotate_aliased(self):
t1 = table('t1', column('c1'))
s = select([(t1.c.c1 + 3).label('bat')])
a = s.alias()
a = sql_util._deep_annotate(a, {'foo': 'bar'})
eq_(a._annotations['foo'], 'bar')
eq_(a.element._annotations['foo'], 'bar')
def test_annotate_expressions(self):
table1 = table('table1', column('col1'), column('col2'))
for expr, expected in [(table1.c.col1, 'table1.col1'),
(table1.c.col1 == 5,
'table1.col1 = :col1_1'),
(table1.c.col1.in_([2, 3, 4]),
'table1.col1 IN (:col1_1, :col1_2, '
':col1_3)')]:
eq_(str(expr), expected)
eq_(str(expr._annotate({})), expected)
eq_(str(sql_util._deep_annotate(expr, {})), expected)
eq_(str(sql_util._deep_annotate(expr, {},
exclude=[table1.c.col1])), expected)
def test_deannotate(self):
table1 = table('table1', column("col1"), column("col2"))
bin = table1.c.col1 == bindparam('foo', value=None)
b2 = sql_util._deep_annotate(bin, {'_orm_adapt': True})
b3 = sql_util._deep_deannotate(b2)
b4 = sql_util._deep_deannotate(bin)
for elem in (b2._annotations, b2.left._annotations):
assert '_orm_adapt' in elem
for elem in b3._annotations, b3.left._annotations, \
b4._annotations, b4.left._annotations:
assert elem == {}
assert b2.left is not bin.left
assert b3.left is not b2.left is not bin.left
assert b4.left is bin.left # since column is immutable
# deannotate copies the element
assert bin.right is not b2.right is not b3.right is not b4.right
def test_annotate_unique_traversal(self):
"""test that items are copied only once during
annotate, deannotate traversal
#2453 - however note this was modified by
#1401, and it's likely that re49563072578
is helping us with the str() comparison
case now, as deannotate is making
clones again in some cases.
"""
table1 = table('table1', column('x'))
table2 = table('table2', column('y'))
a1 = table1.alias()
s = select([a1.c.x]).select_from(
a1.join(table2, a1.c.x == table2.c.y)
)
for sel in (
sql_util._deep_deannotate(s),
visitors.cloned_traverse(s, {}, {}),
visitors.replacement_traverse(s, {}, lambda x: None)
):
# the columns clause isn't changed at all
assert sel._raw_columns[0].table is a1
assert sel._froms[0] is sel._froms[1].left
eq_(str(s), str(sel))
# when we are modifying annotations sets only
# partially, each element is copied unconditionally
# when encountered.
for sel in (
sql_util._deep_deannotate(s, {"foo": "bar"}),
sql_util._deep_annotate(s, {'foo': 'bar'}),
):
assert sel._froms[0] is not sel._froms[1].left
# but things still work out due to
# re49563072578
eq_(str(s), str(sel))
def test_annotate_varied_annot_same_col(self):
"""test two instances of the same column with different annotations
preserving them when deep_annotate is run on them.
"""
t1 = table('table1', column("col1"), column("col2"))
s = select([t1.c.col1._annotate({"foo":"bar"})])
s2 = select([t1.c.col1._annotate({"bat":"hoho"})])
s3 = s.union(s2)
sel = sql_util._deep_annotate(s3, {"new": "thing"})
eq_(
sel.selects[0]._raw_columns[0]._annotations,
{"foo": "bar", "new": "thing"}
)
eq_(
sel.selects[1]._raw_columns[0]._annotations,
{"bat": "hoho", "new": "thing"}
)
def test_deannotate_2(self):
table1 = table('table1', column("col1"), column("col2"))
j = table1.c.col1._annotate({"remote": True}) == \
table1.c.col2._annotate({"local": True})
j2 = sql_util._deep_deannotate(j)
eq_(
j.left._annotations, {"remote": True}
)
eq_(
j2.left._annotations, {}
)
def test_deannotate_3(self):
table1 = table('table1', column("col1"), column("col2"),
column("col3"), column("col4"))
j = and_(
table1.c.col1._annotate({"remote": True}) ==
table1.c.col2._annotate({"local": True}),
table1.c.col3._annotate({"remote": True}) ==
table1.c.col4._annotate({"local": True})
)
j2 = sql_util._deep_deannotate(j)
eq_(
j.clauses[0].left._annotations, {"remote": True}
)
eq_(
j2.clauses[0].left._annotations, {}
)
def test_annotate_fromlist_preservation(self):
    """test the FROM list in select still works
    even when multiple annotate runs have created
    copies of the same selectable
    #2453, continued
    """
    table1 = table('table1', column('x'))
    table2 = table('table2', column('y'))
    a1 = table1.alias()
    s = select([a1.c.x]).select_from(
        a1.join(table2, a1.c.x == table2.c.y)
    )
    assert_s = select([select([s])])
    transforms = (
        sql_util._deep_deannotate,
        lambda stmt: sql_util._deep_annotate(stmt, {'foo': 'bar'}),
        lambda stmt: visitors.cloned_traverse(stmt, {}, {}),
        lambda stmt: visitors.replacement_traverse(stmt, {}, lambda x: None),
    )
    # apply each transform at three nesting levels; SQL must not change
    for transform in transforms:
        nested = transform(select([transform(select([transform(s)]))]))
        eq_(str(assert_s), str(nested))
def test_bind_unique_test(self):
    """An annotated bindparam renders the same as the original and
    passes the compiler "unique" test, [ticket:2425]."""
    table('t', column('a'), column('b'))
    bind = bindparam("bind", value="x", unique=True)
    annotated = bind._annotate({"foo": "bar"})
    eq_(str(or_(bind, annotated)), ":bind_1 OR :bind_1")
def test_comparators_cleaned_out_construction(self):
    """Annotating a column produces a fresh comparator object."""
    col = column('a')
    original_comp = col.comparator
    annotated = col._annotate({"foo": "bar"})
    assert original_comp is not annotated.comparator
def test_comparators_cleaned_out_reannotate(self):
    """Annotating an already-annotated column again also refreshes
    the comparator."""
    annotated = column('a')._annotate({"foo": "bar"})
    first_comp = annotated.comparator
    reannotated = annotated._annotate({"bat": "hoho"})
    assert first_comp is not reannotated.comparator
def test_comparator_cleanout_integration(self):
    """Creating comparators at each annotation step does not disturb
    the annotations accumulated on derived expressions."""
    annotated = column('a')._annotate({"foo": "bar"})
    annotated.comparator  # force comparator creation
    twice = annotated._annotate({"bat": "hoho"})
    twice.comparator  # force comparator creation again
    assert (twice == 5).left._annotations == {"foo": "bar", "bat": "hoho"}
class WithLabelsTest(fixtures.TestBase):
    """Exercise key-collision behavior of select() with and without
    apply_labels(), across combinations of overlapping column names,
    keys, and labels."""

    def _assert_labels_warning(self, s):
        # Accessing .c on a selectable with colliding keys must warn.
        assert_raises_message(
            exc.SAWarning,
            "replaced by another column with the same key",
            lambda: s.c
        )

    def _assert_result_keys(self, s, keys):
        # Keys present in the compiled statement's result map.
        compiled = s.compile()
        eq_(set(compiled.result_map), set(keys))

    def _assert_subq_result_keys(self, s, keys):
        # Same check with s wrapped as a subquery.
        compiled = s.select().compile()
        eq_(set(compiled.result_map), set(keys))

    def _names_overlap(self):
        # Same column name (and implicit key) in both tables.
        m = MetaData()
        t1 = Table('t1', m, Column('x', Integer))
        t2 = Table('t2', m, Column('x', Integer))
        return select([t1, t2])

    def test_names_overlap_nolabel(self):
        sel = self._names_overlap()
        self._assert_labels_warning(sel)
        self._assert_result_keys(sel, ['x'])

    def test_names_overlap_label(self):
        # apply_labels() disambiguates via table-qualified labels.
        sel = self._names_overlap().apply_labels()
        eq_(
            sel.c.keys(),
            ['t1_x', 't2_x']
        )
        self._assert_result_keys(sel, ['t1_x', 't2_x'])

    def _names_overlap_keys_dont(self):
        # Same column name but distinct explicit keys.
        m = MetaData()
        t1 = Table('t1', m, Column('x', Integer, key='a'))
        t2 = Table('t2', m, Column('x', Integer, key='b'))
        return select([t1, t2])

    def test_names_overlap_keys_dont_nolabel(self):
        sel = self._names_overlap_keys_dont()
        eq_(
            sel.c.keys(),
            ['a', 'b']
        )
        self._assert_result_keys(sel, ['x'])

    def test_names_overlap_keys_dont_label(self):
        sel = self._names_overlap_keys_dont().apply_labels()
        eq_(
            sel.c.keys(),
            ['t1_a', 't2_b']
        )
        self._assert_result_keys(sel, ['t1_x', 't2_x'])

    def _labels_overlap(self):
        # Distinct names whose generated labels collide: both yield "t_x_id".
        m = MetaData()
        t1 = Table('t', m, Column('x_id', Integer))
        t2 = Table('t_x', m, Column('id', Integer))
        return select([t1, t2])

    def test_labels_overlap_nolabel(self):
        sel = self._labels_overlap()
        eq_(
            sel.c.keys(),
            ['x_id', 'id']
        )
        self._assert_result_keys(sel, ['x_id', 'id'])

    def test_labels_overlap_label(self):
        # The colliding label is replaced by an anonymous label.
        sel = self._labels_overlap().apply_labels()
        t2 = sel.froms[1]
        eq_(
            sel.c.keys(),
            ['t_x_id', t2.c.id.anon_label]
        )
        self._assert_result_keys(sel, ['t_x_id', 'id_1'])
        self._assert_subq_result_keys(sel, ['t_x_id', 'id_1'])

    def _labels_overlap_keylabels_dont(self):
        # Labels collide but explicit keys keep key-labels distinct.
        m = MetaData()
        t1 = Table('t', m, Column('x_id', Integer, key='a'))
        t2 = Table('t_x', m, Column('id', Integer, key='b'))
        return select([t1, t2])

    def test_labels_overlap_keylabels_dont_nolabel(self):
        sel = self._labels_overlap_keylabels_dont()
        eq_(sel.c.keys(), ['a', 'b'])
        self._assert_result_keys(sel, ['x_id', 'id'])

    def test_labels_overlap_keylabels_dont_label(self):
        sel = self._labels_overlap_keylabels_dont().apply_labels()
        eq_(sel.c.keys(), ['t_a', 't_x_b'])
        self._assert_result_keys(sel, ['t_x_id', 'id_1'])

    def _keylabels_overlap_labels_dont(self):
        # Inverse case: key-labels collide while name-labels do not.
        m = MetaData()
        t1 = Table('t', m, Column('a', Integer, key='x_id'))
        t2 = Table('t_x', m, Column('b', Integer, key='id'))
        return select([t1, t2])

    def test_keylabels_overlap_labels_dont_nolabel(self):
        sel = self._keylabels_overlap_labels_dont()
        eq_(sel.c.keys(), ['x_id', 'id'])
        self._assert_result_keys(sel, ['a', 'b'])

    def test_keylabels_overlap_labels_dont_label(self):
        sel = self._keylabels_overlap_labels_dont().apply_labels()
        t2 = sel.froms[1]
        eq_(sel.c.keys(), ['t_x_id', t2.c.id.anon_label])
        self._assert_result_keys(sel, ['t_a', 't_x_b'])
        self._assert_subq_result_keys(sel, ['t_a', 't_x_b'])

    def _keylabels_overlap_labels_overlap(self):
        # Both the key-labels and the name-labels collide.
        m = MetaData()
        t1 = Table('t', m, Column('x_id', Integer, key='x_a'))
        t2 = Table('t_x', m, Column('id', Integer, key='a'))
        return select([t1, t2])

    def test_keylabels_overlap_labels_overlap_nolabel(self):
        sel = self._keylabels_overlap_labels_overlap()
        eq_(sel.c.keys(), ['x_a', 'a'])
        self._assert_result_keys(sel, ['x_id', 'id'])
        self._assert_subq_result_keys(sel, ['x_id', 'id'])

    def test_keylabels_overlap_labels_overlap_label(self):
        sel = self._keylabels_overlap_labels_overlap().apply_labels()
        t2 = sel.froms[1]
        eq_(sel.c.keys(), ['t_x_a', t2.c.a.anon_label])
        self._assert_result_keys(sel, ['t_x_id', 'id_1'])
        self._assert_subq_result_keys(sel, ['t_x_id', 'id_1'])

    def _keys_overlap_names_dont(self):
        # Distinct names forced onto the same explicit key.
        m = MetaData()
        t1 = Table('t1', m, Column('a', Integer, key='x'))
        t2 = Table('t2', m, Column('b', Integer, key='x'))
        return select([t1, t2])

    def test_keys_overlap_names_dont_nolabel(self):
        sel = self._keys_overlap_names_dont()
        self._assert_labels_warning(sel)
        self._assert_result_keys(sel, ['a', 'b'])

    def test_keys_overlap_names_dont_label(self):
        sel = self._keys_overlap_names_dont().apply_labels()
        eq_(
            sel.c.keys(),
            ['t1_x', 't2_x']
        )
        self._assert_result_keys(sel, ['t1_a', 't2_b'])
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2012 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from faint.pdf.xobject import XObject
from faint.pdf.stream import Stream
# Object /Type values that must be referenced from the document /Catalog
# (see Document._append, which registers matching objects there).
_catalogued = ["/Pages", "/Outlines"]
class pdf_id:
    """An object ID and a generation number (always 0 here) e.g. 1 0"""

    def __init__(self, num):
        self.num = num

    def __str__(self):
        # e.g. "1 0" -- object number then generation number
        return "%d 0" % self.num

    def reference(self):
        """Indirect reference form, e.g. "1 0 R"."""
        return "%d 0 R" % self.num

    def __hash__(self):
        # The object number alone identifies the id (generation is fixed).
        return self.num

    def __lt__(self, other):
        return self.num < other.num

    def __eq__(self, other):
        return self.num == other.num
def _format_entry(key, value):
"""Outputs an entry in a PDF dictionary"""
return "/" + key + " " + str(value)
class Object:
    """Base class for PDF objects with dictionaries.

    Subclasses override keys() (dictionary entries) and data()
    (raw payload following the dictionary)."""

    def __init__(self):
        pass

    def keys(self):
        # No dictionary entries by default.
        return []

    def __len__(self):
        return len(self.keys())

    def data(self):
        # No payload by default.
        return ""
class Font(Object):
    """PDF Font object (a /Type1 font with the given base font name)."""

    def __init__(self, baseFont):
        # baseFont: PDF name of the base font, e.g. "/Helvetica"
        self.baseFont = baseFont

    def keys(self):
        return ["Type",
                "Subtype",
                "BaseFont"]

    def data(self):
        return ""

    def __getitem__(self, item):
        if item == "Type":
            return "/Font"
        elif item == "Subtype":
            return "/Type1"
        elif item == "BaseFont":
            return self.baseFont
        # Previously fell through and returned None silently; an unknown
        # key is a programming error and should fail loudly.
        raise KeyError(item)
class Pages(Object):
    """PDF Pages object, indicating the individual pages"""

    def __init__(self):
        # pdf_ids of the page objects, in document order.
        self.kids = []

    def keys(self):
        return ["Type", "Kids", "Count"]

    def __len__(self):
        return len(self.keys())

    def __setitem__(self, item, value):
        # Entries are derived from self.kids; assignment is ignored.
        pass

    def __getitem__(self, item):
        if item == "Type":
            return "/Pages"
        if item == "Count":
            return len(self.kids)
        if item == "Kids":
            refs = " ".join(kid.reference() for kid in self.kids)
            return "[" + refs + "]"
        assert(False)

    def add(self, id):
        """Register the pdf_id of a page object."""
        self.kids.append(id)

    def data(self):
        return ""
class Resources(Object):
    """Shared /Resources dictionary: the ProcSet reference plus any
    XObjects, rendered inline (no dictionary entries of its own)."""

    def __init__(self):
        self.items = {}
        self.xobjects = []

    def __len__(self):
        # Rendered via __str__, not as a regular object dictionary.
        return 0

    def add_proc_ref(self, procRef):
        self.items["ProcRef"] = procRef

    def add_xobject(self, object_id):
        """Register an xobject id and return its resource name (/X1...)."""
        self.xobjects.append(object_id)
        return self._xobject_index_to_name(len(self.xobjects) - 1)

    def __getitem__(self, item):
        return self.items[item]

    def __str__(self):
        proc_ref = self.items["ProcRef"].ref()
        rows = [" /ProcSet %s R" % proc_ref]
        rows.extend(
            " /XObject << %s %s R >>" % (self._xobject_index_to_name(num), ref)
            for num, ref in enumerate(self.xobjects))
        return "<< \n" + "\n".join(rows) + " >>"

    def _xobject_index_to_name(self, index):
        # XObject names are 1-based: /X1, /X2, ...
        return "/X%d" % (index + 1)
class ProcRef:
    """Late-bound reference to the document's ProcSet object id."""

    def __init__(self, doc):
        # doc: the owning Document; queried lazily so the id reflects
        # all objects appended by emission time.
        self.doc = doc

    def ref(self):
        """The ProcSet pdf_id (renders as e.g. "5 0")."""
        return self.doc.get_resource_id()

    def __str__(self):
        # Bug fix: the old "%d %d R" % pdf_id raised TypeError since
        # get_resource_id() returns a single pdf_id, not a 2-tuple.
        # pdf_id.__str__ already yields "num gen".
        return " << /ProcSet %s R >>" % self.doc.get_resource_id()
class Proc(Object):
    """The ProcSet object: a bare array payload, no dictionary."""

    def data(self):
        return "[/PDF]\n"

    def __str__(self):
        return "[/PDF]\n"
class Document:
    """A PDF document composed of the various Objects and one or more pages
    containing a Stream each.

    str(document) yields the complete PDF file contents.
    """

    def __init__(self):
        # Ordered pdf_ids; position defines emit order and the xref table.
        self.identifiers = []
        # Map of pdf_id -> object (a plain dict or an Object subclass).
        self.objects = {}
        # The PDF /Catalog item
        self.catalog = {"Type": "/Catalog"}
        self._append(self.catalog)
        # The PDF /Outlines item
        self.outlines = {"Type": "/Outlines", "Count": 0}
        self._append(self.outlines)
        # The PDF /Pages item
        self.pages = Pages()
        self.pagesId = self._append(self.pages)
        self.resources = Resources()
        self.procSet = Proc()
        # Map of object numbers to lists of PDF comments. The comments
        # indicated by an id precede the object identified by that id
        # (as a way to know where the comments should go when
        # emitting)
        self.comments = {}

    def get_resource_id(self):
        """Id of the ProcSet object, emitted after all appended objects."""
        # Fixme: Isn't this the ProcSet id?
        return pdf_id(len(self.identifiers) + 1)

    def get_xref_id(self):
        """Id one past the ProcSet (position of the xref table)."""
        res_id = self.get_resource_id()
        return pdf_id(res_id.num + 1)

    def add_page(self, width, height):
        """Append a /Page of the given size and return its pdf_id."""
        self.resources.add_proc_ref(ProcRef(self))
        page = {"Type": "/Page",
                "MediaBox": "[0 0 %d %d]" % (width, height),
                "Resources": self.resources,
                "Parent": self.pagesId.reference()
                }
        id = self._append(page)
        self.pages.add(id)
        return id

    def add_stream(self, stream, page_id):
        """Append a content stream and attach it to the given page."""
        stream_id = self._append(stream)
        page = self.objects[page_id]
        page["Contents"] = stream_id.reference()

    def add_xobject(self, xobject):
        """Adds the specified xobject and returns the name"""
        obj_id = self._append(xobject)
        name = self.resources.add_xobject(obj_id)
        return name

    def add_comment(self, text):
        """Queue a %-comment emitted before the most recently appended
        object.

        NOTE(review): keyed on pdf_id(len(identifiers)), i.e. the LAST
        appended object, not the next one -- confirm that is intended.
        """
        id = pdf_id(len(self.identifiers))
        if not id in self.comments:
            # Bug fix: removed leftover debug print("Appended.") here.
            self.comments[id] = []
        self.comments[id].append(text)

    def _append(self, obj):
        """Register obj under the next object id; objects whose /Type is
        in _catalogued are also referenced from the catalog."""
        id = pdf_id(len(self.identifiers) + 1)
        self.identifiers.append(id)
        self.objects[id] = obj
        if "Type" in obj.keys():
            objType = obj["Type"]
            if objType in _catalogued:
                # e.g. catalog["Pages"] = "3 0 R"
                self.catalog[objType[1:]] = id.reference()
        return id

    def __str__(self):
        s = "%PDF-1.4\n"
        # Fixme: also add a row of non-printable characters
        # index collects the byte offset of each object for the xref table
        index = []
        index.append(len(s))  # The Catalog-object follows the initial comment
        for key in self.identifiers:
            obj = self.objects[key]
            comments = self.comments.get(key, [])
            for comment in comments:
                s += "% " + comment + "\n"
            s = s + self._format_obj(key, obj)
            index.append(len(s))
        # Add the ProcSet before the xref index
        s = s + self._format_obj(self.get_resource_id(), self.procSet)
        index.append(len(s))
        startXref = len(s)
        s = s + self._format_xref(index)
        s = s + self._format_trailer(len(index))
        s = s + "startxref\n%d\n%%%%EOF\n" % startXref
        return s

    def _format_trailer(self, size):
        """The trailer dictionary: entry count and the /Root reference."""
        return "trailer\n<< /Size %d\n/Root 1 0 R\n>>\n" % size

    def _format_xref(self, index):
        """Returns a string for the cross-reference table."""
        return ("xref\n" +
                "0 %d\n" % (len(index)) +  # Number of entries
                "0000000000 65535 f \n" +  # Special entry
                " \n".join([str(offset).rjust(10, "0") + " 00000 n"
                            for offset in index[:-1]]) +  # Individual entries
                " \n")  # Final end-line

    def _format_obj(self, key, obj):
        """Serialize one object: "N 0 obj", dictionary, payload, endobj."""
        return ("%s obj\n" % str(key) +
                self._format_obj_dict(obj) +
                self._format_obj_data(obj)
                + "endobj\n")

    def _format_obj_dict(self, obj):
        """Serialize the object's dictionary, or "" if it has no entries."""
        if len(obj) == 0:
            return ""
        obj_dict = "<< " + "\n".join([_format_entry(key, obj[key])
                                      for key in obj.keys()])
        if len(obj.keys()) > 1:
            obj_dict = obj_dict + "\n>>\n"
        else:
            obj_dict = obj_dict + " >>\n"
        return obj_dict

    def _format_obj_data(self, obj):
        """Raw payload following the dictionary; plain dicts have none."""
        if obj.__class__ == dict:
            return ""
        return obj.data()
| |
import json
import time
import traceback
from datetime import timedelta

import pytest
import retrying
import shakedown
from dcos.mesos import DCOSClient
from dcos import mesos
from shakedown import *
from utils import *
# Abort after this many consecutive scale/launch failures.
MAX_CONSECUTIVE_SCALE_FAILS = 9
# Hard wall-clock limit for a single scale test run (hours).
MAX_HOURS_OF_TEST = 4
# Prefix used when recording raw events on a test object.
EVENT_HEADER = ' event:'
# Recoverable error tags -- the test keeps going after logging these.
ERROR_LAUNCH = 'Error (launch failure):'
ERROR_SCALING = 'Error (scaling error):'
ERROR_SCALE_TIMEOUT = 'Error (scale timeout):'
ERROR_MARATHON_TIMEOUT = 'Futures timed out:'
ERROR_DEPLOYMENT = 'Error (deployment error):'
# Fatal tags -- the test aborts when one of these is recorded.
FATAL_NOT_SCALING = 'Fatal (not scaling):'
FATAL_CONSECUTIVE_LAUNCH = 'Fatal (consecutive launch):'
FATAL_CONSECUTIVE_SCALING = 'Fatal (consecutive scaling)'
FATAL_CONSECUTIVE_DEPLOYMENT = 'Fatal (consecutive deployment):'
FATAL_CONSECUTIVE_UNDEPLOYMENT = 'Fatal (consecutive undeployment):'
# All tags that count as an error for reporting purposes.
ERRORS = [ERROR_LAUNCH, ERROR_SCALING, ERROR_SCALE_TIMEOUT, ERROR_MARATHON_TIMEOUT,
          ERROR_DEPLOYMENT, FATAL_CONSECUTIVE_LAUNCH, FATAL_NOT_SCALING,
          FATAL_CONSECUTIVE_DEPLOYMENT, FATAL_CONSECUTIVE_UNDEPLOYMENT, FATAL_CONSECUTIVE_SCALING]
# Reasons for skipping a test outright.
SKIP_RESOURCES = 'Insufficient Resources'
SKIP_PREVIOUS_TEST_FAILED = 'Previous Scale Test Failed'
def app(id=1, instances=1):
    """Build a minimal sleeper-app definition for marathon.

    The id is coerced to a string and given a leading "/" if absent.
    """
    app_id = str(id)
    if not app_id.startswith("/"):
        app_id = "/" + app_id
    return {
        "id": app_id,
        "instances": instances,
        "cmd": "for (( ; ; )); do sleep 100000000; done",
        "cpus": 0.01,
        "mem": 32,
        "disk": 0
    }
def group(gcount=1, instances=1):
    """Build a nested group definition containing gcount sleeper apps,
    each with the given instance count."""
    gid = "/2deep/group"
    return {
        "id": gid,
        "apps": [app(gid + "/" + str(num), instances)
                 for num in range(1, gcount + 1)]
    }
def constraints(name, operator, value=None):
    """Build a marathon constraints list: [[name, operator(, value)]]."""
    entry = [name, operator]
    if value is not None:
        entry = entry + [value]
    return [entry]
def unique_host_constraint():
    """Constraint placing each instance on a distinct host."""
    field, operator = 'hostname', 'UNIQUE'
    return constraints(field, operator)
def delete_all_apps():
    """Remove every app by force-removing the root group."""
    marathon.create_client().remove_group("/", True)
def time_deployment(test=""):
client = marathon.create_client()
start = time.time()
deployment_count = 1
while deployment_count > 0:
# need protection when tearing down
try:
deployments = client.get_deployments()
deployment_count = len(deployments)
if deployment_count > 0:
time.sleep(1)
except:
wait_for_service_endpoint('marathon-user')
pass
end = time.time()
elapse = round(end - start, 3)
return elapse
def delete_group(group="/2deep/group"):
client = marathon.create_client()
client.remove_group(group, True)
def deployment_less_than_predicate(count=10):
    """True when fewer than count deployments are outstanding."""
    outstanding = marathon.create_client().get_deployments()
    return len(outstanding) < count
def launch_group(test_obj):
    """ Launches a "group" style test, which is 1 HTTP Post request for all
    of the apps defined by count. It is common to launch X apps as a group
    with only 1 instance each. It is possible to control the number of instances
    of an app.
    """
    payload = group(test_obj.count, test_obj.instance)
    marathon.create_client().create_group(payload)
def count_test_app(test_obj):
    """
    Runs the `count` scale test for apps in marathon. This is for apps and not pods.
    The count test is defined as X number of apps with Y number of instances.
    Y is commonly 1 instance and the test is scaling up to X number of applications.
    The details of how many apps and how many instances are defined in the test_obj.
    This test will make X number of HTTP requests against Marathon.
    :param test_obj: Is of type ScaleTest and defines the criteria for the test and logs the results and events of the test.
    """
    # clean_marathon_state tears marathon state down around the test
    # (context manager defined elsewhere -- from utils/shakedown star imports).
    with clean_marathon_state(test_obj):
        # launch phase: one POST per app
        test_obj.start_test()
        launch_results = test_obj.launch_results
        try:
            launch_apps(test_obj)
        except Exception as e:
            launch_results.failed(e)
            # marathon may have bounced during the failure; wait for it
            wait_for_marathon_up(test_obj)
        else:
            # only reached when launch_apps raised nothing
            launch_results.completed()
        # deployment phase: wait for the target scale
        try:
            if launch_results.success:
                time_deployment2(test_obj)
            else:
                test_obj.deploy_results.failed("Unable to continue based on launch failure")
        except Exception as e:
            print(e)
            test_obj.deploy_results.failed(e)
def launch_apps(test_obj):
    """ This function launches (makes HTTP POST requests) for apps. It is used
    for instance and count tests. Instance test will only have 1 count with X
    instances and is the simple case.
    The group test uses a different launch function.

    Raises an Exception when the launch must be aborted (step-scale
    failure, or too many consecutive add_app failures).
    """
    client = marathon.create_client()
    count = test_obj.count
    instances = test_obj.instance
    launch_results = test_obj.launch_results
    deploy_results = test_obj.deploy_results
    scale_failure_count = 0  # consecutive failures only; reset on success
    for num in range(1, count + 1):
        try:
            client.add_app(app(num, instances))
            scale_failure_count = 0
            # every 100 adds wait for scale up
            if not num % 100:
                target = num * instances
                deploy_results.set_current_scale(current_scale())
                # wait for target; count_deployment returns True on abort
                if count_deployment(test_obj, target):
                    abort_msg = 'Count test launch failure at {} out of {}'.format(num, test_obj.target)
                    test_obj.add_event(abort_msg)
                    raise Exception(abort_msg)
        except Exception as e:
            log_error_event(test_obj, e, ERROR_LAUNCH)
            scale_failure_count = scale_failure_count + 1
            # 9 tries to see if scale increases, if no abort
            if scale_failure_count > MAX_CONSECUTIVE_SCALE_FAILS:
                abort_msg = 'Aborting based on too many failures: {}'.format(scale_failure_count)
                log_error_event(test_obj, abort_msg, FATAL_CONSECUTIVE_DEPLOYMENT, True)
                raise Exception(abort_msg)
            # need some time -- back off proportionally to the failure streak
            else:
                time.sleep(calculate_scale_wait_time(test_obj, scale_failure_count))
                quiet_wait_for_marathon_up(test_obj)
def log_error_event(test_obj, message, message_type='', noisy=False):
    """Record '<message_type> <message>' as an event on test_obj (when
    given); also print it when noisy."""
    tagged = '{} {}'.format(message_type, message)
    if test_obj is not None:
        test_obj.add_event(tagged)
    if noisy:
        print(tagged)
def instance_test_app(test_obj):
    """
    Runs the `instance` scale test for apps in marathon. This is for apps and not pods.
    The instance test is defined as 1 app with X number of instances.
    the test is scaling up to X number of instances of an application.
    The details of how many instances are defined in the test_obj.
    This test will make 1 HTTP requests against Marathon.
    :param test_obj: Is of type ScaleTest and defines the criteria for the test and logs the results and events of the test.
    """
    with clean_marathon_state(test_obj):
        # launch phase
        test_obj.start_test()
        launch_results = test_obj.launch_results
        try:
            launch_apps(test_obj)
        except:
            # service unavail == wait for marathon
            # NOTE(review): deliberately broad -- any launch failure is
            # recorded and the deploy phase still runs.
            launch_results.failed('Failure to launched (but we still will wait for deploys)')
            wait_for_marathon_up(test_obj)
        else:
            launch_results.completed()
        # deployment phase: wait for target scale regardless of launch result
        try:
            test_obj.reset_loop_count()
            time_deployment2(test_obj)
        except Exception as e:
            print(e)
            test_obj.deploy_results.failed(e)
def group_test_app(test_obj):
    """
    Runs the `group` scale test for apps in marathon. This is for apps and not pods.
    The group test is defined as X number of apps with Y number of instances.
    Y number of instances is commonly 1. The test is scaling up to X number of application and instances as submitted as 1 request.
    The details of how many instances are defined in the test_obj.
    This test will make 1 HTTP requests against Marathon.
    :param test_obj: Is of type ScaleTest and defines the criteria for the test and logs the results and events of the test.
    """
    with clean_marathon_state(test_obj):
        # launch phase: a single POST for the whole group
        test_obj.start_test()
        launch_results = test_obj.launch_results
        try:
            launch_group(test_obj)
        except Exception as e:
            print(e)
            # service unavail == wait for marathon
            launch_results.failed(e)
            wait_for_marathon_up(test_obj)
        else:
            launch_results.completed()
        # deployment phase: wait for target scale regardless of launch result
        try:
            test_obj.reset_loop_count()
            time_deployment2(test_obj)
        except Exception as e:
            print(e)
            test_obj.deploy_results.failed(e)
def delete_all_apps_wait(test_obj=None, msg='undeployment failure'):
    """ Used to remove all instances of apps and wait until the deployment finishes

    :param test_obj: optional ScaleTest whose events record progress
    :param msg: label retained for callers (not used directly here)
    """
    if test_obj is not None and test_obj.deploy_results.current_scale > 0:
        test_obj.add_event('Undeploying {} tasks'.format(test_obj.deploy_results.current_scale))
    try:
        delete_all_apps()
    except Exception as e:
        log_error_event(test_obj, e, noisy=True)
    # some deletes (group test deletes commonly) timeout on remove_app
    # however it is a marathon internal issue on getting a timely response
    # all tested situations the remove did succeed
    try:
        undeployment_wait(test_obj)
    except Exception as e:
        log_error_event(test_obj, e, noisy=True)
        # Bug fix: was `assert False, e`, which is stripped under -O and
        # discards the traceback; re-raise preserves both.
        raise
def undeployment_wait(test_obj=None):
    """Poll until no deployments remain; after 10 consecutive query
    failures record a fatal event on test_obj and raise TestException.

    When test_obj is given, its undeploy timing is recorded on success.
    """
    client = marathon.create_client()
    start = time.time()
    deployment_count = 1
    failure_count = 0  # consecutive failures only; reset on success
    while deployment_count > 0:
        # need protection when tearing down
        try:
            deployments = client.get_deployments()
            deployment_count = len(deployments)
            if deployment_count > 0:
                time.sleep(1)
                failure_count = 0
        except:
            failure_count += 1
            # consecutive failures great than x
            if failure_count > 10 and test_obj is not None:
                test_obj.failed('Too many failures waiting for undeploy', FATAL_CONSECUTIVE_UNDEPLOYMENT)
                raise TestException()
            # assume marathon bounced; wait for it to come back
            wait_for_marathon_up(test_obj)
            pass
    if test_obj is not None:
        test_obj.undeploy_complete(start)
def count_deployment(test_obj, step_target):
    """Wait until the current scale reaches step_target (an intermediate
    target used by launch_apps every 100 adds).

    Returns True when the wait was aborted, False when the step target
    was reached. Mirrors time_deployment2 but for a partial target.
    """
    deploy_results = test_obj.deploy_results
    client = marathon.create_client()
    deploying = True
    abort = False
    failure_count = 0         # consecutive generic failures
    scale_failure_count = 0   # consecutive scale regressions
    while deploying and not abort:
        try:
            task_count = current_scale()
            # may raise DCOSScaleException / DCOSNotScalingException
            deploy_results.set_current_scale(task_count)
            deploying = task_count < step_target
            if deploying:
                time.sleep(calculate_deployment_wait_time(test_obj))
                # reset failure count, it is used for consecutive failures
                failure_count = 0
                quiet_wait_for_marathon_up(test_obj)
                abort = abort_deployment_check(test_obj)
            scale_failure_count = 0
        except DCOSScaleException as e:
            # current scale is lower than previous scale
            log_error_event(test_obj, e, ERROR_SCALING)
            scale_failure_count = scale_failure_count + 1
            # consecutive failures
            if scale_failure_count > MAX_CONSECUTIVE_SCALE_FAILS:
                deploy_results.failed(
                    'Aborting based on too many failures: {}'.format(scale_failure_count),
                    FATAL_CONSECUTIVE_SCALING)
                abort = True
            # need some time
            else:
                time.sleep(calculate_scale_wait_time(test_obj, scale_failure_count))
                quiet_wait_for_marathon_up(test_obj)
        except DCOSNotScalingException as e:
            # scale stuck for too long -- unrecoverable
            deploy_results.failed(e, FATAL_NOT_SCALING)
            abort = True
        except Exception as e:
            log_error_event(test_obj, e, ERROR_DEPLOYMENT)
            failure_count = failure_count + 1
            # consecutive failures > x will fail test
            if failure_count > MAX_CONSECUTIVE_SCALE_FAILS:
                message = 'Too many failures query for deployments'
                deploy_results.failed(message, FATAL_CONSECUTIVE_DEPLOYMENT)
                raise TestException(message)
            time.sleep(calculate_deployment_wait_time(test_obj, failure_count))
            quiet_wait_for_marathon_up(test_obj)
            pass
    return abort
def time_deployment2(test_obj):
    """ Times the deployment of a launched set of applications for this test object.
    This function will wait until the following conditions are met or occur:
    * target scale is reached
    * 5 consecutive DCOSScaleException (current scale is less than previous scale)
    * 1 DCOSNotScalingException (scale hasn't increased for 20 mins)
    * Any other exception happens 10x consecutive (this happens at very high scale)
    """
    deploy_results = test_obj.deploy_results
    client = marathon.create_client()
    deploying = True
    abort = False
    failure_count = 0         # consecutive generic failures
    scale_failure_count = 0   # consecutive scale regressions
    while deploying and not abort:
        try:
            task_count = current_scale()
            # may raise DCOSScaleException / DCOSNotScalingException
            deploy_results.set_current_scale(task_count)
            deploying = not deploy_results.is_target_reached()
            if deploying:
                time.sleep(calculate_deployment_wait_time(test_obj))
                # reset failure count, it is used for consecutive failures
                failure_count = 0
                quiet_wait_for_marathon_up(test_obj)
                abort = abort_deployment_check(test_obj)
            scale_failure_count = 0
        except DCOSScaleException as e:
            # current scale is lower than previous scale
            log_error_event(test_obj, e, ERROR_SCALING, True)
            scale_failure_count = scale_failure_count + 1
            if scale_failure_count > MAX_CONSECUTIVE_SCALE_FAILS:
                deploy_results.failed(
                    'Aborting based on too many failures: {}'.format(scale_failure_count),
                    FATAL_CONSECUTIVE_SCALING)
                abort = True
            # need some time
            else:
                time.sleep(calculate_scale_wait_time(test_obj, scale_failure_count))
                quiet_wait_for_marathon_up(test_obj)
        except DCOSNotScalingException as e:
            # scale stuck for too long -- unrecoverable
            deploy_results.failed(e, FATAL_NOT_SCALING)
            abort = True
        except Exception as e:
            log_error_event(test_obj, e, ERROR_DEPLOYMENT)
            failure_count = failure_count + 1
            # consecutive failures > x will fail test
            if failure_count > 10:
                message = 'Too many failures query for deployments'
                print(e)
                deploy_results.failed(message, FATAL_CONSECUTIVE_LAUNCH)
                raise TestException(message)
            time.sleep(calculate_deployment_wait_time(test_obj, failure_count))
            quiet_wait_for_marathon_up(test_obj)
    loop_msg = 'loop count: {}'.format(test_obj.loop_count)
    print(loop_msg)
    test_obj.add_event(loop_msg)
    # record final verdict
    if deploy_results.is_target_reached():
        deploy_results.completed()
    else:
        deploy_results.failed('Target NOT reached')
def calculate_scale_wait_time(test_obj, failure_count):
    """Linear backoff: 10 seconds per consecutive scale failure."""
    return 10 * failure_count
def abort_deployment_check(test_obj):
    """ Returns True if we should abort, otherwise False
    Currently it looks at time duration of this test (MAX_HOURS_OF_TEST max)
    """
    if elapse_time(test_obj.start) > timedelta(hours=MAX_HOURS_OF_TEST).total_seconds():
        # Bug fix: the message referenced an undefined name `hours`,
        # raising NameError exactly when the timeout fired.
        log_error_event(
            test_obj,
            'Test taking longer than {} hours'.format(MAX_HOURS_OF_TEST),
            ERROR_SCALE_TIMEOUT)
        return True
    return False
def calculate_deployment_wait_time(test_obj, failure_count=0):
    """ Calculates wait time based potentially a number of factors.
    If we need an exponential backoff this is the place.
    possbilities:
    deploy_results.avg_response_time,
    deploy_results.last_response_time
    failure_count
    outstanding_deployments
    current_scale
    max response time is 10s, as we approach that bad things happen

    return time in seconds to wait
    """
    deploy_results = test_obj.deploy_results
    # base wait derived from how slowly marathon is responding
    wait_time = 1
    if deploy_results.last_response_time < 1:
        wait_time = 1
    elif deploy_results.last_response_time > 8:
        wait_time = 5
    # extra backoff for consecutive failures.
    # Bug fix: failure_count == 7 previously matched neither
    # `> 3 and < 7` nor `> 7`, so it got no backoff at all.
    if failure_count > 3 and failure_count < 7:
        wait_time = wait_time + 5
    elif failure_count >= 7:
        wait_time = wait_time + 10
    return wait_time
def elapse_time(start, end=None):
    """Seconds between start and end (now when end is omitted),
    rounded to 3 decimals."""
    finish = time.time() if end is None else end
    return round(finish - start, 3)
def write_meta_data(metadata=None, filename='meta-data.json'):
    """Serialize metadata to filename as JSON.

    Bug fix: the default was a shared mutable dict (`metadata={}`);
    any caller mutating it would leak state into later calls. None is
    used as the sentinel and treated as an empty dict, preserving the
    original behavior for no-argument calls.
    """
    with open(filename, 'w') as out:
        json.dump({} if metadata is None else metadata, out)
def get_cluster_metadata():
    """Collect cluster facts: DC/OS and marathon versions, agent and
    master counts, available resources, and (when queryable) the
    enterprise security mode."""
    try:
        version = ee_version()
    except Exception:
        # Bug fix: was a bare except (also caught KeyboardInterrupt).
        # EE/security info is optional; absence is not an error.
        version = None
    resources = available_resources()
    metadata = {
        'dcos-version': dcos_version(),
        'marathon-version': get_marathon_version(),
        'private-agents': len(get_private_agents()),
        'master-count': len(shakedown.get_all_masters()),
        'resources': {
            'cpus': resources.cpus,
            'memory': resources.mem
        },
        'marathon': 'root'
    }
    if version is not None:
        metadata['security'] = version
    return metadata
def get_marathon_version():
    """Return the version string reported by marathon's /about."""
    about = marathon.create_client().get_about()
    return about.get("version")
def get_mom_json(version='v1.3.6'):
    """Load the mom.json app definition and pin its docker image and
    package-version label to the given version tag."""
    mom_json = get_resource("mom.json")
    mom_json['container']['docker']['image'] = "mesosphere/marathon:{}".format(version)
    mom_json['labels']['DCOS_PACKAGE_VERSION'] = version
    return mom_json
def install_mom(version='v1.3.6'):
    """Deploy Marathon-on-Marathon at the given version and wait for
    the deployment to finish."""
    # the docker tags start with v
    # however the marathon reports version without the v :(
    if not version.startswith('v'):
        version = 'v{}'.format(version)
    marathon.create_client().add_app(get_mom_json(version))
    print("Installing MoM: {}".format(version))
    deployment_wait()
def uninstall_mom():
    """Tear down Marathon-on-Marathon: shut down its framework (best
    effort), remove the marathon-user app with retries, then clear its
    zk node."""
    try:
        framework_id = get_service_framework_id('marathon-user')
        if framework_id is not None:
            print('uninstalling: {}'.format(framework_id))
            dcos_client = mesos.DCOSClient()
            dcos_client.shutdown_framework(framework_id)
            time.sleep(2)
    except:
        # best effort -- the framework may already be gone
        pass
    removed = False
    max_times = 10
    while not removed:
        try:
            max_times = max_times - 1
            client = marathon.create_client()
            client.remove_app('marathon-user')
            deployment_wait()
            time.sleep(2)
            removed = True
        except DCOSException:
            # remove_app throws DCOSException if it doesn't exist
            removed = True
        except Exception:
            # http or other exception and we retry
            traceback.print_exc()
            time.sleep(5)
            # Bug fix: was `if max_time > 0: pass` -- `max_time` is
            # undefined (NameError) and the retry budget was never
            # enforced. Stop retrying once the budget is exhausted.
            if max_times <= 0:
                break
    delete_zk_node('universe/marathon-user')
def wait_for_marathon_up(test_obj=None, timeout=60 * 5):
    """Wait for the marathon endpoint this test targets: root marathon
    (with the given timeout) or marathon-user for MoM tests."""
    on_root = test_obj is None or 'root' in test_obj.mom
    if on_root:
        wait_for_service_endpoint('marathon', timeout)
    else:
        wait_for_service_endpoint('marathon-user')
def quiet_wait_for_marathon_up(test_obj=None, timeout=60 * 5):
    """Like wait_for_marathon_up but swallows wait failures -- used in
    polling loops where a transient outage must not abort the test."""
    try:
        wait_for_marathon_up(test_obj, timeout)
    except Exception:
        # Bug fix: was a bare except, which also swallowed
        # KeyboardInterrupt and made the polling loops unkillable.
        pass
def ensure_test_mom(test_obj):
    """Install the MoM version required by test_obj; record a failure
    on the test object when installation is impossible."""
    if ensure_mom_version(test_obj.mom_version):
        return True
    test_obj.failed('Unable to install mom')
    return False
def ensure_mom_version(version):
    """Make sure the requested MoM version is running, reinstalling it
    when necessary. Returns True on success."""
    if is_mom_version(version):
        return True
    try:
        uninstall_mom()
        install_mom(version)
        wait_for_service_endpoint('marathon-user', 1200)
    except Exception as e:
        traceback.print_exc()
        return False
    return True
def is_mom_version(version):
    """Return True when the running marathon-user reports the given
    version; retries transient failures up to max_times."""
    same_version = False
    max_times = 10
    check_complete = False
    while not check_complete:
        try:
            # Bug fix: was `max_times == 1`, a no-op comparison, so the
            # retry budget never decreased and the loop could spin forever.
            max_times -= 1
            with shakedown.marathon_on_marathon():
                client = marathon.create_client()
                about = client.get_about()
                same_version = version == about.get("version")
                check_complete = True
        except DCOSException:
            # if marathon doesn't exist yet
            return False
        except Exception as e:
            if max_times > 0:
                # this failure only happens at very high scale
                # it takes a lot of time to recover
                wait_for_service_endpoint('marathon-user', 600)
            else:
                return False
    return same_version
class DCOSScaleException(DCOSException):
    """ Thrown when the current scale is less than the last reported scale
    """

    def __init__(self, message):
        self.message = message

    # Bug fix: a `message` method was defined here, but the instance
    # attribute set in __init__ shadowed it, making it dead code (and,
    # if reachable, it returned itself). Removed.

    def __str__(self):
        return self.message
class DCOSNotScalingException(DCOSException):
    """ Thrown when scale has remained the same for a predetermined amount of time.
    """

    def __init__(self, message):
        self.message = message

    # Bug fix: a `message` method was defined here, but the instance
    # attribute set in __init__ shadowed it, making it dead code (and,
    # if reachable, it returned itself). Removed.

    def __str__(self):
        return self.message
class LaunchResults(object):
""" Provides timing and test data for the first phase of a ScaleTest.
"""
def __init__(self, this_test):
self.success = False
self.avg_response_time = 0.0
self.last_response_time = 0.0
self.start = this_test.start
self.current_test = this_test
def __str__(self):
return "launch success: {} avg response time: {} last response time: {}".format(
self.success,
self.avg_response_time,
self.last_response_time)
def __repr__(self):
return "launch failure: {} avg response time: {} last response time: {}".format(
self.success,
self.avg_response_time,
self.last_response_time)
def current_response_time(self, response_time):
if response_time > 0.0:
self.last_response_time = response_time
if self.avg_response_time == 0.0:
self.avg_response_time = response_time
else:
self.avg_response_time = (self.avg_response_time + response_time)/2
def completed(self):
self.success = True
self.current_response_time(time.time())
self.current_test.add_event('launch successful')
def failed(self, message='', failure_type=ERROR_LAUNCH):
self.success = False
self.current_response_time(time.time())
self.current_test.add_event('{} {}'.format(failure_type, message))
class DeployResults(object):
    """ Provides timing and test data for the second phase of a ScaleTest.

    Watches the observed task count: going backwards raises
    DCOSScaleException, and stagnating for `minutes` raises
    DCOSNotScalingException.
    """

    def __init__(self, this_test):
        self.success = False
        self.avg_response_time = 0.0
        self.last_response_time = 0.0
        self.current_scale = 0
        self.target = this_test.target
        self.start = this_test.start
        self.current_test = this_test
        # Watchdog deadline; initialized on the first scale sample.
        self.end_time = None

    def __str__(self):
        # BUG FIX: formerly formatted `self.failure`, an attribute that is
        # never set, so str() always raised AttributeError.
        return "deploy failure: {} avg response time: {} last response time: {} scale: {}".format(
            self.success,
            self.avg_response_time,
            self.last_response_time,
            self.current_scale)

    def __repr__(self):
        return "deploy failure: {} avg response time: {} last response time: {} scale: {}".format(
            self.success,
            self.avg_response_time,
            self.last_response_time,
            self.current_scale)

    def set_current_scale(self, task_count):
        """Record the latest observed task count.

        Raises DCOSScaleException if the count decreased, and
        DCOSNotScalingException if it has not increased for `minutes`.
        """
        minutes = 20
        # initialize timer (it is reset for every successful increment)
        if self.current_test.loop_count == 0:
            self.end_time = time.time() + minutes * 60
        self.current_test.increment_loop_count()
        # if task_count < current_scale exception
        if self.current_scale > task_count:
            raise DCOSScaleException('Scaling Failed: Previous scale: {}, Current scale: {}'.format(
                self.current_scale,
                task_count))
        # progress resets the watchdog; stagnation eventually trips it
        if self.current_scale < task_count:
            self.end_time = time.time() + minutes * 60
            self.current_scale = task_count
        if time.time() > self.end_time:
            raise DCOSNotScalingException("Deployment Scale of {} hasn't changed for {} mins".format(task_count, minutes))

    def is_target_reached(self):
        """True once the observed scale meets or exceeds the target."""
        return self.current_scale >= self.target

    def current_response_time(self, response_time):
        """Record a response time; non-positive samples are ignored."""
        if response_time > 0.0:
            self.last_response_time = response_time
            if self.avg_response_time == 0.0:
                self.avg_response_time = response_time
            else:
                # Running pairwise average of all recorded samples.
                self.avg_response_time = (self.avg_response_time + response_time)/2

    def completed(self):
        """Mark deployment successful and finish the owning test."""
        self.success = True
        self.current_test.successful()
        self.current_response_time(time.time())
        self.current_test.add_event('Deployment successful')
        self.current_test.add_event('Scale reached: {}'.format(self.current_scale))

    def failed(self, message='', failure_type=ERROR_DEPLOYMENT):
        """Mark deployment failed and record the reason on the test."""
        self.current_test.failed(message)
        self.success = False
        self.current_response_time(time.time())
        self.current_test.add_event('Scale reached: {}'.format(self.current_scale))
        self.current_test.add_event('{} {}'.format(failure_type, message))
class UnDeployResults(object):
    """Timing data for the undeploy (final) phase of a ScaleTest.

    Starts out marked successful: undeploy failures are interesting but
    do not count against the scale test itself.
    """

    def __init__(self, this_test):
        self.success = True
        self.avg_response_time = 0.0
        self.last_response_time = 0.0
        self.start = this_test.start

    def _describe(self):
        # Shared text for both __str__ and __repr__.
        return "undeploy failure: {} avg response time: {} last response time: {}".format(
            self.success,
            self.avg_response_time,
            self.last_response_time)

    def __str__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()
class ScaleTest(object):
    """ Defines a marathon scale test and collects the scale test data.
    A scale test has 3 phases of interest: 1) launching, 2) deploying and 3) undeploying
    `under_test` defines apps or pods
    `style` defines instance, count or group
    instance - is 1 app with X instances (makes 1 http launch call)
    count - is X apps with Y (often 1) instances each (makes an http launch for each X)
    group - is X apps in 1 http launch call
    All events are logged in the events array in order.
    A "successful" test is one that completed launch and deployment successfully.
    Undeploys that fail are interesting but do not count towards success of scale test.
    """

    def __init__(self, name, mom, under_test, style, count, instance):
        # test style and criteria
        self.name = name
        self.under_test = under_test
        self.style = style
        self.instance = int(instance)
        self.count = int(count)
        self.start = time.time()
        self.mom = mom
        # ordered log of human-readable event strings (see add_event)
        self.events = []
        # total number of tasks the deployment should reach
        self.target = int(instance) * int(count)
        # successful, failed, skipped
        # failure can happen in any of the test phases below
        self.status = 'running'
        self.test_time = 0.0
        self.undeploy_time = None
        self.skipped = False
        # sample counter used by DeployResults.set_current_scale watchdog
        self.loop_count = 0
        # results are in these objects
        self.launch_results = LaunchResults(self)
        self.deploy_results = DeployResults(self)
        self.undeploy_results = UnDeployResults(self)

    def __str__(self):
        return "test: {} status: {} time: {} events: {}".format(
            self.name,
            self.status,
            self.test_time,
            len(self.events))

    def __repr__(self):
        return "test: {} status: {} time: {} events: {}".format(
            self.name,
            self.status,
            self.test_time,
            len(self.events))

    def add_event(self, eventInfo):
        # Every event is stamped with the elapsed time since start_test().
        self.events.append('{} {} (time in test: {})'.format(EVENT_HEADER, eventInfo, pretty_duration_safe(elapse_time(self.start))))

    def _status(self, status):
        """ end of scale test, however still may have events like undeploy_time
        this marks the end of the test time
        """
        self.status = status
        if 'successful' == status or 'failed' == status:
            self.test_time = elapse_time(self.start)

    def successful(self):
        # Terminal state: records the event and freezes test_time.
        self.add_event('successful')
        self._status('successful')

    def failed(self, reason="unknown"):
        # Terminal state: records the reason and freezes test_time.
        self.add_event('failed: {}'.format(reason))
        self._status('failed')

    def skip(self, reason="unknown"):
        # Skipped tests keep test_time at 0.0 (see _status).
        self.add_event('skipped: {}'.format(reason))
        self._status('skipped')
        self.skipped = True

    def undeploy_complete(self, start):
        # Undeploy duration is tracked separately from test_time.
        self.add_event('Undeployment complete')
        self.undeploy_time = elapse_time(start)

    def start_test(self):
        """ Starts the timers for the test. There can be a delay of cleanup of the
        cluster between the creation of the ScaleTest object and the real start
        of the test. The duration of a scale test is from the point of starting
        the launch phase until the end of the deployment phase. The undeployment
        is tracked but not counted as part of the test time.
        """
        start_time = time.time()
        self.start = start_time
        self.launch_results.start = start_time
        self.deploy_results.start = start_time
        self.undeploy_results.start = start_time

    def increment_loop_count(self):
        self.loop_count = self.loop_count + 1

    def reset_loop_count(self):
        self.loop_count = 0

    def log_events(self):
        # Dump the ordered event log to stdout.
        for event in self.events:
            print(event)

    def log_stats(self):
        # One-line human-readable summary of outcome and durations.
        print(' *status*: {}, deploy: {}, undeploy: {}'.format(
            self.status,
            pretty_duration_safe(self.test_time),
            pretty_duration_safe(self.undeploy_time)))
def start_test(name, marathons=None):
    """ test name example: test_mom1_apps_instances_1_100
    with list of marathons to test against. If marathons are None, the root marathon is tested.
    """
    parts = name.split("_")
    # Drop the leading 'test' token; the rest are ScaleTest parameters.
    test = create_test_object(*parts[1:])
    test.mom_version = 'root' if marathons is None else marathons[test.mom]
    return test
def create_test_object(marathon_name='root', under_test='apps', style='instances', num_apps=1, num_instances=1):
    """Build a ScaleTest whose name encodes all of its parameters."""
    test_name = '_'.join(
        ['test', marathon_name, under_test, style, str(num_apps), str(num_instances)])
    scale_test = ScaleTest(test_name, marathon_name, under_test, style,
                           num_apps, num_instances)
    scale_test.mom_version = marathon_name
    return scale_test
def scaletest_resources(test_obj):
    """Return the resources needed to run `test_obj.target` tasks.

    The lookup is retried every second for up to 10s.
    """
    @retrying.retry(wait_fixed=1000, stop_max_delay=10000)
    def get_resource_need():
        # assumes 0.01 CPU and 32 memory units per task -- TODO confirm units
        return resources_needed(test_obj.target, .01, 32)

    return get_resource_need()
def outstanding_deployments():
    """ Provides a count of deployments still looking to land.
    """
    client = marathon.create_client()
    # Sum the per-app counts across everything still queued.
    return sum(app['count'] for app in client.get_queued_apps())
def is_deployment_active():
    """Return True while marathon reports at least one in-flight deployment."""
    deployments = marathon.create_client().get_deployments()
    return bool(deployments)
def current_scale():
    """ Provides a count of tasks which are running on Mesos. The default
    app_id is None which provides a count of all tasks.
    """
    active_tasks = get_active_tasks()
    return len(active_tasks)
def current_marathon_scale(app_id=None):
    """ Provides a count of tasks which are running on marathon. The default
    app_id is None which provides a count of all tasks.
    """
    return len(marathon.create_client().get_tasks(app_id))
def commaify(number):
    """Format *number* with thousands separators, e.g. 1234567 -> '1,234,567'."""
    return format(number, ',')
def pretty_duration_safe(duration):
    """None-tolerant wrapper around pretty_duration."""
    return None if duration is None else pretty_duration(duration)
def clean_root_marathon():
    """Fully reset the root marathon: stop it, wipe its ZK state, restart it."""
    stop_root_marathon()
    # Child znodes are removed before their parents.
    for znode in ('/marathon/leader-curator',
                  '/marathon/leader',
                  '/marathon/state/framework:id',
                  '/marathon/state/internal:storage:version',
                  '/marathon/state',
                  '/marathon'):
        delete_zk_node(znode)
    start_root_marathon()
    wait_for_marathon_up()
def stop_root_marathon():
    # Stop the root marathon systemd unit on the master node.
    run_command_on_master('sudo systemctl stop dcos-marathon')
def start_root_marathon():
    # Start the root marathon systemd unit on the master node.
    run_command_on_master('sudo systemctl start dcos-marathon')
def private_resources_available():
    """Total cluster resources minus the public agents' share."""
    return available_resources() - public_resources_available()
def public_resources_available():
    # Assumes each public agent contributes Resources(4, 14018.0)
    # (presumably 4 CPUs / ~14GB memory) -- TODO confirm units.
    return len(get_public_agents()) * Resources(4, 14018.0)
@retrying.retry(wait_fixed=1000, stop_max_delay=3000)
def check_cluster_exists():
    """Assert the DC/OS base URL answers 200; retried for up to 3s."""
    response = http.get(shakedown.dcos_url())
    assert response.status_code == 200
def ensure_clean_state(test_obj=None):
    """Best-effort cleanup: wait for marathon, then delete all apps.

    Any exception is printed and swallowed deliberately so a failed
    cleanup never aborts the surrounding test run.
    """
    try:
        wait_for_marathon_up(test_obj)
        delete_all_apps_wait(test_obj)
    except Exception as e:
        print(e)
        wait_for_marathon_up(test_obj)
@contextlib.contextmanager
def clean_marathon_state(test_obj=None):
    """Context manager that guarantees a clean cluster before AND after
    the wrapped block.

    BUG FIX: the trailing cleanup formerly ran only on normal exit; if the
    body raised, the cluster was left dirty. The yield is now wrapped in
    try/finally so cleanup always runs.
    """
    ensure_clean_state(test_obj)
    try:
        yield
    finally:
        ensure_clean_state(test_obj)
def get_test_style_key_base(current_test):
    """ The style key is historical and is the key to recording test results.
    For root marathon the key is `root_instances` or `root_group`.
    """
    return get_style_key_base(current_test.mom, current_test.style)
def get_test_key(current_test, key):
    """Full result key for *current_test*, e.g. 'root_instances_target'."""
    return get_key(current_test.mom, current_test.style, key)
def get_style_key_base(marathon_name, style):
    """Join marathon name and style into the historical key prefix."""
    return '%s_%s' % (marathon_name, style)
def get_key(marathon_name, style, key):
    """Compose the full result key: '<marathon>_<style>_<key>'."""
    base = get_style_key_base(marathon_name, style)
    return '{}_{}'.format(base, key)
def empty_stats():
    """Return a fresh results dict: one empty list per (style, metric) pair.

    Keys follow the historical '<marathon>_<style>_<metric>' scheme for
    root marathon, e.g. 'root_instances_deploy_time'.
    """
    styles = ('instances', 'count', 'group')
    metrics = ('target', 'max', 'deploy_time', 'human_deploy_time',
               'launch_status', 'deployment_status', 'errors')
    return {'root_{}_{}'.format(s, m): [] for s in styles for m in metrics}
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import decorators
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
@decorators.services_required("neutron")
class TestRouters(helpers.TestCase):
    """Integration tests for Neutron router CRUD through the Horizon UI.

    Each helper drives the routers page object and asserts on the
    success/error toast messages and the routers table state.
    """

    # Randomized per-run so parallel runs don't collide on names.
    ROUTER_NAME = helpers.gen_random_resource_name("router")

    @property
    def routers_page(self):
        # Navigates on every access; the page object is not cached.
        return self.home_pg.go_to_project_network_routerspage()

    def _create_router(self):
        """Create ROUTER_NAME and assert it appears active in the table."""
        routers_page = self.routers_page
        routers_page.create_router(self.ROUTER_NAME)
        self.assertTrue(
            routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(routers_page.is_router_present(self.ROUTER_NAME))
        self.assertTrue(routers_page.is_router_active(self.ROUTER_NAME))

    def _delete_router(self):
        """Delete ROUTER_NAME and assert it is gone from the table."""
        routers_page = self.routers_page
        routers_page.delete_router(self.ROUTER_NAME)
        self.assertTrue(
            routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(routers_page.is_router_present(self.ROUTER_NAME))

    def test_router_create(self):
        """tests the router creation and deletion functionalities:
        * creates a new router for public network
        * verifies the router appears in the routers table as active
        * deletes the newly created router
        * verifies the router does not appear in the table after deletion
        """
        self._create_router()
        self._delete_router()

    def _create_interface(self, interfaces_page):
        """Add an interface and assert it shows up with status 'Down'."""
        interfaces_page.create_interface()
        interface_name = interfaces_page.interfaces_names[0]
        self.assertTrue(
            interfaces_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            interfaces_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(interfaces_page.is_interface_present(interface_name))
        self.assertTrue(interfaces_page.is_interface_status(
            interface_name, 'Down'))

    def _delete_interface(self, interfaces_page, interface_name):
        """Delete *interface_name* and assert it is gone from the table."""
        interfaces_page.delete_interface(interface_name)
        self.assertTrue(
            interfaces_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            interfaces_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(interfaces_page.is_interface_present(interface_name))

    @decorators.skip_because(bugs=['1792028'])
    def test_router_add_delete_interface(self):
        """Tests the router interface creation and deletion functionalities:
        * Follows the steps to create a new router
        * Clicks on the new router name from the routers table
        * Moves to the Interfaces page/tab
        * Adds a new Interface for the first subnet id available
        * Verifies the new interface is in the routers table by checking that
          the interface is present in the table
        * Deletes the newly created interface
        * Verifies the interface is no longer in the interfaces table
        * Switches to the routers view by clicking on the breadcrumb link
        * Follows the steps to delete the router
        """
        self._create_router()
        routers_page = self.routers_page
        router_interfaces_page = routers_page. \
            go_to_interfaces_page(self.ROUTER_NAME)
        self._create_interface(router_interfaces_page)
        interface_name = router_interfaces_page.interfaces_names[0]
        self._delete_interface(router_interfaces_page, interface_name)
        router_interfaces_page.switch_to_routers_page()
        self._delete_router()

    @decorators.skip_because(bugs=['1792028'])
    def test_router_delete_interface_by_row(self):
        """Tests the router interface creation and deletion by row action:
        * Follows the steps to create a new router
        * Clicks on the new router name from the routers table
        * Moves to the Interfaces page/tab
        * Adds a new Interface for the first subnet id available
        * Verifies the new interface is in the routers table
        * Deletes the newly created interface by row action
        * Verifies the interface is no longer in the interfaces table
        * Switches to the routers view by clicking on the breadcrumb link
        * Follows the steps to delete the router
        """
        self._create_router()
        routers_page = self.routers_page
        router_interfaces_page = routers_page. \
            go_to_interfaces_page(self.ROUTER_NAME)
        self._create_interface(router_interfaces_page)
        interface_name = router_interfaces_page.interfaces_names[0]
        router_interfaces_page.delete_interface_by_row_action(interface_name)
        router_interfaces_page.switch_to_routers_page()
        self._delete_router()

    @decorators.skip_because(bugs=['1792028'])
    def test_router_overview_data(self):
        """Checks the router overview page and its linked network page."""
        self._create_router()
        routers_page = self.routers_page
        router_overview_page = routers_page.\
            go_to_overview_page(self.ROUTER_NAME)
        self.assertTrue(router_overview_page.
                        is_router_name_present(self.ROUTER_NAME))
        self.assertTrue(router_overview_page.is_router_status("Active"))
        network_overview_page = router_overview_page.go_to_router_network()
        # By default the router is created in the 'public' network so the line
        # below checks that such name is present in the network
        # details/overview page
        self.assertTrue(network_overview_page.is_network_name_present())
        self.assertTrue(network_overview_page.is_network_status("Active"))
        self._delete_router()
class TestAdminRouters(helpers.AdminTestCase):
    """Admin-side router tests: create as project, edit/delete as admin."""

    # Randomized per-run so parallel runs don't collide on names.
    ROUTER_NAME = helpers.gen_random_resource_name("router")

    @decorators.skip_because(bugs=['1792028'])
    @decorators.services_required("neutron")
    def test_router_create_admin(self):
        """tests the router creation and deletion functionalities:
        * creates a new router for public network
        * verifies the router appears in the routers table as active
        * edits router name
        * checks router name was updated properly
        * deletes the newly created router
        * verifies the router does not appear in the table after deletion
        """
        routers_page = self.home_pg.go_to_project_network_routerspage()
        routers_page.create_router(self.ROUTER_NAME)
        self.assertTrue(
            routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(routers_page.is_router_present(self.ROUTER_NAME))
        self.assertTrue(routers_page.is_router_active(self.ROUTER_NAME))
        self.home_pg.go_to_admin_overviewpage()
        admin_routers_page = self.home_pg.go_to_admin_network_routerspage()
        # NOTE(review): the next two asserts reuse the project-side
        # `routers_page` object instead of `admin_routers_page`; this looks
        # unintentional -- confirm which table should be checked here.
        self.assertTrue(routers_page.is_router_present(self.ROUTER_NAME))
        self.assertTrue(routers_page.is_router_active(self.ROUTER_NAME))
        new_name = "edited_" + self.ROUTER_NAME
        admin_routers_page.edit_router(self.ROUTER_NAME, new_name=new_name)
        self.assertTrue(
            admin_routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            admin_routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(
            admin_routers_page.is_router_present(new_name))
        self.assertTrue(
            admin_routers_page.is_router_active(new_name))
        admin_routers_page.delete_router(new_name)
        self.assertTrue(
            admin_routers_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            admin_routers_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(admin_routers_page.is_router_present(new_name))
| |
import json
import inspect
from six import integer_types, string_types, text_type, with_metaclass
from springfield.fields import Field, Empty
from springfield.alias import Alias
from springfield import fields
from anticipate.adapt import adapt, AdaptError
from anticipate import adapter
class EntityBase(object):
    """
    An empty class that does nothing but allow us to determine
    if an Entity references other Entities in the EntityMetaClass.
    We can't do this with Entity directly since Entity can't exist
    until EntityMetaClass is created but EntityMetaClass can't compare
    against Entity since it doesn't exist yet.
    """
class EntityMetaClass(type):
    """Metaclass that collects Field/Alias declarations from the class
    body (and its bases) into ``__fields__``/``__aliases__`` and replaces
    each declaration with a descriptor.
    """

    def __new__(mcs, name, bases, attrs):
        _fields = {}
        aliases = {}

        # Inherit declarations from base classes first so this class's own
        # declarations can override them.
        for base in bases:
            if hasattr(base, '__fields__'):
                _fields.update(base.__fields__)
            if hasattr(base, '__aliases__'):
                # BUG FIX: inherited aliases were previously merged into
                # `_fields`, making inherited Alias objects behave like
                # regular fields (wrong descriptor, wrong flatten/jsonify).
                aliases.update(base.__aliases__)

        for key, val in list(attrs.items()):
            is_cls = inspect.isclass(val)
            if isinstance(val, Field):
                _fields[key] = val
                attrs.pop(key)
            elif isinstance(val, Alias):
                aliases[key] = val
                attrs.pop(key)
            elif is_cls and issubclass(val, Field):
                # A bare Field class is instantiated with defaults.
                _fields[key] = val()
                attrs.pop(key)
            elif isinstance(val, EntityBase) or (is_cls and issubclass(val, EntityBase)):
                # Wrap fields assigned to `Entity`s with an `EntityField`
                _fields[key] = fields.EntityField(val)
                attrs.pop(key)
            elif isinstance(val, list) and len(val) == 1:
                attr = val[0]
                is_cls = inspect.isclass(attr)
                if isinstance(attr, EntityBase) or (is_cls and issubclass(attr, EntityBase)):
                    # Lists that contain just an Entity class are treated as
                    # a collection of that Entity
                    _fields[key] = fields.CollectionField(fields.EntityField(attr))
                elif isinstance(attr, Field) or (is_cls and issubclass(attr, Field)):
                    # Lists that contain just a Field class are treated as
                    # a collection of that Field
                    _fields[key] = fields.CollectionField(attr)

        # Replace each declaration with its data descriptor.
        for key, field in _fields.items():
            attrs[key] = field.make_descriptor(key)

        for key, field in aliases.items():
            attrs[key] = field.make_descriptor(key)

        attrs['__fields__'] = _fields
        attrs['__aliases__'] = aliases

        new_class = super(EntityMetaClass, mcs).__new__(mcs, name, bases, attrs)

        # Give every field/alias a chance to bind to the finished class.
        for key, field in _fields.items():
            field.init(new_class)

        for key, field in aliases.items():
            field.init(new_class)

        return new_class
class Entity(with_metaclass(EntityMetaClass, EntityBase)):
    """Declarative data object whose attributes are defined as `Field`s.

    Values live in ``__values__``; names assigned since creation are
    tracked in ``__changes__``. Attribute access goes through the
    descriptors installed by `EntityMetaClass`.
    """
    __values__ = None
    __changes__ = None
    __fields__ = None
    __aliases__ = None

    def __init__(self, **values):
        # Where the actual values are stored
        object.__setattr__(self, '__values__', {})

        # Set of field names that have changed
        object.__setattr__(self, '__changes__', set([]))

        self.update(values)

    def flatten(self):
        """
        Get the values as basic Python types
        """
        data = {}
        for key, val in self.__values__.items():
            val = self.__fields__[key].flatten(val)
            data[key] = val
        return data

    def jsonify(self):
        """
        Return a dictionary suitable for JSON encoding.
        """
        data = {}
        for key, val in self.__values__.items():
            val = self.__fields__[key].jsonify(val)
            data[key] = val
        return data

    def to_json(self):
        """
        Convert the entity to a JSON string.
        """
        return json.dumps(self.jsonify())

    @classmethod
    def from_json(cls, data):
        """Build an entity from a JSON object string."""
        return cls(**json.loads(data))

    def set(self, key, value):
        """Attribute-style assignment by key."""
        self.__setattr__(key, value)

    def get(self, key, default=None, empty=False):
        """
        Get a value by key. If passed an iterable, get a dictionary of values matching keys.

        :param empty: boolean - Include empty values
        """
        if isinstance(key, string_types):
            return getattr(self, key, default)
        else:
            d = {}
            for k in key:
                if empty:
                    d[k] = getattr(self, k, default)
                else:
                    # Skip keys that have never been set.
                    v = self.__values__.get(k, Empty)
                    if v is not Empty:
                        d[k] = v
            return d

    def update(self, values):
        """
        Update attributes. Ignore keys that aren't fields.
        Allows dot notation.
        """
        if hasattr(values, '__values__'):
            for key, val in values.__values__.items():
                try:
                    self[key] = val
                except KeyError:
                    pass
        else:
            for key, val in values.items():
                try:
                    self[key] = val
                except KeyError:
                    pass

    def _get_field_path(self, entity, target, path=None):
        """
        Use dot notation to get a field and all it's
        ancestory fields.

        Returns a list of (key, name, field, soak) tuples, one per path
        segment; raises KeyError when an intermediate segment is not an
        EntityField.
        """
        path = path or []
        if '.' in target:
            name, right = target.split('.', 1)
            soak = False
            if name.endswith('?'):
                # Targets like 'child?.key' use "soak" to allow `child` to be empty
                name = name[:-1]
                soak = True

            field = entity.__fields__[name]
            key = '.'.join([f[0] for f in path] + [name])
            if isinstance(field, fields.EntityField):
                path.append((key, name, field, soak))
                return self._get_field_path(field.type, right, path)
            else:
                raise KeyError('Expected EntityField for %s' % key)
        else:
            soak = False
            if target.endswith('?'):
                # Targets like 'child?.key' use "soak" to allow `child` to be empty
                target = target[:-1]
                soak = True
            key = '.'.join([f[0] for f in path] + [target])
            path.append((key, target, entity.__fields__[target], soak))
            return path

    def __setattr__(self, name, value):
        """
        Don't allow setting attributes that haven't been defined as fields.
        """
        if name in self.__fields__:
            object.__setattr__(self, name, value)
        else:
            raise AttributeError('Field %r not defined.' % name)

    # Dict interface
    def __getitem__(self, name):
        try:
            if '.' in name:
                pos = self
                path = self._get_field_path(self, name)
                last = path[-1]
                path = path[:-1]
                for field_key, field_name, field, soak in path:
                    if isinstance(field, fields.EntityField):
                        if not getattr(pos, field_name):
                            if soak:
                                return Empty
                            else:
                                raise ValueError('%s is empty' % field_key)
                        pos = getattr(pos, field_name)
                    else:
                        raise ValueError('Expected Entity for %s' % field_key)

                # This should be the end of our path, just get it
                return getattr(pos, last[1])

            return getattr(self, name)
        except AttributeError:
            pass
        # Missing attributes surface as KeyError for dict-style access.
        raise KeyError(name)

    def __setitem__(self, name, value):
        try:
            if '.' in name:
                pos = self
                path = self._get_field_path(self, name)
                last = path[-1]
                path = path[:-1]
                for field_key, field_name, field, soak in path:
                    if isinstance(field, fields.EntityField):
                        if not getattr(pos, field_name):
                            # Create a new Entity instance
                            setattr(pos, field_name, field.type())
                        pos = getattr(pos, field_name)
                    else:
                        raise ValueError('Expected Entity for %s' % field_key)

                # This should be the end of our path, just set it
                return setattr(pos, last[1], value)

            return setattr(self, name, value)
        except AttributeError:
            pass
        # Missing attributes surface as KeyError for dict-style access.
        raise KeyError(name)

    def __delitem__(self, name):
        if name in self.__fields__:
            del self.__values__[name]
        else:
            raise KeyError('Field %r not defined.' % name)

    def __contains__(self, name):
        return name in self.__values__

    def __len__(self):
        return len(self.__values__)

    def iteritems(self):
        return self.__values__.items()

    def items(self):
        return self.__values__.items()

    def clear(self):
        return self.__values__.clear()

    def __iter__(self):
        return iter(self.__values__)

    @classmethod
    def adapt(cls, obj):
        """Adapt *obj* into an instance of this entity class."""
        return adapt(obj, cls)

    @classmethod
    def adapt_all(cls, obj):
        """Lazily adapt an iterable of objects into this entity class."""
        return (adapt(i, cls) for i in obj)

    def __repr__(self):
        return u'<%s %s>' % (self.__class__.__name__, json.dumps(dict(((k, text_type(v)) for k, v in self.__values__.items()))).replace('"', ''))

    def __getstate__(self):
        """Pickle state"""
        return {
            '__values__' : self.__values__,
            '__changes__': self.__changes__
        }

    def __setstate__(self, data):
        """Restore Pickle state"""
        object.__setattr__(self, '__values__', data['__values__'])
        object.__setattr__(self, '__changes__', data['__changes__'])

    def __eq__(self, other):
        return isinstance(other, self.__class__) and \
            self.__values__ == other.__values__

    def __hash__(self):
        # NOTE(review): identity-based hash alongside value-based __eq__
        # means equal entities can hash differently -- confirm intended.
        return id(self)

    def __ne__(self, other):
        # BUG FIX: this was previously misspelled `__neq__`, which Python
        # never invokes; under Python 2 `!=` therefore ignored field values.
        return not self.__eq__(other)

    # Backwards-compatible alias for the old (misspelled) name.
    __neq__ = __ne__
class FlexEntity(Entity):
    """
    An Entity that can have extra attributes added to it.
    """
    # Set of ad-hoc (non-Field) attribute names that have been assigned.
    __flex_fields__ = None

    def __init__(self, **values):
        object.__setattr__(self, '__flex_fields__', set([]))
        super(FlexEntity, self).__init__(**values)

    def __setattr__(self, name, value):
        # Declared fields use the normal descriptor path; anything else is
        # stored directly and tracked as a flex field.
        if name in self.__fields__:
            object.__setattr__(self, name, value)
        else:
            self.__values__[name] = value
            self.__flex_fields__.add(name)
            self.__changes__.add(name)

    def __getattr__(self, name, default=None):
        # Called only for names not found normally; missing names yield
        # `default` (None). NOTE(review): this also answers lookups for
        # optional dunder protocols with None -- confirm acceptable.
        return self.__values__.get(name, default)

    def update(self, values):
        # Unlike Entity.update, unknown keys are accepted (stored as flex).
        for key, val in values.items():
            self.set(key, val)

    def _flatten_value(self, val):
        """
        Have to guess at how to flatten non-fielded values
        """
        if val is None:
            return None
        elif val is Empty:
            return None
        elif isinstance(val, Entity):
            val = val.flatten()
        elif isinstance(val, (tuple, list)) or inspect.isgenerator(val):
            vals = []
            for v in val:
                vals.append(self._flatten_value(v))
            val = vals
        elif isinstance(val, dict):
            data = {}
            for k, v in val.items():
                data[k] = self._flatten_value(v)
            val = data
        elif not isinstance(val, (float,) + integer_types + string_types):
            # Fall back to text for anything that isn't a basic scalar.
            val = text_type(val)
        return val

    def _jsonify_value(self, val):
        # Best-effort JSON conversion for values with no declared Field.
        if val is None:
            val = None
        elif val is Empty:
            val = None
        elif isinstance(val, Entity):
            val = val.jsonify()
        elif isinstance(val, dict):
            data = {}
            for k, v in val.items():
                data[k] = self._jsonify_value(v)
            val = data
        elif isinstance(val, (tuple, list)) or inspect.isgenerator(val):
            vals = []
            for v in val:
                vals.append(self._jsonify_value(v))
            val = vals
        else:
            # Delegate to a registered field type when one exists.
            field = fields.get_field_for_type(val)
            if field:
                val = field.jsonify(val)
        return val

    def flatten(self):
        """
        Get the values as basic Python types
        """
        data = {}
        for key, val in self.__values__.items():
            if key in self.__fields__:
                val = self.__fields__[key].flatten(val)
            else:
                val = self._flatten_value(val)
            data[key] = val
        return data

    def jsonify(self):
        """
        Get the values as basic Python types
        """
        data = {}
        for key, val in self.__values__.items():
            if key in self.__fields__:
                val = self.__fields__[key].jsonify(val)
            else:
                val = self._jsonify_value(val)
            data[key] = val
        return data
@adapter((Entity, dict), Entity)
def to_entity(obj, to_cls):
    """Adapt an Entity or a plain dict into an instance of `to_cls`."""
    entity = to_cls()
    if isinstance(obj, Entity):
        # Flatten first so nested entities become plain values.
        entity.update(obj.flatten())
    elif isinstance(obj, dict):
        entity.update(obj)
    else:
        raise AdaptError('to_entity could not adapt.')
    return entity
| |
#!/usr/bin/env python
import sys, os, itertools, shutil, getopt, re, time
import const
import pdb, traceback
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement
from datetime import *
import metacomm.combinatorics.all_pairs2
import unittest
# Alias for the pairwise test-case generator used throughout this script.
all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2

# Global script state (mutated by the generator functions below).
Manifest_Row = 0                # number of manifest.json cases generated so far
Device_Ip = ""
Device_Ip_List = []
Device_SSH_List = []
Pack_Type = "xpk"               # packaging type for generated web apps
Test_Flag = "positive"          # "positive" or "negative" generation mode
Test_Device_Type = "ssh"
test_start_time = datetime.now().strftime('%m-%d-%H:%M:%S')

# Python 2 hack to force UTF-8 as the default string encoding.
# NOTE(review): reload(sys)/setdefaultencoding is fragile and Python 2 only.
reload(sys)
sys.setdefaultencoding( "utf-8" )
def do_Selfcom(self_combin_file,out_file):
    """Append every line of *self_combin_file* to *out_file*.

    An explicit trailing newline is added because the per-seed
    *_output.txt files are written without one.
    """
    try:
        file = open(self_combin_file)
        allpairs_in = open(out_file,'a+')
        while 1:
            line = file.readline()
            if not line:
                break
            allpairs_in.writelines(line + "\n")
        file.close()
        allpairs_in.close()
        return
    except Exception,e:
        # Best-effort: errors are printed, not propagated.
        print Exception,":",e
def fileline_count(fp):
    """Return the number of lines in the file at path *fp*.

    BUG FIX: the file handle was previously opened and never closed,
    leaking a descriptor per call; `with` now guarantees closure.
    """
    with open(fp) as f:
        return len(f.readlines())
def del_Seed1(in_file):
    """Split *in_file* into per-seed self/ input files, pairwise-combine
    each, merge them into the selfcomb file, then drive output and
    manifest.json generation.

    Input lines look like '<seed>[-suffix]:<v1>,<v2>,...'; consecutive
    lines sharing the same seed prefix land in one *_input.txt file.
    """
    try:
        caseline = ""
        row = 0
        file = open(in_file)
        items = []
        self_file = []
        s_name = p_name = ""
        # Recreate the self/ working directory from scratch.
        if (os.path.isdir("self")):
            do_Clear(const.path +"/self")
        os.mkdir(const.path + "/self")
        while 1:
            p_name = s_name
            line = file.readline()
            if not line:
                break
            line = line.strip('\n\r')
            items = line.split(":")
            s_name = items[0].split("-")[0]
            if ((p_name!=s_name) and (p_name!="")):
                fp=open(const.path + "/self/" + s_name + "_input.txt",'a+')
                fp.writelines(line + "\n")
            else:
                fp= open(const.path + "/self/" + s_name + "_input.txt",'a+')
                fp.writelines(line + "\n")
                if (s_name!=p_name):
                    # First time we see this seed name; remember it.
                    self_file.append(s_name)
            fp.close()
        file.close()
        # Start the merged selfcomb file fresh.
        if (os.path.isfile(const.selfcomb_file)):
            os.remove(const.selfcomb_file)
        for i in range (0,len(self_file)):
            line_count = fileline_count(const.path + "/self/" + self_file[i] + "_input.txt")
            if (line_count >= 2):
                # Multiple value rows: run the all-pairs combinator.
                lists = [[] for m in range(line_count)]
                # NOTE(review): opened 'a+' then read; the initial file
                # position for 'a+' is platform-dependent -- confirm.
                open_input_file = open(const.path + "/self/" + self_file[i] + "_input.txt",'a+')
                while 1:
                    line = open_input_file.readline()
                    if not line:
                        break
                    line = line.strip('\n\r')
                    items = line.split(":")
                    get_item= items[1].split(",")
                    for g in get_item:
                        lists[row].append(g)
                    row = row + 1
                input_pair = all_pairs( lists )
                open_input_file.close()
                output_pair = open(const.path + "/self/" + self_file[i] + "_output.txt",'a+')
                for e, v in enumerate(input_pair):
                    for c in range(0,len(v)):
                        caseline = caseline + v[c]
                        caseline = caseline + ","
                # Strip the trailing comma before writing the single row.
                output_pair.writelines(self_file[i] + ":" + caseline[:-1])
                output_pair.close()
            else:
                # Single row: copy it through unchanged.
                open_input_file = open(const.path + "/self/" + self_file[i] + "_input.txt",'r')
                output_pair = open(const.path + "/self/" + self_file[i] + "_output.txt",'a+')
                while 1:
                    line = open_input_file.readline()
                    if not line:
                        break
                    line = line.strip('\n\r')
                    output_pair.writelines(line)
                output_pair.close()
                open_input_file .close()
            #1*********input_seed -> selfcomb.txt
            # if more self combination, each self generate itself output file,finally all self_input generate one selfcomb.txt
            do_Selfcom(const.path + "/self/" + self_file[i] + "_output.txt",const.selfcomb_file)
            row = 0
        #2*********selfcomb -> output file by allpairs
        gen_selfcomb_File1(const.selfcomb_file, in_file)
        #3*********output -> manifest.json
        gen_Manifest_Json1(const.output_file, in_file)
        return "Manifest.json output ------------------------->O.K"
    except Exception,e:
        print Exception,":",e
        return "Manifest.json output ------------------------->Error"
def gen_Manifest_Json1(output_file,in_file):
    """Turn each tab-separated row of *output_file* into a
    tcs/Crosswalk-Manifest-CheckN/manifest.json plus one generated
    unittest case in test.py (positive mode only).

    The first row of *output_file* holds the manifest field names; each
    following row holds one combination of values.
    """
    try:
        global Manifest_Row
        global Pack_Type
        manifest="{\n "
        file = open(output_file)
        if (Test_Flag=="positive"):
            # Generated test driver; one test method is appended per case.
            testfile = open("test.py" ,'w+')
            testfile.writelines("#!/usr/bin/env python \n# coding=utf-8 \nimport random,os,sys,unittest,run_test,codecs \nreload(sys) \nsys.setdefaultencoding( \"utf-8\" ) \nclass TestCaseUnit(unittest.TestCase): \n ")
        name_list=[]
        get_self=""
        # Header row: manifest field names, tab separated.
        line = file.readline().strip('\n\r')
        items = line.split("	")
        counters = len(items)
        try:
            os.mkdir(const.path + "/tcs")
        except:
            print "make tcs folder error"
        for i in items:
            name_list.append(i)
        while 1:
            line = file.readline()
            if not line:
                break
            line = line.strip('\n\r')
            items = line.split("	")
            counters = len(items)
            os.mkdir(const.path + "/tcs/Crosswalk-Manifest-Check" + str(Manifest_Row+1))
            fp = open(const.path + "/tcs/Crosswalk-Manifest-Check"+str(Manifest_Row+1) + "/manifest.json",'w')
            for i in range(0,len(items)):
                # Scalar fields are emitted quoted; the JSON-valued fields
                # (icons, permissions, launch screen) are emitted raw.
                if ((name_list[i])!="icons" and (name_list[i])!="xwalk_permissions" and (name_list[i])!="xwalk_launch_screen"):
                    if (items[i].find("000")!=-1):
                        # "000" is the placeholder for a space character.
                        items[i] = items[i].replace("000"," ")
                        get_self = get_self + "\"" + name_list[i] + "\"" + " : " + "\"" +items[i].replace("null","") + "\",\n"
                    else:
                        get_self = get_self + "\"" + name_list[i].strip() + "\"" + " : " + "\""+items[i].replace("null","") + "\",\n"
                else:
                    # "comma" is the placeholder for a literal comma.
                    items[i] = items[i].replace("comma",",")
                    get_self = get_self + "\"" + name_list[i] + "\"" + " : "+items[i].replace("null","") + ",\n"
            get_self = "{\n" + get_self[:-2] + "\n}"
            fp.writelines(get_self)
            print "\n-----------------------------------------------------------",items[0]
            print get_self
            testfile.writelines("\n def test_case_" + str(Manifest_Row+1) +"(self):\n self.assertEqual(\"Pass\", run_test.run_test_result(\"Crosswalk-Manifest-Check" + str(Manifest_Row+1) +"\"," + "\""+items[0].decode("utf-8") +"\"" + "))" + "\n " )
            Manifest_Row = Manifest_Row+1
            get_self=""
        testfile.writelines("\nif __name__ == '__main__':\n suite1 = unittest.TestLoader().loadTestsFromTestCase(TestCaseUnit)\n suite = unittest.TestSuite([suite1])\n unittest.TextTestRunner(verbosity=2).run(suite) " )
        file.close()
        testfile.close()
        return "<--------------- Generate manifest.json O.K ------------------>"
    except Exception,e:
        print Exception,"------------------------->:",e
        print traceback.format_exc()
        return "Generate manifest.json error"
def gen_selfcomb_File1(comb_file, in_file):
    """
    Expand a seed/combination file into a tab-separated test-case table.

    Reads ``comb_file`` where each line is ``field-...:v1,v2,...``, writes a
    header row of field names followed by one row per all-pairs combination
    to const.output_file (or const.output_file_ne when the global Test_Flag
    is "negative").

    @param comb_file: path of the input combination file.
    @param in_file: unused here -- kept for interface compatibility with the
        caller. TODO confirm it can be dropped.
    @return: a status string on success; None when any exception was caught
        (the bare ``except`` below swallows everything and only prints the
        traceback) -- NOTE(review): callers comparing the return value should
        be aware of the None case.
    """
    try:
        #if (os.path.isfile("./allpairs/output.txt") & (Test_Flag=="positive")):
        do_Clear("./allpairs/output.txt")
        #do_Clear("./allpairs/output_negative.txt")
        # Output target depends on the module-global Test_Flag.
        if (Test_Flag=="negative"):
            open_output_file= open(const.output_file_ne,'a+')
        else:
            open_output_file= open(const.output_file,'a+')
        caseline = ""  # NOTE(review): never used below
        get_items = ""
        get_case = ""
        row = 0
        line_count = fileline_count(comb_file)
        # Need at least two fields for a pairwise combination to make sense.
        if (line_count >= 2):
            lists = [[] for m in range(line_count)]
            # First pass: collect the field names (text before the first '-')
            # into a single tab-separated header line.
            open_input_file= open(comb_file)
            while 1:
                line = open_input_file.readline()
                if not line:
                    break
                line = line.strip('\n\r')
                items = line.split(":")
                get_items = get_items + items[0].split("-")[0] + "\t"
            open_output_file.writelines(get_items.rstrip("\t") + "\n")
            open_input_file.close()
            # Second pass: collect each field's comma-separated values.
            open_input_file= open(comb_file)
            for i in range(0,len(lists)):
                line = open_input_file.readline()
                if not line:
                    break
                line = line.strip('\n\r')
                items = line.split(":")#items[0]=field;#item[1]=value
                # Slice past "field:" so values containing ':' stay intact.
                value = line[len(items[0])+1:]
                get_item= value.split(",")
                for g in get_item:
                    lists[row].append(g)
                row = row + 1
            #print lists
            # all_pairs() yields one value-tuple per pairwise combination.
            input_pair = all_pairs( lists )
            for e, v in enumerate(input_pair):
                for c in range(0,len(v)):
                    get_case = get_case + v[c]+"\t"
                open_output_file.writelines(get_case.rstrip("\t") + "\n")
                get_case=""
        open_output_file.close()
        return "Generate selfcombination file ------------------------->O.K"
    except:
        print traceback.format_exc()
def app_Folder(path_tcs):
    """
    Copy the web-app resource files into every generated test-case folder.

    For each entry in ``path_tcs``, copies const.path_resource into
    ``<cwd>/tcs/<entry>``.  NOTE(review): the loop variable ``file`` shadows
    the builtin of the same name; harmless here but worth renaming.

    @param path_tcs: directory containing the generated test-case folders.
    @return: a (status-message, path) tuple; the message ends in "O.K" on
        success or "error" when any exception was caught.
    """
    try:
        for file in os.listdir(path_tcs):
            copy_Files(const.path_resource,os.getcwd()+"/tcs/"+file)
        return "Webapp folder copy ------------------------->O.K",path_tcs
    except Exception,e:
        print Exception,":",e
        return "Webapp folder copy ------------------------->error",path_tcs
def copy_Files(sourceDir, targetDir):
    """
    Recursively copy every file under sourceDir into targetDir.

    Destination directories are created on demand (only once a file actually
    needs to land in them) and files that already exist in the target are
    left untouched, preserving the original best-effort behaviour.

    @param sourceDir: directory to copy from (must exist and be listable).
    @param targetDir: directory to copy into (created lazily).
    @return: a 4-tuple status: ("Copy File O.k" or "Copy File error",
        sourceDir, "------------------------->", targetDir).
    """
    try:
        for name in os.listdir(sourceDir):
            sourceF = os.path.join(sourceDir, name)
            targetF = os.path.join(targetDir, name)
            if os.path.isfile(sourceF):
                # Create the destination folder only when there is at least
                # one file to place in it (matches the original behaviour:
                # empty directories are not mirrored).
                if not os.path.exists(targetDir):
                    os.makedirs(targetDir)
                # Skip files that already exist in the target.
                if not os.path.exists(targetF):
                    # Fix: the original open(t,"wb").write(open(s,"rb").read())
                    # never closed either handle; shutil.copyfile closes both
                    # even on error (shutil is imported at the top of this
                    # file -- see do_Clear's use of shutil.rmtree).
                    shutil.copyfile(sourceF, targetF)
            if os.path.isdir(sourceF):
                copy_Files(sourceF, targetF)
        return "Copy File O.k", sourceDir, "------------------------->", targetDir
    except Exception as e:
        # Fix: report the actual caught error; the original printed the bare
        # Exception class object, which carries no diagnostic information.
        print("Copy File error: %s" % (e,))
        return "Copy File error", sourceDir, "------------------------->", targetDir
def do_Clear(sourceDir):
    """
    Best-effort delete of a file or a whole directory tree.

    Missing paths are silently ignored; deletion failures are reported but
    never raised, so callers can always proceed.

    @param sourceDir: path of the file or directory to remove.
    """
    try:
        if (os.path.exists(sourceDir)):
            if (os.path.isdir(sourceDir)):
                shutil.rmtree(sourceDir)
            else:
                os.remove(sourceDir)
    except (IOError, OSError) as e:
        # Fix: the original caught IOError only, but shutil.rmtree and
        # os.remove raise OSError, so real deletion failures escaped the
        # intended best-effort swallow.  Also report the caught error
        # instead of printing the bare Exception class object.
        print("Clear :" + sourceDir + " ------------------------->error %s" % (e,))
def Usage():
    """Print the command-line help text for this script."""
    print "<-------------------------test.py usage:------------------------->"
    print "-h,--help: print help message"
    print "-n, --negative seed test"
    print "-o, --order: input allpairs order default 2"
    print "-p, --pack: pack xpk or wgt default wgt"
    print "--foo: Test option "
def main(argv):
    """
    Command-line entry point: clean previous outputs, parse options and
    drive the manifest/test generation.

    @param argv: full sys.argv (argv[0] is the program name and is skipped).

    Exit codes: 0 for --foo/-p, 1 for -h/--help, 2 on unexpected errors,
    3 for unhandled options.
    """
    try:
        global Pack_Type
        global Test_Flag
        global Test_Device_Type
        # Remove leftovers from previous runs before generating anything.
        do_Clear(const.path_tcs)
        do_Clear(const.path + "/self")
        do_Clear(const.report_path + "/manifest_all_positive.txt")
        do_Clear(const.report_path + "/manifest_all_negative.txt")
        os.system("rm -f " + const.seed_negative + "/*~")
        os.system("rm -f " + const.seed_positive + "/*~")
        opts, args = getopt.getopt(argv[1:], 'h:o:p:n', ['help','order=','pack='])
        # No options at all: default positive run straight from the seed file.
        if (len(opts) ==0):
            print "Auto generate manifest.json------------------------->",opts
            #input_seed -> selfcomb.txt->manifest.json
            del_Seed1(const.seed_file)
        for o, a in opts:
            if o in ('-h', '--help'):
                Usage()
                sys.exit(1)
            elif o in ('-n'):
                # Negative testing: use the negative seed file instead.
                print ("**************negative**********" )
                Test_Flag = "negative"
                if (Test_Flag=="negative"):
                    del_Seed(const.seed_file_na)
                else:
                    del_Seed(const.seed_file)
            elif o in ('-o', '--order'):
                # NOTE(review): allpairs_order_get is a local and is never
                # used afterwards -- the -o value appears to have no effect.
                allpairs_order_get = a
                print "Auto generate manifest.json------------------------->"
                #create all manifest.json->positive.txt and nagative.txt
                print "------------------>"
                #del_Seed1(const.seed_file)
                #import testfile
                #suite1 = unittest.TestLoader().loadTestsFromTestCase(testfile.TestCaseUnit)
                #manifest folder -> webapp
                #app_Folder(const.path_tcs)
                do_Clear(const.path + "/self")
            elif o in ('--foo', ):
                sys.exit(0)
            elif o in ('-p','--pack' ):
                print "Auto generate manifest.json------------------------->",opts
                #input_seed -> selfcomb.txt->manifest.json
                Pack_Type = a
                print "Pack_Type------------------------->",Pack_Type
                sys.exit(0)
            else:
                print "***unhandled option***"
                sys.exit(3)
    except Exception,e:
        print Exception,":",e
        print traceback.format_exc()
        Usage()
        sys.exit(2)
    finally:
        # Always clean up temporary artefacts, even on sys.exit().
        do_Clear(const.path + "/opt")
        do_Clear(const.path + "/self")
        os.system("rm -rf *.zip")
        os.system("rm -rf *.pem")
# Script entry point: pass the full argv (including the program name).
if __name__=="__main__":
    main(sys.argv)
| |
# Knight.py (Springer / Pferd)
from __future__ import division
from OpenGL.GL import *
from OpenGL.GLU import *
from defines import *
from objloader import *
from openGLFunctions import set_color
import Piece
# noinspection PyPep8Naming
class Knight(Piece.Piece):
    """
    The knight chess piece.

    Renders either from a Wavefront .obj model (when ``useobj`` is set) or
    from hand-built OpenGL geometry: feet, a slanted body, a mane/tail
    profile extruded between x = -radius and x = +radius, and a nose box.
    Constants such as objWhite, knight, standardFactor, figureScale and the
    draw* display-list ids come from the star-imports at the top of the file.
    """
    def __init__(self, c, x, y, useobj):
        """
        @param c: colour selector -- objWhite/objBlack pick the standard
            piece colours, anything else is passed through as the colour.
        @param x: board x position (forwarded to Piece).
        @param y: board y position (forwarded to Piece).
        @param useobj: when true, load and render an .obj model instead of
            the procedural geometry.
        """
        if c == objWhite:
            Piece.Piece.__init__(self, knight, white, x, y)
        elif c == objBlack:
            Piece.Piece.__init__(self, knight, black, x, y)
        else:
            Piece.Piece.__init__(self, knight, c, x, y)
        self.height1 = 3.6
        self.radius1 = 0.5
        # Half-width of the extruded body along the x axis.
        self.radius = 0.55 * standardFactor
        self.objFilePath = "obj/Knight.obj"
        self.useObj = useobj
        self.obj = None
        if self.useObj:
            # Pick the colour-specific model; non-objWhite colours fall back
            # to the black model.
            if c == objWhite:
                self.objFilePath = "obj/KnightWhite.obj"
                self.obj = OBJ(self.objFilePath, swapyz=True, listindex=drawWhiteKnightObj)
            else:
                self.objFilePath = "obj/KnightBlack.obj"
                self.obj = OBJ(self.objFilePath, swapyz=True, listindex=drawBlackKnightObj)
    def draw(self):
        """Draw the piece at its board position (no-op unless alive)."""
        if self.life == alive:
            glPushMatrix()
            self.translate()
            # Full-size pieces use pre-built display lists (or the obj
            # model); scaled pieces are re-tessellated via drawMe().
            if self.factor == 1:
                if self.color == white and not self.useObj:
                    glCallList(drawWhiteKnight)
                elif self.color == black and not self.useObj:
                    glCallList(drawBlackKnight)
                elif self.useObj:
                    glScalef(figureScale, figureScale, figureScale)
                    set_color(self.color, normal)
                    glCallList(self.obj.gl_list)
            else:
                self.drawMe(normal)
            glPopMatrix()
    def drawShadow(self):
        """Draw the piece's shadow by re-rendering it in shadow colour."""
        if self.life == alive:
            glPushMatrix()
            self.translate()
            self.drawMe(shadow)
            glPopMatrix()
    def drawMe(self, colorMode):
        """
        Emit the knight's geometry in immediate mode.

        @param colorMode: normal or shadow -- forwarded to set_color().
        """
        # NOTE(review): 'true' (lowercase) presumably comes from the defines
        # star-import; confirm it is defined there, otherwise this raises
        # NameError whenever drawMe() runs with useObj set.
        if self.useObj == true:
            glScalef(figureScale, figureScale, figureScale)
            set_color(self.color, colorMode)
            glCallList(self.obj.gl_list)
        else:
            self.drawFeet(colorMode)
            # glColor3f (0,0,0)
            set_color(self.color, colorMode)
            # All body geometry below is modelled around z = 0 at 3.6 units
            # up, i.e. on top of the feet.
            glTranslated(0, 0, 3.6 * standardFactor * self.factor)
            #connection:
            glBegin(GL_QUAD_STRIP)
            glNormal3d(0, 1, 0)
            glVertex3d(-self.radius * self.factor, 1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glNormal3d(0, 1, 0)
            glVertex3d(self.radius * self.factor, 1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            #glNormal3d(1,0,0)
            glVertex3d(-self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            #glNormal3d(1,0,0)
            glVertex3d(self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glNormal3d(0, -1, 0)
            glVertex3d(-self.radius * self.factor, -1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glNormal3d(0, -1, 0)
            glVertex3d(self.radius * self.factor, -1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glNormal3d(0, -0.5, -0.5)
            glVertex3d(-self.radius * self.factor, -0.5 * standardFactor * self.factor, 0)
            glNormal3d(0, -0.5, -0.5)
            glVertex3d(self.radius * self.factor, -0.5 * standardFactor * self.factor, 0)
            glNormal3d(0, 0.5, -0.5)
            glVertex3d(-self.radius * self.factor, 0.5 * standardFactor * self.factor, 0)
            glNormal3d(0, 0.5, -0.5)
            glVertex3d(self.radius * self.factor, 0.5 * standardFactor * self.factor, 0)
            glEnd()
            #'right' bottom polygon
            glBegin(GL_POLYGON)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 0.5 * standardFactor * self.factor, 0)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -0.5 * standardFactor * self.factor, 0)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glEnd()
            #'left' bottom polygon
            glBegin(GL_POLYGON)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -0.5 * standardFactor * self.factor, 0)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 0.5 * standardFactor * self.factor, 0)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glEnd()
            #top:
            # Left-side head/neck profile polygon (x = -radius).
            glBegin(GL_POLYGON)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 0.75 * standardFactor * self.factor,
                       3 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 0.85 * standardFactor * self.factor,
                       3.9 * standardFactor * self.factor)
            #tail:
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 0.5 * standardFactor * self.factor,
                       3.9 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 0, 3.8 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -0.5 * standardFactor * self.factor,
                       3.65 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -0.8 * standardFactor * self.factor,
                       3.4 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -standardFactor * self.factor, 3.15 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -1.25 * standardFactor * self.factor,
                       2.8 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -1.5 * standardFactor * self.factor,
                       2.3 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -1.7 * standardFactor * self.factor,
                       1.8 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, -1.83 * standardFactor * self.factor,
                       1.2 * standardFactor * self.factor)
            glEnd()
            # Right-side head/neck profile polygon (x = +radius), wound the
            # other way so it faces outward.
            glBegin(GL_POLYGON)
            #tail:
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -1.83 * standardFactor * self.factor,
                       1.2 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -1.7 * standardFactor * self.factor,
                       1.8 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -1.5 * standardFactor * self.factor,
                       2.3 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -1.25 * standardFactor * self.factor,
                       2.8 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -standardFactor * self.factor, 3.15 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -0.8 * standardFactor * self.factor,
                       3.4 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -0.5 * standardFactor * self.factor,
                       3.65 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 0, 3.8 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 0.5 * standardFactor * self.factor,
                       3.9 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 0.85 * standardFactor * self.factor,
                       3.9 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 0.75 * standardFactor * self.factor, 3 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, -1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glEnd()
            #connection:
            # Quad strip joining the two profile polygons around the back of
            # the head and down the tail.
            glBegin(GL_QUAD_STRIP)
            #glNormal3d()
            glVertex3d(-self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            #glNormal3d(1,0,0)
            glVertex3d(self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glNormal3d(0, 0.4, -0.6)
            glVertex3d(-self.radius * self.factor, 0.75 * standardFactor * self.factor,
                       3 * standardFactor * self.factor)
            glNormal3d(0, 0.4, -0.6)
            glVertex3d(self.radius * self.factor, 0.75 * standardFactor * self.factor, 3 * standardFactor * self.factor)
            glNormal3d(0, 0.5, 0.5)
            glVertex3d(-self.radius * self.factor, 0.85 * standardFactor * self.factor,
                       3.9 * standardFactor * self.factor)
            glNormal3d(0, 0.5, 0.5)
            glVertex3d(self.radius * self.factor, 0.85 * standardFactor * self.factor,
                       3.9 * standardFactor * self.factor)
            #tail:
            glNormal3d(0, -0.1, 0.9)
            glVertex3d(-self.radius * self.factor, 0.5 * standardFactor * self.factor,
                       3.9 * standardFactor * self.factor)
            glNormal3d(0, -0.1, 0.9)
            glVertex3d(self.radius * self.factor, 0.5 * standardFactor * self.factor,
                       3.9 * standardFactor * self.factor)
            glNormal3d(0, -0.3, 0.7)
            glVertex3d(-self.radius * self.factor, 0, 3.8 * standardFactor * self.factor)
            glNormal3d(0, -0.3, 0.7)
            glVertex3d(self.radius * self.factor, 0, 3.8 * standardFactor * self.factor)
            glNormal3d(0, -0.2, 0.8)
            glVertex3d(-self.radius * self.factor, -0.5 * standardFactor * self.factor,
                       3.65 * standardFactor * self.factor)
            glNormal3d(0, -0.2, 0.8)
            glVertex3d(self.radius * self.factor, -0.5 * standardFactor * self.factor,
                       3.65 * standardFactor * self.factor)
            glNormal3d(0, -0.35, 0.7)
            glVertex3d(-self.radius * self.factor, -0.8 * standardFactor * self.factor,
                       3.4 * standardFactor * self.factor)
            glNormal3d(0, -0.35, 0.7)
            glVertex3d(self.radius * self.factor, -0.8 * standardFactor * self.factor,
                       3.4 * standardFactor * self.factor)
            glNormal3d(0, -0.45, 0.5)
            glVertex3d(-self.radius * self.factor, -standardFactor * self.factor, 3.15 * standardFactor * self.factor)
            glNormal3d(0, -0.45, 0.5)
            glVertex3d(self.radius * self.factor, -standardFactor * self.factor, 3.15 * standardFactor * self.factor)
            glNormal3d(0, -0.55, 0.4)
            glVertex3d(-self.radius * self.factor, -1.25 * standardFactor * self.factor,
                       2.8 * standardFactor * self.factor)
            glNormal3d(0, -0.55, 0.4)
            glVertex3d(self.radius * self.factor, -1.25 * standardFactor * self.factor,
                       2.8 * standardFactor * self.factor)
            glNormal3d(0, -0.7, 0.3)
            glVertex3d(-self.radius * self.factor, -1.5 * standardFactor * self.factor,
                       2.3 * standardFactor * self.factor)
            glNormal3d(0, -0.7, 0.3)
            glVertex3d(self.radius * self.factor, -1.5 * standardFactor * self.factor,
                       2.3 * standardFactor * self.factor)
            glNormal3d(0, -0.9, 0.1)
            glVertex3d(-self.radius * self.factor, -1.7 * standardFactor * self.factor,
                       1.8 * standardFactor * self.factor)
            glNormal3d(0, -0.9, 0.1)
            glVertex3d(self.radius * self.factor, -1.7 * standardFactor * self.factor,
                       1.8 * standardFactor * self.factor)
            glNormal3d(0, -1, 0)
            glVertex3d(-self.radius * self.factor, -1.83 * standardFactor * self.factor,
                       1.2 * standardFactor * self.factor)
            glNormal3d(0, -1, 0)
            glVertex3d(self.radius * self.factor, -1.83 * standardFactor * self.factor,
                       1.2 * standardFactor * self.factor)
            glNormal3d(0, -1, 0)
            glVertex3d(-self.radius * self.factor, -1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glNormal3d(0, -1, 0)
            glVertex3d(self.radius * self.factor, -1.9 * standardFactor * self.factor,
                       0.4 * standardFactor * self.factor)
            glEnd()
            #nose:
            glBegin(GL_POLYGON)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 1.55 * standardFactor * self.factor,
                       1.45 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 1.5 * standardFactor * self.factor,
                       2.55 * standardFactor * self.factor)
            glNormal3d(-1, 0, 0)
            glVertex3d(-self.radius * self.factor, 0.75 * standardFactor * self.factor,
                       3 * standardFactor * self.factor)
            glEnd()
            glBegin(GL_POLYGON)
            glNormal3d(0, 0.2, 0.7)
            glVertex3d(-self.radius * self.factor, 0.75 * standardFactor * self.factor,
                       3 * standardFactor * self.factor)
            glNormal3d(0, 0.2, 0.7)
            glVertex3d(-self.radius * self.factor, 1.5 * standardFactor * self.factor,
                       2.55 * standardFactor * self.factor)
            glNormal3d(0, 0.2, 0.7)
            glVertex3d(self.radius * self.factor, 1.5 * standardFactor * self.factor,
                       2.55 * standardFactor * self.factor)
            glNormal3d(0, 0.2, 0.7)
            glVertex3d(self.radius * self.factor, 0.75 * standardFactor * self.factor, 3 * standardFactor * self.factor)
            glEnd()
            glBegin(GL_POLYGON)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 0.75 * standardFactor * self.factor, 3 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 1.5 * standardFactor * self.factor,
                       2.55 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 1.55 * standardFactor * self.factor,
                       1.45 * standardFactor * self.factor)
            glNormal3d(1, 0, 0)
            glVertex3d(self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glEnd()
            glBegin(GL_POLYGON)
            glNormal3d(0, -0.1, -0.8)
            glVertex3d(self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glNormal3d(0, -0.1, -0.8)
            glVertex3d(self.radius * self.factor, 1.55 * standardFactor * self.factor,
                       1.45 * standardFactor * self.factor)
            glNormal3d(0, -0.1, -0.8)
            glVertex3d(-self.radius * self.factor, 1.55 * standardFactor * self.factor,
                       1.45 * standardFactor * self.factor)
            glNormal3d(0, -0.1, -0.8)
            glVertex3d(-self.radius * self.factor, 0.35 * standardFactor * self.factor,
                       1.7 * standardFactor * self.factor)
            glEnd()
            #nose front:
            glBegin(GL_POLYGON)
            glNormal3d(0, 1, 0.1)
            glVertex3d(-self.radius * self.factor, 1.55 * standardFactor * self.factor,
                       1.45 * standardFactor * self.factor)
            glNormal3d(0, 1, 0.1)
            glVertex3d(-self.radius * self.factor, 1.5 * standardFactor * self.factor,
                       2.55 * standardFactor * self.factor)
            glNormal3d(0, 1, 0.1)
            glVertex3d(self.radius * self.factor, 1.5 * standardFactor * self.factor,
                       2.55 * standardFactor * self.factor)
            glNormal3d(0, 1, 0.1)
            glVertex3d(self.radius * self.factor, 1.55 * standardFactor * self.factor,
                       1.45 * standardFactor * self.factor)
            glEnd()
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.release} and L{twisted.python._release}.
All of these tests are skipped on platforms other than Linux, as the release is
only ever performed on Linux.
"""
from __future__ import print_function
import glob
import functools
import operator
import os
import sys
import textwrap
import tempfile
import shutil
from io import BytesIO as StringIO
from twisted.trial.unittest import TestCase, FailTest, SkipTest
from twisted.python.procutils import which
from twisted.python import release
from twisted.python.filepath import FilePath
from incremental import Version
from subprocess import CalledProcessError
from twisted.python._release import (
findTwistedProjects, replaceInFile, Project, filePathDelta,
APIBuilder, BuildAPIDocsScript, CheckTopfileScript,
runCommand, NotWorkingDirectory, SphinxBuilder,
GitCommand, getRepositoryCommand, IVCSCommand)
# Module-level skip markers: a string tells trial to skip a test (with that
# string as the reason); None means "run it".
if os.name != 'posix':
    skip = "Release toolchain only supported on POSIX."
else:
    skip = None
testingSphinxConf = "master_doc = 'index'\n"
try:
    import pydoctor.driver
    # it might not be installed, or it might use syntax not available in
    # this version of Python.
except (ImportError, SyntaxError):
    pydoctorSkip = "Pydoctor is not present."
else:
    if getattr(pydoctor, "version_info", (0,)) < (0, 1):
        pydoctorSkip = "Pydoctor is too old."
    else:
        # Fall back to the platform skip (None on POSIX).
        pydoctorSkip = skip
if not skip and which("sphinx-build"):
    sphinxSkip = None
else:
    sphinxSkip = "Sphinx not available."
if not skip and which("git"):
    # "git version X.Y.Z" -> ["X", "Y", "Z"]
    gitVersion = runCommand(["git", "--version"]).split(" ")[2].split(".")
    # We want git 2.0 or above.
    if int(gitVersion[0]) >= 2:
        gitSkip = skip
    else:
        gitSkip = "old git is present"
else:
    gitSkip = "git is not present."
class ExternalTempdirTestCase(TestCase):
    """
    A test case whose L{mktemp} creates directories outside the usual spot,
    so that Git commands run by the tests don't interfere with the Twisted
    checkout itself.
    """
    def mktemp(self):
        """
        Create a fresh directory under C{/tmp/} and arrange for it to be
        removed when the test finishes.
        """
        path = tempfile.mkdtemp(dir="/tmp/")
        self.addCleanup(shutil.rmtree, path)
        return path
def _gitConfig(path):
    """
    Set the user identity Git requires before it will create commits. This
    isn't needed in real usage, just for tests.

    @param path: The path to the Git repository.
    @type path: L{FilePath}
    """
    configFile = path.child(".git").child("config").path
    for option, value in [("user.name", '"someone"'),
                          ("user.email", '"someone@someplace.com"')]:
        runCommand(["git", "config", "--file", configFile, option, value])
def _gitInit(path):
    """
    Run C{git init} at the given location, then apply the configuration Git
    needs for committing. This isn't needed in real usage, just for tests.

    @param path: The path to where the Git repo will be created.
    @type path: L{FilePath}
    """
    repoLocation = path.path
    runCommand(["git", "init", repoLocation])
    _gitConfig(path)
def genVersion(*args, **kwargs):
    """
    A convenience for generating _version.py data.

    @param args: Arguments to pass to L{Version}.
    @param kwargs: Keyword arguments to pass to L{Version}.
    @return: ASCII-encoded bytes of the version module's source.
    """
    version = Version(*args, **kwargs)
    source = "from incremental import Version\n__version__=%r" % (version,)
    return source.encode('ascii')
class StructureAssertingMixin(object):
    """
    A mixin for L{TestCase} subclasses which provides some methods for
    asserting the structure and contents of directories and files on the
    filesystem.
    """
    def createStructure(self, root, dirDict):
        """
        Create a set of directories and files given a dict defining their
        structure.

        @param root: The directory in which to create the structure. It must
            already exist.
        @type root: L{FilePath}

        @param dirDict: The dict defining the structure. Keys should be strings
            naming files, values should be strings describing file contents OR
            dicts describing subdirectories. All files are written in binary
            mode. Any string values are assumed to describe text files and
            will have their newlines replaced with the platform-native newline
            convention. For example::

                {"foofile": "foocontents",
                 "bardir": {"barfile": "bar\ncontents"}}
        @type dirDict: C{dict}
        """
        for name, spec in dirDict.items():
            node = root.child(name)
            if isinstance(spec, dict):
                node.createDirectory()
                self.createStructure(node, spec)
            else:
                node.setContent(spec.replace('\n', os.linesep))
    def assertStructure(self, root, dirDict):
        """
        Assert that a directory is equivalent to one described by a dict.

        @param root: The filesystem directory to compare.
        @type root: L{FilePath}

        @param dirDict: The dict that should describe the contents of the
            directory. It should be the same structure as the C{dirDict}
            parameter to L{createStructure}.
        @type dirDict: C{dict}
        """
        unaccounted = [entry.basename() for entry in root.children()]
        for pathSegment, expectation in dirDict.items():
            node = root.child(pathSegment)
            if callable(expectation):
                self.assertTrue(expectation(node))
            elif isinstance(expectation, dict):
                self.assertTrue(node.isdir(), "%s is not a dir!"
                                % (node.path,))
                self.assertStructure(node, expectation)
            else:
                observed = node.getContent().replace(os.linesep, '\n')
                self.assertEqual(observed, expectation)
            unaccounted.remove(pathSegment)
        if unaccounted:
            self.fail("There were extra children in %s: %s"
                      % (root.path, unaccounted))
class ProjectTests(ExternalTempdirTestCase):
    """
    There is a first-class representation of a project.
    """
    def assertProjectsEqual(self, observedProjects, expectedProjects):
        """
        Assert that two lists of L{Project}s are equal.
        """
        self.assertEqual(len(observedProjects), len(expectedProjects))
        # Sort both lists by directory so the comparison is order-insensitive.
        observedProjects = sorted(observedProjects,
                                  key=operator.attrgetter('directory'))
        expectedProjects = sorted(expectedProjects,
                                  key=operator.attrgetter('directory'))
        for observed, expected in zip(observedProjects, expectedProjects):
            self.assertEqual(observed.directory, expected.directory)
    def makeProject(self, version, baseDirectory=None):
        """
        Make a Twisted-style project in the given base directory.

        @param baseDirectory: The directory to create files in
            (as a L{FilePath}).
        @param version: The version information for the project.
        @return: L{Project} pointing to the created project.
        """
        if baseDirectory is None:
            baseDirectory = FilePath(self.mktemp())
        # One nested package directory (with __init__.py) per dotted segment
        # of the project name, e.g. "foo.bar" -> foo/bar.
        segments = version[0].split('.')
        directory = baseDirectory
        for segment in segments:
            directory = directory.child(segment)
            if not directory.exists():
                directory.createDirectory()
            directory.child('__init__.py').setContent('')
        directory.child('topfiles').createDirectory()
        directory.child('_version.py').setContent(genVersion(*version))
        return Project(directory)
    def makeProjects(self, *versions):
        """
        Create a series of projects underneath a temporary base directory.

        @return: A L{FilePath} for the base directory.
        """
        baseDirectory = FilePath(self.mktemp())
        for version in versions:
            self.makeProject(version, baseDirectory)
        return baseDirectory
    def test_getVersion(self):
        """
        Project objects know their version.
        """
        version = ('twisted', 2, 1, 0)
        project = self.makeProject(version)
        self.assertEqual(project.getVersion(), Version(*version))
    def test_repr(self):
        """
        The representation of a Project is Project(directory).
        """
        foo = Project(FilePath('bar'))
        self.assertEqual(
            repr(foo), 'Project(%r)' % (foo.directory))
    def test_findTwistedStyleProjects(self):
        """
        findTwistedStyleProjects finds all projects underneath a particular
        directory. A 'project' is defined by the existence of a 'topfiles'
        directory and is returned as a Project object.
        """
        baseDirectory = self.makeProjects(
            ('foo', 2, 3, 0), ('foo.bar', 0, 7, 4))
        projects = findTwistedProjects(baseDirectory)
        self.assertProjectsEqual(
            projects,
            [Project(baseDirectory.child('foo')),
             Project(baseDirectory.child('foo').child('bar'))])
class UtilityTests(ExternalTempdirTestCase):
    """
    Tests for various utility functions for releasing.
    """
    def test_chdir(self):
        """
        L{release.runChdirSafe} restores the original working directory even
        when the wrapped callable raises.
        """
        startingDirectory = os.getcwd()
        def chAndBreak():
            os.mkdir('releaseCh')
            os.chdir('releaseCh')
            1 // 0
        self.assertRaises(ZeroDivisionError,
                          release.runChdirSafe, chAndBreak)
        self.assertEqual(startingDirectory, os.getcwd())
    def test_replaceInFile(self):
        """
        L{replaceInFile} replaces data in a file based on a dict. A key from
        the dict that is found in the file is replaced with the corresponding
        value.
        """
        original = 'foo\nhey hey $VER\nbar\n'
        with open('release.replace', 'w') as handle:
            handle.write(original)
        afterFirst = original.replace('$VER', '2.0.0')
        replaceInFile('release.replace', {'$VER': '2.0.0'})
        with open('release.replace') as handle:
            self.assertEqual(handle.read(), afterFirst)
        afterSecond = afterFirst.replace('2.0.0', '3.0.0')
        replaceInFile('release.replace', {'2.0.0': '3.0.0'})
        with open('release.replace') as handle:
            self.assertEqual(handle.read(), afterSecond)
def doNotFailOnNetworkError(func):
    """
    A decorator which makes APIBuilder tests not fail because of intermittent
    network failures -- namely, APIBuilder being unable to get the "object
    inventory" of other projects.

    @param func: The function to decorate.

    @return: A decorated function which won't fail if the object inventory
        fetching fails.
    """
    @functools.wraps(func)
    def wrapper(*a, **kw):
        try:
            func(*a, **kw)
        except FailTest as failure:
            # Anything other than the intersphinx-inventory failure is a
            # genuine test failure and must propagate unchanged.
            if not failure.args[0].startswith(
                    "'Failed to get object inventory from "):
                raise
            raise SkipTest(
                ("This test is prone to intermittent network errors. "
                 "See ticket 8753. Exception was: {!r}").format(failure))
    return wrapper
class DoNotFailTests(TestCase):
    """
    Tests for L{doNotFailOnNetworkError}.
    """
    def test_skipsOnAssertionError(self):
        """
        When the test raises L{FailTest} and the assertion failure starts with
        "'Failed to get object inventory from ", the test will be skipped
        instead.
        """
        @doNotFailOnNetworkError
        def networkyFailure():
            self.assertEqual("Failed to get object inventory from blah", "")
        try:
            networkyFailure()
        except Exception as caught:
            self.assertIsInstance(caught, SkipTest)
    def test_doesNotSkipOnDifferentError(self):
        """
        If there is a L{FailTest} that is not the intersphinx fetching error,
        it will be passed through.
        """
        @doNotFailOnNetworkError
        def ordinaryFailure():
            self.assertEqual("Error!!!!", "")
        try:
            ordinaryFailure()
        except Exception as caught:
            self.assertIsInstance(caught, FailTest)
class APIBuilderTests(ExternalTempdirTestCase):
"""
Tests for L{APIBuilder}.
"""
skip = pydoctorSkip
    @doNotFailOnNetworkError
    def test_build(self):
        """
        L{APIBuilder.build} writes an index file which includes the name of the
        project specified.
        """
        # Capture pydoctor's console output so we can assert it stays silent.
        stdout = StringIO()
        self.patch(sys, 'stdout', stdout)
        projectName = "Foobar"
        packageName = "quux"
        projectURL = "scheme:project"
        sourceURL = "scheme:source"
        docstring = "text in docstring"
        privateDocstring = "should also appear in output"
        # A minimal package with one public and one private function.
        inputPath = FilePath(self.mktemp()).child(packageName)
        inputPath.makedirs()
        inputPath.child("__init__.py").setContent(
            "def foo():\n"
            "    '%s'\n"
            "def _bar():\n"
            "    '%s'" % (docstring, privateDocstring))
        outputPath = FilePath(self.mktemp())
        builder = APIBuilder()
        builder.build(projectName, projectURL, sourceURL, inputPath,
                      outputPath)
        indexPath = outputPath.child("index.html")
        self.assertTrue(
            indexPath.exists(),
            "API index %r did not exist." % (outputPath.path,))
        self.assertIn(
            '<a href="%s">%s</a>' % (projectURL, projectName),
            indexPath.getContent(),
            "Project name/location not in file contents.")
        quuxPath = outputPath.child("quux.html")
        self.assertTrue(
            quuxPath.exists(),
            "Package documentation file %r did not exist." % (quuxPath.path,))
        self.assertIn(
            docstring, quuxPath.getContent(),
            "Docstring not in package documentation file.")
        self.assertIn(
            '<a href="%s/%s">View Source</a>' % (sourceURL, packageName),
            quuxPath.getContent())
        self.assertIn(
            '<a class="functionSourceLink" href="%s/%s/__init__.py#L1">' % (
                sourceURL, packageName),
            quuxPath.getContent())
        self.assertIn(privateDocstring, quuxPath.getContent())
        # There should also be a page for the foo function in quux.
        self.assertTrue(quuxPath.sibling('quux.foo.html').exists())
        # The build must not have written anything to stdout.
        self.assertEqual(stdout.getvalue(), '')
    @doNotFailOnNetworkError
    def test_buildWithPolicy(self):
        """
        L{BuildAPIDocsScript.buildAPIDocs} builds the API docs with values
        appropriate for the Twisted project.
        """
        # Capture pydoctor's console output so we can assert it stays silent.
        stdout = StringIO()
        self.patch(sys, 'stdout', stdout)
        docstring = "text in docstring"
        # A minimal "twisted" package with a version module for the builder
        # to pick up.
        projectRoot = FilePath(self.mktemp())
        packagePath = projectRoot.child("twisted")
        packagePath.makedirs()
        packagePath.child("__init__.py").setContent(
            "def foo():\n"
            "    '%s'\n" % (docstring,))
        packagePath.child("_version.py").setContent(
            genVersion("twisted", 1, 0, 0))
        outputPath = FilePath(self.mktemp())
        script = BuildAPIDocsScript()
        script.buildAPIDocs(projectRoot, outputPath)
        indexPath = outputPath.child("index.html")
        self.assertTrue(
            indexPath.exists(),
            "API index %r did not exist." % (outputPath.path,))
        self.assertIn(
            '<a href="http://twistedmatrix.com/">Twisted</a>',
            indexPath.getContent(),
            "Project name/location not in file contents.")
        twistedPath = outputPath.child("twisted.html")
        self.assertTrue(
            twistedPath.exists(),
            "Package documentation file %r did not exist."
            % (twistedPath.path,))
        self.assertIn(
            docstring, twistedPath.getContent(),
            "Docstring not in package documentation file.")
        #Here we check that it figured out the correct version based on the
        #source code.
        self.assertIn(
            '<a href="https://github.com/twisted/twisted/tree/'
            'twisted-1.0.0/src/twisted">View Source</a>',
            twistedPath.getContent())
        self.assertEqual(stdout.getvalue(), '')
    @doNotFailOnNetworkError
    def test_buildWithDeprecated(self):
        """
        The templates and System for Twisted includes adding deprecations.
        """
        # Capture pydoctor's console output so we can assert it stays silent.
        stdout = StringIO()
        self.patch(sys, 'stdout', stdout)
        projectName = "Foobar"
        packageName = "quux"
        projectURL = "scheme:project"
        sourceURL = "scheme:source"
        docstring = "text in docstring"
        privateDocstring = "should also appear in output"
        # A package exercising both @deprecated spellings (direct import and
        # module-qualified) on a function, a private function and a class.
        inputPath = FilePath(self.mktemp()).child(packageName)
        inputPath.makedirs()
        inputPath.child("__init__.py").setContent(
            "from twisted.python.deprecate import deprecated\n"
            "from incremental import Version\n"
            "@deprecated(Version('Twisted', 15, 0, 0), "
            "'Baz')\n"
            "def foo():\n"
            "    '%s'\n"
            "from twisted.python import deprecate\n"
            "import incremental\n"
            "@deprecate.deprecated(incremental.Version('Twisted', 16, 0, 0))\n"
            "def _bar():\n"
            "    '%s'\n"
            "@deprecated(Version('Twisted', 14, 2, 3), replacement='stuff')\n"
            "class Baz(object):\n"
            "    pass"
            "" % (docstring, privateDocstring))
        outputPath = FilePath(self.mktemp())
        builder = APIBuilder()
        builder.build(projectName, projectURL, sourceURL, inputPath,
                      outputPath)
        quuxPath = outputPath.child("quux.html")
        self.assertTrue(
            quuxPath.exists(),
            "Package documentation file %r did not exist." % (quuxPath.path,))
        self.assertIn(
            docstring, quuxPath.getContent(),
            "Docstring not in package documentation file.")
        self.assertIn(
            'foo was deprecated in Twisted 15.0.0; please use Baz instead.',
            quuxPath.getContent())
        self.assertIn(
            '_bar was deprecated in Twisted 16.0.0.',
            quuxPath.getContent())
        self.assertIn(privateDocstring, quuxPath.getContent())
        # There should also be a page for the foo function in quux.
        self.assertTrue(quuxPath.sibling('quux.foo.html').exists())
        self.assertIn(
            'foo was deprecated in Twisted 15.0.0; please use Baz instead.',
            quuxPath.sibling('quux.foo.html').getContent())
        self.assertIn(
            'Baz was deprecated in Twisted 14.2.3; please use stuff instead.',
            quuxPath.sibling('quux.Baz.html').getContent())
        self.assertEqual(stdout.getvalue(), '')
def test_apiBuilderScriptMainRequiresTwoArguments(self):
"""
SystemExit is raised when the incorrect number of command line
arguments are passed to the API building script.
"""
script = BuildAPIDocsScript()
self.assertRaises(SystemExit, script.main, [])
self.assertRaises(SystemExit, script.main, ["foo"])
self.assertRaises(SystemExit, script.main, ["foo", "bar", "baz"])
def test_apiBuilderScriptMain(self):
"""
The API building script invokes the same code that
L{test_buildWithPolicy} tests.
"""
script = BuildAPIDocsScript()
calls = []
script.buildAPIDocs = lambda a, b: calls.append((a, b))
script.main(["hello", "there"])
self.assertEqual(calls, [(FilePath("hello"), FilePath("there"))])
class FilePathDeltaTests(TestCase):
    """
    Tests for L{filePathDelta}.
    """
    def assertPathDelta(self, origin, destination, expected):
        """
        Assert that L{filePathDelta} computes C{expected} as the relative
        path from C{origin} to C{destination}.
        """
        self.assertEqual(
            filePathDelta(FilePath(origin), FilePath(destination)),
            expected)

    def test_filePathDeltaSubdir(self):
        """
        L{filePathDelta} can create a simple relative path to a child path.
        """
        self.assertPathDelta("/foo/bar", "/foo/bar/baz", ["baz"])

    def test_filePathDeltaSiblingDir(self):
        """
        L{filePathDelta} can traverse upwards to create relative paths to
        siblings.
        """
        self.assertPathDelta("/foo/bar", "/foo/baz", ["..", "baz"])

    def test_filePathNoCommonElements(self):
        """
        L{filePathDelta} can create relative paths to totally unrelated paths
        for maximum portability.
        """
        self.assertPathDelta("/foo/bar", "/baz/quux",
                             ["..", "..", "baz", "quux"])

    def test_filePathDeltaSimilarEndElements(self):
        """
        L{filePathDelta} doesn't take into account final elements when
        comparing 2 paths, but stops at the first difference.
        """
        self.assertPathDelta("/foo/bar/bar/spam", "/foo/bar/baz/spam",
                             ["..", "..", "baz", "spam"])
class SphinxBuilderTests(TestCase):
    """
    Tests for L{SphinxBuilder}.
    @note: This test case depends on twisted.web, which violates the standard
        Twisted practice of not having anything in twisted.python depend on
        other Twisted packages and opens up the possibility of creating
        circular dependencies. Do not use this as an example of how to
        structure your dependencies.
    @ivar builder: A plain L{SphinxBuilder}.
    @ivar sphinxDir: A L{FilePath} representing a directory to be used for
        containing a Sphinx project.
    @ivar sourceDir: A L{FilePath} representing a directory to be used for
        containing the source files for a Sphinx project.
    """
    skip = sphinxSkip
    # Smallest possible Sphinx configuration; dedented below because the
    # literal is indented to match the class body.
    confContent = """\
              source_suffix = '.rst'
              master_doc = 'index'
              """
    confContent = textwrap.dedent(confContent)
    # A minimal reStructuredText document for the fake project.
    indexContent = """\
              ==============
              This is a Test
              ==============
              This is only a test
              -------------------
              In case you hadn't figured it out yet, this is a test.
              """
    indexContent = textwrap.dedent(indexContent)
    def setUp(self):
        """
        Set up a few instance variables that will be useful.
        """
        self.builder = SphinxBuilder()
        # set up a place for a fake sphinx project
        self.twistedRootDir = FilePath(self.mktemp())
        self.sphinxDir = self.twistedRootDir.child("docs")
        self.sphinxDir.makedirs()
        self.sourceDir = self.sphinxDir
    def createFakeSphinxProject(self):
        """
        Create a fake Sphinx project for test purposes.
        Creates a fake Sphinx project with the absolute minimum of source
        files.  This includes a single source file ('index.rst') and the
        smallest 'conf.py' file possible in order to find that source file.
        """
        self.sourceDir.child("conf.py").setContent(self.confContent)
        self.sourceDir.child("index.rst").setContent(self.indexContent)
    def verifyFileExists(self, fileDir, fileName):
        """
        Helper which verifies that C{fileName} exists in C{fileDir} and it has
        some content.
        @param fileDir: A path to a directory.
        @type fileDir: L{FilePath}
        @param fileName: The last path segment of a file which may exist within
            C{fileDir}.
        @type fileName: L{str}
        @raise: L{FailTest <twisted.trial.unittest.FailTest>} if
            C{fileDir.child(fileName)}:
                1. Does not exist.
                2. Is empty.
                3. In the case where it's a path to a C{.html} file, the
                   content looks like an HTML file.
        @return: L{None}
        """
        # check that file exists
        fpath = fileDir.child(fileName)
        self.assertTrue(fpath.exists())
        # check that the output files have some content
        fcontents = fpath.getContent()
        self.assertTrue(len(fcontents) > 0)
        # check that the html files are at least html-ish
        # this is not a terribly rigorous check
        if fpath.path.endswith('.html'):
            self.assertIn("<body", fcontents)
    def test_build(self):
        """
        Creates and builds a fake Sphinx project using a L{SphinxBuilder}.
        """
        self.createFakeSphinxProject()
        self.builder.build(self.sphinxDir)
        self.verifyBuilt()
    def test_main(self):
        """
        Creates and builds a fake Sphinx project as if via the command line.
        """
        self.createFakeSphinxProject()
        self.builder.main([self.sphinxDir.parent().path])
        self.verifyBuilt()
    def test_warningsAreErrors(self):
        """
        Creates and builds a fake Sphinx project as if via the command line,
        failing if there are any warnings.
        """
        output = StringIO()
        self.patch(sys, "stdout", output)
        self.createFakeSphinxProject()
        # A dangling hyperlink target triggers a Sphinx warning, which the
        # builder must escalate to a failing exit code.
        with self.sphinxDir.child("index.rst").open("a") as f:
            f.write("\n.. _malformed-link-target\n")
        exception = self.assertRaises(
            SystemExit,
            self.builder.main, [self.sphinxDir.parent().path]
        )
        self.assertEqual(exception.code, 1)
        self.assertIn("malformed hyperlink target", output.getvalue())
        self.verifyBuilt()
    def verifyBuilt(self):
        """
        Verify that a sphinx project has been built.
        """
        htmlDir = self.sphinxDir.sibling('doc')
        self.assertTrue(htmlDir.isdir())
        # Intermediate doctrees must not be shipped with the built docs.
        doctreeDir = htmlDir.child("doctrees")
        self.assertFalse(doctreeDir.exists())
        self.verifyFileExists(htmlDir, 'index.html')
        self.verifyFileExists(htmlDir, 'genindex.html')
        self.verifyFileExists(htmlDir, 'objects.inv')
        self.verifyFileExists(htmlDir, 'search.html')
        self.verifyFileExists(htmlDir, 'searchindex.js')
    def test_failToBuild(self):
        """
        Check that SphinxBuilder.build fails when run against a non-sphinx
        directory.
        """
        # note no fake sphinx project is created
        self.assertRaises(CalledProcessError,
                          self.builder.build,
                          self.sphinxDir)
class CommandsTestMixin(StructureAssertingMixin):
    """
    Test mixin for the VCS commands used by the release scripts.
    Concrete subclasses are expected to provide C{createCommand},
    C{makeRepository} and C{commitRepository}.
    """
    def setUp(self):
        # Fresh scratch directory per test; makeRepository turns it into a
        # working copy.
        self.tmpDir = FilePath(self.mktemp())
    def test_ensureIsWorkingDirectoryWithWorkingDirectory(self):
        """
        Calling the C{ensureIsWorkingDirectory} VCS command's method on a valid
        working directory doesn't produce any error.
        """
        reposDir = self.makeRepository(self.tmpDir)
        self.assertIsNone(
            self.createCommand.ensureIsWorkingDirectory(reposDir))
    def test_ensureIsWorkingDirectoryWithNonWorkingDirectory(self):
        """
        Calling the C{ensureIsWorkingDirectory} VCS command's method on an
        invalid working directory raises a L{NotWorkingDirectory} exception.
        """
        self.assertRaises(NotWorkingDirectory,
                          self.createCommand.ensureIsWorkingDirectory,
                          self.tmpDir)
    def test_statusClean(self):
        """
        Calling the C{isStatusClean} VCS command's method on a repository with
        no pending modifications returns C{True}.
        """
        reposDir = self.makeRepository(self.tmpDir)
        self.assertTrue(self.createCommand.isStatusClean(reposDir))
    def test_statusNotClean(self):
        """
        Calling the C{isStatusClean} VCS command's method on a repository with
        pending modifications returns C{False}.
        """
        reposDir = self.makeRepository(self.tmpDir)
        # An uncommitted file makes the working tree dirty.
        reposDir.child('some-file').setContent("something")
        self.assertFalse(self.createCommand.isStatusClean(reposDir))
    def test_remove(self):
        """
        Calling the C{remove} VCS command's method remove the specified path
        from the directory.
        """
        reposDir = self.makeRepository(self.tmpDir)
        testFile = reposDir.child('some-file')
        testFile.setContent("something")
        self.commitRepository(reposDir)
        self.assertTrue(testFile.exists())
        self.createCommand.remove(testFile)
        testFile.restat(False) # Refresh the file information
        self.assertFalse(testFile.exists(), "File still exists")
    def test_export(self):
        """
        The C{exportTo} VCS command's method export the content of the
        repository as identical in a specified directory.
        """
        structure = {
            "README.rst": "Hi this is 1.0.0.",
            "twisted": {
                "topfiles": {
                    "README": "Hi this is 1.0.0"},
                "_version.py": genVersion("twisted", 1, 0, 0),
                "web": {
                    "topfiles": {
                        "README": "Hi this is 1.0.0"},
                    "_version.py": genVersion("twisted.web", 1, 0, 0)}}}
        reposDir = self.makeRepository(self.tmpDir)
        self.createStructure(reposDir, structure)
        self.commitRepository(reposDir)
        exportDir = FilePath(self.mktemp()).child("export")
        self.createCommand.exportTo(reposDir, exportDir)
        self.assertStructure(exportDir, structure)
class GitCommandTest(CommandsTestMixin, ExternalTempdirTestCase):
    """
    Specific L{CommandsTestMixin} related to Git repositories through
    L{GitCommand}.
    """
    skip = gitSkip
    def makeRepository(self, root):
        """
        Create a Git repository in the specified path.
        @type root: L{FilePath}
        @param root: The directory to create the Git repository into.
        @return: The path to the repository just created.
        @rtype: L{FilePath}
        """
        _gitInit(root)
        return root
    def commitRepository(self, repository):
        """
        Add and commit all the files from the Git repository specified.
        @type repository: L{FilePath}
        @param repository: The Git repository to commit into.
        """
        # Glob the top-level entries so hidden files like .git are not added.
        runCommand(["git", "-C", repository.path, "add"] +
                   glob.glob(repository.path + "/*"))
        runCommand(["git", "-C", repository.path, "commit", "-m", "hop"])
class RepositoryCommandDetectionTest(ExternalTempdirTestCase):
    """
    Test the L{getRepositoryCommand} to access the right set of VCS commands
    depending on the repository manipulated.
    """
    skip = gitSkip
    def setUp(self):
        # Path for the (not yet created) repository under test.
        self.repos = FilePath(self.mktemp())
    def test_git(self):
        """
        L{getRepositoryCommand} from a Git repository returns L{GitCommand}.
        """
        _gitInit(self.repos)
        cmd = getRepositoryCommand(self.repos)
        self.assertIs(cmd, GitCommand)
    def test_unknownRepository(self):
        """
        L{getRepositoryCommand} from a directory which doesn't look like a Git
        repository produces a L{NotWorkingDirectory} exception.
        """
        self.assertRaises(NotWorkingDirectory, getRepositoryCommand,
                          self.repos)
class VCSCommandInterfaceTests(TestCase):
    """
    Test that the VCS command classes implement their interface.
    """
    def test_git(self):
        """
        L{GitCommand} implements L{IVCSCommand}.
        """
        providesInterface = IVCSCommand.implementedBy(GitCommand)
        self.assertTrue(providesInterface)
class CheckTopfileScriptTests(ExternalTempdirTestCase):
    """
    Tests for L{CheckTopfileScript}.
    Each test sets up an "origin" repository with a trunk branch and a local
    clone, then runs the script against branches of the clone.
    """
    skip = gitSkip
    def setUp(self):
        """
        Create an origin repository with a single commit on trunk, and a
        clone of it to operate on.
        """
        self.origin = FilePath(self.mktemp())
        _gitInit(self.origin)
        runCommand(["git", "checkout", "-b", "trunk"],
                   cwd=self.origin.path)
        self.origin.child("test").setContent(b"test!")
        runCommand(["git", "add", self.origin.child("test").path],
                   cwd=self.origin.path)
        runCommand(["git", "commit", "-m", "initial"],
                   cwd=self.origin.path)
        self.repo = FilePath(self.mktemp())
        runCommand(["git", "clone", self.origin.path, self.repo.path])
        _gitConfig(self.repo)
    def test_noArgs(self):
        """
        Too few arguments returns a failure.
        """
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([])
        self.assertEqual(e.exception.args,
                         ("Must specify one argument: the Twisted checkout",))
    def test_diffFromTrunkNoTopfiles(self):
        """
        If there are changes from trunk, then there should also be a topfile.
        """
        runCommand(["git", "checkout", "-b", "mypatch"],
                   cwd=self.repo.path)
        somefile = self.repo.child("somefile")
        somefile.setContent(b"change")
        # The path was previously passed to "git add" twice; once suffices.
        runCommand(["git", "add", somefile.path],
                   cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "some file"],
                   cwd=self.repo.path)
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([self.repo.path])
        self.assertEqual(e.exception.args, (1,))
        self.assertEqual(logs[-1],
                         "No newsfragment found. Have you committed it?")
    def test_noChangeFromTrunk(self):
        """
        If there are no changes from trunk, then no need to check the topfiles
        """
        runCommand(["git", "checkout", "-b", "mypatch"],
                   cwd=self.repo.path)
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([self.repo.path])
        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(
            logs[-1],
            "On trunk or no diffs from trunk; no need to look at this.")
    def test_trunk(self):
        """
        Running it on trunk always gives green.
        """
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([self.repo.path])
        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(
            logs[-1],
            "On trunk or no diffs from trunk; no need to look at this.")
    def test_release(self):
        """
        Running it on a release branch returns green if there is no topfiles
        even if there are changes.
        """
        runCommand(["git", "checkout", "-b", "release-16.11111-9001"],
                   cwd=self.repo.path)
        somefile = self.repo.child("somefile")
        somefile.setContent(b"change")
        # The path was previously passed to "git add" twice; once suffices.
        runCommand(["git", "add", somefile.path],
                   cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "some file"],
                   cwd=self.repo.path)
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([self.repo.path])
        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(logs[-1],
                         "Release branch with no newsfragments, all good.")
    def test_releaseWithTopfiles(self):
        """
        Running it on a release branch returns red if there are new topfiles.
        """
        runCommand(["git", "checkout", "-b", "release-16.11111-9001"],
                   cwd=self.repo.path)
        topfiles = self.repo.child("twisted").child("newsfragments")
        topfiles.makedirs()
        fragment = topfiles.child("1234.misc")
        fragment.setContent(b"")
        unrelated = self.repo.child("somefile")
        unrelated.setContent(b"Boo")
        runCommand(["git", "add", fragment.path, unrelated.path],
                   cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "fragment"],
                   cwd=self.repo.path)
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([self.repo.path])
        self.assertEqual(e.exception.args, (1,))
        self.assertEqual(logs[-1],
                         "No newsfragments should be on the release branch.")
    def test_onlyQuotes(self):
        """
        Running it on a branch with only a quotefile change gives green.
        """
        runCommand(["git", "checkout", "-b", "quotefile"],
                   cwd=self.repo.path)
        fun = self.repo.child("docs").child("fun")
        fun.makedirs()
        quotes = fun.child("Twisted.Quotes")
        quotes.setContent(b"Beep boop")
        runCommand(["git", "add", quotes.path],
                   cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "quotes"],
                   cwd=self.repo.path)
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([self.repo.path])
        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(logs[-1],
                         "Quotes change only; no newsfragment needed.")
    def test_topfileAdded(self):
        """
        Running it on a branch with a fragment in the topfiles dir added
        returns green.
        """
        runCommand(["git", "checkout", "-b", "quotefile"],
                   cwd=self.repo.path)
        topfiles = self.repo.child("twisted").child("newsfragments")
        topfiles.makedirs()
        fragment = topfiles.child("1234.misc")
        fragment.setContent(b"")
        unrelated = self.repo.child("somefile")
        unrelated.setContent(b"Boo")
        runCommand(["git", "add", fragment.path, unrelated.path],
                   cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "topfile"],
                   cwd=self.repo.path)
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([self.repo.path])
        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(logs[-1], "Found twisted/newsfragments/1234.misc")
    def test_topfileButNotFragmentAdded(self):
        """
        Running it on a branch with a non-fragment in the topfiles dir does not
        return green.
        """
        runCommand(["git", "checkout", "-b", "quotefile"],
                   cwd=self.repo.path)
        topfiles = self.repo.child("twisted").child("newsfragments")
        topfiles.makedirs()
        notFragment = topfiles.child("1234.txt")
        notFragment.setContent(b"")
        unrelated = self.repo.child("somefile")
        unrelated.setContent(b"Boo")
        runCommand(["git", "add", notFragment.path, unrelated.path],
                   cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "not topfile"],
                   cwd=self.repo.path)
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([self.repo.path])
        self.assertEqual(e.exception.args, (1,))
        self.assertEqual(logs[-1],
                         "No newsfragment found. Have you committed it?")
    def test_topfileAddedButWithOtherTopfiles(self):
        """
        Running it on a branch with a fragment in the topfiles dir added
        returns green, even if there are other files in the topfiles dir.
        """
        runCommand(["git", "checkout", "-b", "quotefile"],
                   cwd=self.repo.path)
        topfiles = self.repo.child("twisted").child("newsfragments")
        topfiles.makedirs()
        fragment = topfiles.child("1234.misc")
        fragment.setContent(b"")
        unrelated = topfiles.child("somefile")
        unrelated.setContent(b"Boo")
        runCommand(["git", "add", fragment.path, unrelated.path],
                   cwd=self.repo.path)
        runCommand(["git", "commit", "-m", "topfile"],
                   cwd=self.repo.path)
        logs = []
        with self.assertRaises(SystemExit) as e:
            CheckTopfileScript(logs.append).main([self.repo.path])
        self.assertEqual(e.exception.args, (0,))
        self.assertEqual(logs[-1], "Found twisted/newsfragments/1234.misc")
| |
import urlparse
import httplib_fork as httplib
from ws4py.client.threadedclient import WebSocketClient
import Queue
import socket
import re
class HttpResponse:
def __init__(self, method, url,
headers={}, body=None, async=False, load=True):
headers = headers.copy()
u = urlparse.urlparse(url)
kwargs = {'timeout': 1.0}
if u.scheme == 'http':
conn = httplib.HTTPConnection(u.netloc, **kwargs)
elif u.scheme == 'https':
conn = httplib.HTTPSConnection(u.netloc, **kwargs)
else:
assert False, "Unsupported scheme " + u.scheme
assert u.fragment == ''
path = u.path + ('?' + u.query if u.query else '')
self.conn = conn
if not body:
if method is 'POST':
# The spec says: "Applications SHOULD use this field
# to indicate the transfer-length of the message-body,
# unless this is prohibited by the rules in section
# 4.4."
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# While httplib sets it only if there is body.
headers['Content-Length'] = 0
conn.request(method, path, headers=headers)
else:
if isinstance(body, unicode):
body = body.encode('utf-8')
conn.request(method, path, headers=headers, body=body)
if load:
if not async:
self._load()
else:
self._async_load()
def _get_status(self):
return self.res.status
status = property(_get_status)
def __getitem__(self, key):
return self.headers.get(key.lower())
def _load(self):
# That works for Content-Length responses.
self.res = self.conn.getresponse()
self.headers = dict( (k.lower(), v) for k, v in self.res.getheaders() )
self.body = self.res.read()
self.close()
def close(self):
if self.conn:
self.conn.close()
self.conn = None
def _async_load(self):
# That works for Transfer-Encoding: Chunked
self.res = self.conn.getresponse()
self.headers = dict( (k.lower(), v) for k, v in self.res.getheaders() )
def read(self):
data = self.res.read(10240)
if data:
return data
else:
self.close()
return None
def old_POST_async(url, **kwargs):
    # Legacy helper: fire a POST through the httplib-based HttpResponse
    # wrapper in asynchronous (chunked-read) mode.
    return HttpResponse('POST', url, async=True, **kwargs)
class WebSocket8Client(object):
    """
    Thin synchronous facade over ws4py's threaded WebSocket client.
    Incoming messages and close events are funnelled through a queue so
    that recv() can block with a timeout.
    """
    class ConnectionClosedException(Exception): pass
    def __init__(self, url):
        queue = Queue.Queue()
        self.queue = queue
        class IntWebSocketClient(WebSocketClient):
            # Frames are decoded and queued as unicode strings.
            def received_message(self, m):
                queue.put(unicode(str(m), 'utf-8'))
            def read_from_connection(self, amount):
                r = super(IntWebSocketClient, self).read_from_connection(amount)
                # A close handshake or EOF is queued as a (code, reason)
                # tuple so recv() can raise ConnectionClosedException.
                if self.stream.closing:
                    # NOTE(review): reason[2:] presumably skips the two-byte
                    # status-code prefix of the close payload -- confirm
                    # against the ws4py framing.
                    queue.put((self.stream.closing.code, self.stream.closing.reason[2:]))
                elif not r:
                    queue.put((1000, ""))
                return r
        self.client = IntWebSocketClient(url)
        self.client.connect()
    def close(self):
        # Stop the reader thread, join it, and drop the client so that
        # repeated close() calls are harmless.
        if self.client:
            self.client.running = False
            self.client.close()
            self.client._th.join()
            self.client = None
    def send(self, data):
        self.client.send(data)
    def recv(self):
        # Return the next message; raise ConnectionClosedException when a
        # (code, reason) tuple arrives.  Any failure -- including a queue
        # timeout -- tears the client down before re-raising.
        try:
            r = self.queue.get(timeout=1.0)
            if isinstance(r, tuple):
                ce = self.ConnectionClosedException()
                (ce.code, ce.reason) = r
                raise ce
            return r
        except:
            self.close()
            raise
def recvline(s):
    """
    Read from socket-like object ``s`` one byte at a time until a newline.

    Returns everything read, including the trailing ``'\\n'``.  If the peer
    closes the connection before a newline arrives, whatever was received
    so far is returned.  (The original looped forever in that case, because
    ``recv`` keeps returning the empty string at EOF.)
    """
    chunks = []
    while True:
        c = s.recv(1)
        if not c:
            break  # connection closed before a complete line arrived
        chunks.append(c)
        if c == '\n':
            break
    return ''.join(chunks)
class CaseInsensitiveDict(object):
    """
    Dict wrapper that treats keys case-insensitively while remembering the
    first-seen spelling of every key.
    """
    def __init__(self, *args, **kwargs):
        self.lower = {}
        self.d = dict(*args, **kwargs)
        # Register the canonical spelling of every initial key.
        for key in list(self.d):
            self[key] = self.d[key]

    def _canonical(self, key):
        # Map a key to its first-seen spelling, registering it if new.
        return self.lower.setdefault(key.lower(), key)

    def __getitem__(self, key, *args, **kwargs):
        return self.d.__getitem__(self._canonical(key), *args, **kwargs)

    def __setitem__(self, key, *args, **kwargs):
        return self.d.__setitem__(self._canonical(key), *args, **kwargs)

    def items(self):
        for key in self.lower.values():
            yield (key, self[key])

    def __repr__(self):
        return repr(self.d)

    def __str__(self):
        return str(self.d)

    def get(self, key, *args, **kwargs):
        return self.d.get(self._canonical(key), *args, **kwargs)

    def __contains__(self, key):
        return self._canonical(key) in self.d
class Response(object):
    """
    Bare container for a parsed HTTP response; the ``http``, ``status``,
    ``description`` and ``headers`` attributes are assigned by the caller.
    """
    def __repr__(self):
        return '<Response HTTP/%s %s %r %r>' % (
            self.http, self.status, self.description, self.headers)

    def __str__(self):
        return repr(self)

    def __getitem__(self, key):
        # Missing headers yield None rather than KeyError.
        return self.headers.get(key)

    def get(self, key, default):
        return self.headers.get(key, default)
class RawHttpConnection(object):
    """
    Minimal hand-rolled HTTP client over a raw socket, used to exercise
    wire-framing corner cases that higher-level clients hide.
    """
    def __init__(self, url):
        u = urlparse.urlparse(url)
        self.s = socket.create_connection((u.hostname, u.port), timeout=1)
    def request(self, method, url, headers=None, body=None, timeout=1, http="1.1"):
        """
        Send the request line, headers and (for POST) body, then parse the
        status line and response headers into a Response.
        """
        # Wrap (and thereby copy) the caller's headers; the old mutable
        # ``headers={}`` default was a shared-state hazard.
        headers = CaseInsensitiveDict(headers or {})
        if method == 'POST':
            body = (body or '').encode('utf-8')
        u = urlparse.urlparse(url)
        headers['Host'] = u.hostname + ':' + str(u.port) if u.port else u.hostname
        if body is not None:
            headers['Content-Length'] = str(len(body))
        rel_url = url[ url.find(u.path): ]
        req = ["%s %s HTTP/%s" % (method, rel_url, http)]
        for k, v in headers.items():
            req.append( "%s: %s" % (k, v) )
        req.append('')
        req.append('')
        self.send('\r\n'.join(req))
        if body:
            self.send(body)
        head = recvline(self.s)
        r = re.match(r'HTTP/(?P<version>\S+) (?P<status>\S+) (?P<description>.*)', head)
        resp = Response()
        resp.http = r.group('version')
        resp.status = int(r.group('status'))
        resp.description = r.group('description').rstrip('\r\n')
        resp.headers = CaseInsensitiveDict()
        while True:
            header = recvline(self.s)
            if header in ['\n', '\r\n']:
                break
            k, _, v = header.partition(':')
            resp.headers[k] = v.lstrip().rstrip('\r\n')
        return resp
    def read(self, size=None):
        # Read exactly `size` bytes (raising if the peer closes early), or a
        # single recv() worth of data when size is None.
        if size is None:
            # A single packet by default
            return self.s.recv(999999)
        data = []
        while size > 0:
            c = self.s.recv(size)
            if not c:
                raise Exception('Socket closed!')
            size -= len(c)
            data.append( c )
        return ''.join(data)
    def read_till_eof(self):
        # Drain the socket until the peer closes the connection.
        data = []
        while True:
            c = self.s.recv(999999)
            if not c:
                break
            data.append( c )
        return ''.join(data)
    def closed(self):
        # To check if socket is being closed, we need to recv and see
        # if the response is empty. If it is not - we're in trouble -
        # abort.
        # BUG FIX: socket.settimeout() returns None, so the original saved
        # None and later switched the socket to blocking mode.  Save the
        # real timeout with gettimeout() and restore it afterwards.
        t = self.s.gettimeout()
        self.s.settimeout(0.1)
        r = self.s.recv(1) == ''
        if not r:
            raise Exception('Socket not closed!')
        self.s.settimeout(t)
        return r
    def read_chunk(self):
        # Chunk framing is "<hex length>\r\n<payload>\r\n"; strip the
        # trailing CRLF from the payload before returning it.
        line = recvline(self.s).rstrip('\r\n')
        bytes = int(line, 16) + 2 # Additional \r\n
        return self.read(bytes)[:-2]
    def send(self, data):
        self.s.sendall(data)
    def close(self):
        self.s.close()
def SynchronousHttpRequest(method, url, **kwargs):
    """
    Issue ``method`` against ``url`` over a raw connection and read the whole
    body according to whichever framing the server chose: chunked,
    Content-Length, Connection: close, or a body-less status code.
    """
    conn = RawHttpConnection(url)
    r = conn.request(method, url, **kwargs)
    if r.get('Transfer-Encoding', '').lower() == 'chunked':
        parts = []
        while True:
            part = conn.read_chunk()
            if not part:
                break  # zero-length chunk terminates the body
            parts.append(part)
        r.body = ''.join(parts)
    elif r.get('Content-Length', ''):
        r.body = conn.read(int(r['Content-Length']))
    else:
        tokens = [t.strip() for t in r.get('Connection', '').lower().split(',')]
        if 'close' in tokens:
            r.body = conn.read_till_eof()
        elif r.status in [101, 304, 204]:
            # Whitelist statuses that may not need a response
            r.body = ''
        else:
            raise Exception(str(r.status) + ' '+str(r.headers) + " No Transfer-Encoding:chunked nor Content-Length nor Connection:Close!")
    conn.close()
    return r
def GET(url, **kwargs):
    # Synchronous GET; returns a Response with .body fully read.
    return SynchronousHttpRequest('GET', url, **kwargs)
def POST(url, **kwargs):
    # Synchronous POST; returns a Response with .body fully read.
    return SynchronousHttpRequest('POST', url, **kwargs)
def OPTIONS(url, **kwargs):
    # Synchronous OPTIONS; returns a Response with .body fully read.
    return SynchronousHttpRequest('OPTIONS', url, **kwargs)
def AsynchronousHttpRequest(method, url, **kwargs):
    """
    Issue a request and return the Response as soon as the headers are
    parsed, with ``read`` and ``close`` callables attached for incremental
    body consumption.
    """
    c = RawHttpConnection(url)
    r = c.request(method, url, **kwargs)
    if r.get('Transfer-Encoding', '').lower() == 'chunked':
        # Each read() yields the payload of one chunk.
        def read():
            return c.read_chunk()
        r.read = read
    elif r.get('Content-Length', ''):
        cl = int(r['Content-Length'])
        # NOTE(review): every call to read() re-reads `cl` bytes from the
        # socket; callers are presumably expected to call it exactly once
        # for Content-Length responses.
        def read():
            return c.read(cl)
        r.read = read
    elif ('close' in [k.strip() for k in r.get('Connection', '').lower().split(',')]
          or r.status == 101):
        # Connection: close and 101 upgrades stream one packet per read().
        def read():
            return c.read()
        r.read = read
    else:
        raise Exception(str(r.status) + ' '+str(r.headers) + " No Transfer-Encoding:chunked nor Content-Length nor Connection:Close!")
    def close():
        c.close()
    r.close = close
    return r
def GET_async(url, **kwargs):
    # GET returning immediately; body is consumed via the attached read().
    return AsynchronousHttpRequest('GET', url, **kwargs)
def POST_async(url, **kwargs):
    # POST returning immediately; body is consumed via the attached read().
    return AsynchronousHttpRequest('POST', url, **kwargs)
| |
from __future__ import absolute_import, division, print_function
from fnmatch import fnmatch
from glob import glob
import os
import uuid
from warnings import warn
import pandas as pd
from toolz import merge
from ...async import get_sync
from ...base import tokenize
from ...compatibility import PY3
from ...context import _globals
from ...delayed import Delayed, delayed
from ... import multiprocessing
from ..core import DataFrame, new_dd_object
from ...utils import build_name_function, effective_get, get_scheduler_lock
from .io import _link
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
def to_hdf(df, path, key, mode='a', append=False, get=None,
           name_function=None, compute=True, lock=None, dask_kwargs=None,
           **kwargs):
    """ Store Dask Dataframe to Hierarchical Data Format (HDF) files

    This is a parallel version of the Pandas function of the same name. Please
    see the Pandas docstring for more detailed information about shared keyword
    arguments.

    This function differs from the Pandas version by saving the many partitions
    of a Dask DataFrame in parallel, either to many files, or to many datasets
    within the same file. You may specify this parallelism with an asterix
    ``*`` within the filename or datapath, and an optional ``name_function``.
    The asterix will be replaced with an increasing sequence of integers
    starting from ``0`` or with the result of calling ``name_function`` on each
    of those integers.

    This function only supports the Pandas ``'table'`` format, not the more
    specialized ``'fixed'`` format.

    Parameters
    ----------
    path: string
        Path to a target filename. May contain a ``*`` to denote many filenames
    key: string
        Datapath within the files. May contain a ``*`` to denote many locations
    name_function: function
        A function to convert the ``*`` in the above options to a string.
        Should take in a number from 0 to the number of partitions and return a
        string. (see examples below)
    compute: bool
        Whether or not to execute immediately. If False then this returns a
        ``dask.Delayed`` value.
    lock: Lock, optional
        Lock to use to prevent concurrency issues. By default a
        ``threading.Lock``, ``multiprocessing.Lock`` or ``SerializableLock``
        will be used depending on your scheduler if a lock is required. See
        dask.utils.get_scheduler_lock for more information about lock
        selection.
    dask_kwargs: dict, optional
        Extra keyword arguments forwarded to the scheduler ``get`` call.
    **other:
        See pandas.to_hdf for more information

    Examples
    --------
    Save Data to a single file

    >>> df.to_hdf('output.hdf', '/data')            # doctest: +SKIP

    Save data to multiple datapaths within the same file:

    >>> df.to_hdf('output.hdf', '/data-*')          # doctest: +SKIP

    Save data to multiple files:

    >>> df.to_hdf('output-*.hdf', '/data')          # doctest: +SKIP

    Save data to multiple files, using the multiprocessing scheduler:

    >>> df.to_hdf('output-*.hdf', '/data', get=dask.multiprocessing.get) # doctest: +SKIP

    Specify custom naming scheme.  This writes files as
    '2000-01-01.hdf', '2000-01-02.hdf', '2000-01-03.hdf', etc..

    >>> from datetime import date, timedelta
    >>> base = date(year=2000, month=1, day=1)
    >>> def name_function(i):
    ...     ''' Convert integer 0 to n to a string '''
    ...     return base + timedelta(days=i)

    >>> df.to_hdf('*.hdf', '/data', name_function=name_function) # doctest: +SKIP

    Returns
    -------
    None: if compute == True
    delayed value: if compute == False

    See Also
    --------
    read_hdf:
    to_parquet:
    """
    # Avoid a shared mutable default dict for dask_kwargs.
    if dask_kwargs is None:
        dask_kwargs = {}
    name = 'to-hdf-' + uuid.uuid1().hex
    pd_to_hdf = getattr(df._partition_type, 'to_hdf')
    single_file = True
    single_node = True
    # if path is string, format using i_name
    if isinstance(path, str):
        if path.count('*') + key.count('*') > 1:
            raise ValueError("A maximum of one asterisk is accepted in file "
                             "path and dataset key")
        fmt_obj = lambda path, i_name: path.replace('*', i_name)
        if '*' in path:
            single_file = False
    else:
        if key.count('*') > 1:
            raise ValueError("A maximum of one asterisk is accepted in "
                             "dataset key")
        fmt_obj = lambda path, _: path
    if '*' in key:
        single_node = False
    if 'format' in kwargs and kwargs['format'] != 'table':
        raise ValueError("Dask only support 'table' format in hdf files.")
    if mode not in ('a', 'w', 'r+'):
        raise ValueError("Mode must be one of 'a', 'w' or 'r+'")
    if name_function is None:
        name_function = build_name_function(df.npartitions - 1)
    # we guarantee partition order is preserved when its saved and read
    # so we enforce name_function to maintain the order of its input.
    if not (single_file and single_node):
        formatted_names = [name_function(i) for i in range(df.npartitions)]
        if formatted_names != sorted(formatted_names):
            warn("To preserve order between partitions name_function "
                 "must preserve the order of its input")
    # If user did not specify scheduler and write is sequential default to the
    # sequential scheduler. otherwise let the _get method choose the scheduler
    if get is None and 'get' not in _globals and single_node and single_file:
        get = get_sync
    # handle lock default based on whether we're writing to a single entity
    _actual_get = effective_get(get, df)
    if lock is None:
        if not single_node:
            lock = True
        elif not single_file and _actual_get is not multiprocessing.get:
            # if we're writing to multiple files with the multiprocessing
            # scheduler we don't need to lock
            lock = True
        else:
            lock = False
    if lock:
        lock = get_scheduler_lock(get, df)
    kwargs.update({'format': 'table', 'mode': mode, 'append': append})
    dsk = dict()
    # First partition uses the caller's mode/append settings ...
    i_name = name_function(0)
    dsk[(name, 0)] = (_pd_to_hdf, pd_to_hdf, lock,
                      [(df._name, 0), fmt_obj(path, i_name),
                       key.replace('*', i_name)], kwargs)
    # ... while later partitions append to the already-created target(s).
    kwargs2 = kwargs.copy()
    if single_file:
        kwargs2['mode'] = 'a'
    if single_node:
        kwargs2['append'] = True
    for i in range(1, df.npartitions):
        i_name = name_function(i)
        task = (_pd_to_hdf, pd_to_hdf, lock,
                [(df._name, i), fmt_obj(path, i_name),
                 key.replace('*', i_name)], kwargs2)
        if single_file:
            # Chain tasks so writes to one file happen sequentially.
            link_dep = i - 1 if single_node else 0
            task = (_link, (name, link_dep), task)
        dsk[(name, i)] = task
    dsk = merge(df.dask, dsk)
    if single_file and single_node:
        keys = [(name, df.npartitions - 1)]
    else:
        keys = [(name, i) for i in range(df.npartitions)]
    if compute:
        return DataFrame._get(dsk, keys, get=get, **dask_kwargs)
    else:
        return delayed([Delayed(k, dsk) for k in keys])
dont_use_fixed_error_message = """
This HDFStore is not partitionable and can only be use monolithically with
pandas. In the future when creating HDFStores use the ``format='table'``
option to ensure that your dataset can be parallelized"""
read_hdf_error_msg = """
The start and stop keywords are not supported when reading from more than
one file/dataset.
The combination is ambiguous because it could be interpreted as the starting
and stopping index per file, or starting and stopping index of the global
dataset."""
def _read_single_hdf(path, key, start=0, stop=None, columns=None,
                     chunksize=int(1e6), sorted_index=False, lock=None,
                     mode='a'):
    """
    Read a single hdf file into a dask.dataframe. Used for each file in
    read_hdf.

    ``key`` may contain wildcards; each matching group in the store becomes
    its own dask DataFrame and the results are concatenated.
    """
    def get_keys_stops_divisions(path, key, stop, sorted_index):
        """
        Get the "keys" or group identifiers which match the given key, which
        can contain wildcards. This uses the hdf file identified by the
        given path. Also get the index of the last row of data for each matched
        key.
        """
        with pd.HDFStore(path, mode=mode) as hdf:
            keys = [k for k in hdf.keys() if fnmatch(k, key)]
            stops = []
            divisions = []
            for k in keys:
                storer = hdf.get_storer(k)
                # Only 'table'-format nodes support chunked row access.
                if storer.format_type != 'table':
                    raise TypeError(dont_use_fixed_error_message)
                if stop is None:
                    stops.append(storer.nrows)
                elif stop > storer.nrows:
                    raise ValueError("Stop keyword exceeds dataset number "
                                     "of rows ({})".format(storer.nrows))
                else:
                    stops.append(stop)
                if sorted_index:
                    # First and last index values become the partition
                    # divisions for this key (index assumed sorted).
                    division_start = storer.read_column('index', start=0, stop=1)[0]
                    division_end = storer.read_column('index', start=storer.nrows - 1,
                                                      stop=storer.nrows)[0]
                    divisions.append([division_start, division_end])
                else:
                    divisions.append(None)
        return keys, stops, divisions

    def one_path_one_key(path, key, start, stop, columns, chunksize, division, lock):
        """
        Get the data frame corresponding to one path and one key (which should
        not contain any wildcards).
        """
        # Zero-row read gives the metadata (dtype/columns) for the graph.
        empty = pd.read_hdf(path, key, mode=mode, stop=0)
        if columns is not None:
            empty = empty[columns]

        # File mtime participates in the token so a rewritten file busts
        # any cached results.
        token = tokenize((path, os.path.getmtime(path), key, start,
                          stop, empty, chunksize, division))
        name = 'read-hdf-' + token
        if empty.ndim == 1:
            base = {'name': empty.name, 'mode': mode}
        else:
            base = {'columns': empty.columns, 'mode': mode}

        if start >= stop:
            raise ValueError("Start row number ({}) is above or equal to stop "
                             "row number ({})".format(start, stop))

        if division:
            # Known sorted divisions: whole key read in a single task.
            dsk = {(name, 0): (_pd_read_hdf, path, key, lock,
                               base)}

            divisions = division
        else:
            # One task per chunksize-rows window; divisions unknown.
            def update(s):
                new = base.copy()
                new.update({'start': s, 'stop': s + chunksize})
                return new

            dsk = dict(((name, i), (_pd_read_hdf, path, key, lock,
                                    update(s)))
                       for i, s in enumerate(range(start, stop, chunksize)))

            divisions = [None] * (len(dsk) + 1)

        return new_dd_object(dsk, name, empty, divisions)

    keys, stops, divisions = get_keys_stops_divisions(path, key, stop, sorted_index)
    if (start != 0 or stop is not None) and len(keys) > 1:
        raise NotImplementedError(read_hdf_error_msg)
    from ..multi import concat
    return concat([one_path_one_key(path, k, start, s, columns, chunksize, d, lock)
                   for k, s, d in zip(keys, stops, divisions)])
def _pd_read_hdf(path, key, lock, kwargs):
    """Read one chunk from an HDF5 file, serialized by ``lock`` when given.

    ``lock`` may be falsy (no locking) or any object with
    ``acquire``/``release``; the lock is always released, even on error.
    """
    if lock:
        lock.acquire()
    try:
        return pd.read_hdf(path, key, **kwargs)
    finally:
        if lock:
            lock.release()
def read_hdf(pattern, key, start=0, stop=None, columns=None,
             chunksize=1000000, sorted_index=False, lock=True, mode='a'):
    """
    Read HDF files into a Dask DataFrame

    Works like ``pandas.read_hdf`` but may span one large file, several
    files matching a glob pattern, or several keys within one file.

    Parameters
    ----------
    pattern : pattern (string), or buffer to read from. Can contain wildcards
    key : group identifier in the store. Can contain wildcards
    start : optional, integer (defaults to 0), row number to start at
    stop : optional, integer (defaults to None, the last row), row number to
        stop at
    columns : optional, a list of columns that if not None, will limit the
        return columns
    chunksize : optional, positive integer
        maximal number of rows per partition

    Returns
    -------
    dask.DataFrame

    Examples
    --------
    Load single file

    >>> dd.read_hdf('myfile.1.hdf5', '/x')  # doctest: +SKIP

    Load multiple files

    >>> dd.read_hdf('myfile.*.hdf5', '/x')  # doctest: +SKIP

    Load multiple datasets

    >>> dd.read_hdf('myfile.1.hdf5', '/*')  # doctest: +SKIP
    """
    # lock=True means "pick a lock appropriate for the active scheduler".
    if lock is True:
        lock = get_scheduler_lock()

    # Normalize the key to HDF's absolute "/group" form.
    if not key.startswith('/'):
        key = '/' + key

    paths = sorted(glob(pattern))
    row_bounds_given = start != 0 or stop is not None
    if row_bounds_given and len(paths) > 1:
        raise NotImplementedError(read_hdf_error_msg)
    if chunksize <= 0:
        raise ValueError("Chunksize must be a positive integer")
    if row_bounds_given and sorted_index:
        raise ValueError("When assuming pre-partitioned data, data must be "
                         "read in its entirety using the same chunksizes")

    from ..multi import concat
    frames = [_read_single_hdf(path, key, start=start, stop=stop,
                               columns=columns, chunksize=chunksize,
                               sorted_index=sorted_index,
                               lock=lock, mode=mode)
              for path in paths]
    return concat(frames)
# Copy read/write docs onto the bound method so help(df.to_hdf) is useful.
# Guarded to Python 3 — presumably because Python 2 instancemethods do not
# allow assigning __doc__ (NOTE(review): confirm).
if PY3:
    from ..core import _Frame
    _Frame.to_hdf.__doc__ = to_hdf.__doc__
| |
#!/usr/bin/env python
import json
import os.path
import hmac
from time import time
from uuid import uuid1
from datetime import datetime
from hashlib import sha256, md5
from snapy.utils import (encrypt, decrypt, decrypt_story,
make_media_id, request, get_auth_token,
make_request_token, get_attestation,
timestamp, STATIC_TOKEN, get_client_auth_token)
from snapy.API import CasperAPI
# Media type codes understood by the upload/send endpoints.
MEDIA_IMAGE = 0
MEDIA_VIDEO = 1
MEDIA_VIDEO_NOAUDIO = 2
# Friend relationship status codes.
FRIEND_CONFIRMED = 0
FRIEND_UNCONFIRMED = 1
FRIEND_BLOCKED = 2
# Account privacy settings (who may send snaps).
PRIVACY_EVERYONE = 0
PRIVACY_FRIENDS = 1
def is_video(data):
    """Return True if ``data`` starts with the 2-byte video magic prefix.

    The comparison already yields a bool; the original
    ``True if ... else False`` wrapper was redundant.
    """
    return data[0:2] == b'\x00\x00'
def is_image(data):
    """Return True if ``data`` starts with the JPEG SOI marker (FF D8).

    The comparison already yields a bool; the original
    ``True if ... else False`` wrapper was redundant.
    """
    return data[0:2] == b'\xFF\xD8'
def is_zip(data):
    """Return True if ``data`` starts with the ZIP magic prefix ("PK").

    The comparison already yields a bool; the original
    ``True if ... else False`` wrapper was redundant.
    """
    return data[0:2] == b'PK'
def get_file_extension(media_type):
    """Map a media type code to a file extension ('' when unknown)."""
    extension_by_type = {
        MEDIA_VIDEO: 'mp4',
        MEDIA_VIDEO_NOAUDIO: 'mp4',
        MEDIA_IMAGE: 'jpg',
    }
    return extension_by_type.get(media_type, '')
def get_media_type(data):
    """Classify raw media bytes by magic prefix.

    Both raw video and zipped (overlay) video report MEDIA_VIDEO; the
    prefixes tested by is_video/is_image/is_zip are mutually exclusive.
    Returns None when the data is unrecognized.
    """
    if is_video(data) or is_zip(data):
        return MEDIA_VIDEO
    if is_image(data):
        return MEDIA_IMAGE
    return None
def _map_keys(snap):
return {
u'id': snap.get('id', None),
u'media_id': snap.get('c_id', None),
u'media_type': snap.get('m', None),
u'time': snap.get('t', None),
u'sender': snap.get('sn', None),
u'recipient': snap.get('rp', None),
u'status': snap.get('st', None),
u'screenshot_count': snap.get('c', None),
u'sent': snap.get('sts', None),
u'opened': snap.get('ts', None)
}
class Snapchat(object):
    """Construct a :class:`Snapchat` object used for communicating
    with the Snapchat API.
    Usage:
        from snapy import Snapchat
        snapchat = Snapchat()
        snapchat.login('username', 'password', 'gmail_addr', 'gmail_passwd')
        ...
    """
    # NOTE(review): this module is Python 2 (print statements below).
    def __init__(self):
        # Session state; populated by login()/restore_token().
        self.username = None
        self.auth_token = None
        # Google account credentials used to mint Bearer ("gauth") tokens.
        self.gmail = None
        self.gpasswd = None
        self.gauth = None
        # Epoch 0 => the gauth token is treated as already expired.
        self.expiry = datetime.fromtimestamp(0)
    def _request(self, endpoint, data=None, params=None, files=None,
                 raise_for_status=True, req_type='post', moreheaders={}):
        # Thin forwarding wrapper that injects the session auth token.
        # NOTE(review): mutable default `moreheaders={}` is shared across
        # calls; harmless only if request() never mutates it — confirm.
        return request(endpoint, self.auth_token, data, params, files,
                       raise_for_status, req_type, moreheaders)
    def _get_device_token(self):
        # Fetch the per-device token pair used to sign login requests.
        r = self._request('/loq/device_id',params={'gauth': self._get_gauth()})
        return r.json()
    def _unset_auth(self):
        # Clear session credentials (used before a fresh login attempt).
        self.username = None
        self.auth_token = None
    def _get_gauth(self):
        """This is the proper way to access self.gauth when using it for an
        API request. This first checks to see if the Bearer token is expired,
        renewing it if needed.
        """
        if datetime.now() >= self.expiry:
            # get_auth_token returns (token, expiry_datetime).
            gauth_token = get_auth_token(self.gmail, self.gpasswd)
            self.gauth = gauth_token[0]
            self.expiry = gauth_token[1]
        return self.gauth
    def _get_conversation_auth(self, to):
        """Gets conversation auth for a certain user.
        Only takes in one user, returns a dict.
        """
        now = str(timestamp())
        r = self._request('/loq/conversation_auth_token',{
            'username': self.username,
            'timestamp': now,
            'conversation_id': self.username + "~" + to
        }, {'now': now, 'gauth': self._get_gauth()})
        return r.json()
    def restore_token(self, username, auth_token, gmail, gpasswd):
        """Restore a Snapchat session from an auth_token parameter
        returned in the response of a login request. Useful for when
        Snapchat breaks the login API.
        :param username Snapchat username
        :param auth_token Snapchat auth_token
        :param gmail Gmail address
        :param gpasswd Gmail password
        """
        self.username = username
        self.auth_token = auth_token
        self.gmail = gmail
        self.gpasswd = gpasswd
        gauth_token = get_auth_token(gmail, gpasswd)
        self.gauth = gauth_token[0]
        self.expiry = gauth_token[1]
    def login(self, username, password, gmail, gpasswd, ckey, csecret):
        """Login to Snapchat account
        Returns a dict containing user information on successful login, the
        data returned is similar to get_updates.
        :param username Snapchat username
        :param password Snapchat password
        :param gmail Gmail address
        :param gpasswd Gmail password
        :param ckey Casper API key
        :param csecret Casper API secret
        """
        self.gmail = gmail
        self.gpasswd = gpasswd
        casper = CasperAPI(ckey, csecret)
        # Retry the login handshake up to 4 times.
        i = 0
        logged_in = False
        while i < 4 and logged_in == False:
            i += 1
            now = str(timestamp())
            req_token = make_request_token(STATIC_TOKEN, now)
            gauth_token = get_auth_token(gmail, gpasswd)
            self.gauth = gauth_token[0]
            self.expiry = gauth_token[1]
            # Payload signed below with the device token ("dsig").
            string = username + "|" + password + "|" + now + "|" + req_token
            dtoken = self._get_device_token()
            self._unset_auth()
            # Attestation comes from the third-party Casper service.
            nonce = casper.generateSnapchatNonce(username, password, now)
            attestation = casper.getSnapchatAttestation(nonce)
            r = self._request('/loq/login', {
                'username': username,
                'password': password,
                'height': 1280,
                'width': 720,
                'max_video_height': 640,
                'max_video_width': 480,
                'dsig': hmac.new(str(dtoken['dtoken1v']),string,sha256).hexdigest()[:20],
                'dtoken1i': dtoken['dtoken1i'],
                'ptoken': "ie",
                'attestation': attestation,
                'sflag': 1,
                'application_id': 'com.snapchat.android',
                'req_token': req_token
            }, {
                'now': now,
                'gauth': self._get_gauth()
            }, None, True, 'post', {
                'X-Snapchat-Client-Auth': casper.getSnapchatClientAuth(username, password, now)
            })
            result = r.json()
            if 'updates_response' in result:
                logged_in = True
                if 'auth_token' in result['updates_response']:
                    self.auth_token = result['updates_response']['auth_token']
                if 'username' in result['updates_response']:
                    self.username = username
        if self.username is None and self.auth_token is None:
            raise Exception(result.get('message', 'unknown error'))
        return result
    def logout(self):
        """Logout of Snapchat account
        Returns true if logout was successful.
        """
        # An empty response body signals success.
        r = self._request('logout', {'username': self.username})
        return len(r.content) == 0
    def get_updates(self, update_timestamp=0):
        """Get user, friend and snap updates
        Returns a dict containing user, friends and snap information.
        :param update_timestamp: Optional timestamp (epoch in seconds) to limit
                                 updates
        """
        # NOTE(review): update_timestamp is accepted but never sent.
        now = str(timestamp())
        r = self._request('/loq/all_updates', {
            'timestamp': now,
            'username': self.username,
            'height': 1280,
            'width': 720,
            'max_video_height': 640,
            'max_video_width': 480
        }, {
            'now': now,
            'gauth': self._get_gauth()
        })
        result = r.json()
        # The server may rotate the session token on any update call.
        if 'auth_token' in result:
            self.auth_token = result['auth_token']
        return result
    def get_conversations(self):
        """Returns a list of conversations
        with other users.
        """
        offset = None
        updates = self.get_updates()
        try:
            # NOTE(review): [-2] (not [-1]) is used to find the pagination
            # token — presumably to skip the TeamSnapchat entry; confirm.
            last = updates['conversations_response'][-2]
            offset = last['iter_token']
        except IndexError:
            print "No conversations except TeamSnapchat"
        convos = updates['conversations_response']
        # The string below is disabled pagination code kept as a no-op
        # expression; left untouched.
        """
        while len(offset) > 0:
            now = str(timestamp())
            result = self._request('conversations', {
                'username': self.username,
                'timestamp': now,
                'checksum': md5(self.username).hexdigest(),
                'offset': offset,
                'features_map': '{}'
            }, {
                'now': now,
                'gauth': self.gauth
            })
            print result.json()
            convos += result.json()['conversations_response']
            last = result.json()['conversations_response'][-1]
            offset = last['iter_token'] if 'iter_token' in last else ""
        """
        return convos
    def get_snaps(self):
        """Get snaps
        Returns a list containing metadata for snaps, collected from the
        pending received snaps of every conversation.
        """
        snaps = []
        conversations = self.get_conversations()
        for conversation in conversations:
            num_pending = len(conversation['pending_received_snaps'])
            for i in range(0, num_pending):
                snap = (_map_keys(conversation['pending_received_snaps'][i]))
                snaps.append(snap)
        return snaps
    def get_friend_stories(self, update_timestamp=0):
        """Get stories
        Returns a list containing metadata for friends' stories, excluding
        shared stories and teamsnapchat.
        :param update_timestamp: Optional timestamp (epoch in seconds) to limit
                                 updates
        """
        # NOTE(review): update_timestamp and `fstories` are unused.
        result = self.get_updates()
        stories = []
        fstories = []
        story_groups = result['stories_response']['friend_stories']
        for group in story_groups:
            sender = group['username']
            for story in group['stories']:
                obj = story['story']
                if obj['is_shared'] == False and obj['username'] != 'teamsnapchat':
                    stories.append(obj)
        return stories
    def get_story_blob(self, story_id, story_key, story_iv):
        """Get the image or video of a given snap
        Returns the decrypted image or a video of the given snap or None if
        data is invalid.
        :param story_id: Media id to fetch
        :param story_key: Encryption key of the story
        :param story_iv: Encryption IV of the story
        """
        now = str(timestamp())
        r = self._request('/bq/story_blob', {'story_id': story_id},
                          raise_for_status=False, req_type='get')
        data = decrypt_story(r.content, story_key, story_iv)
        return data
    def get_blob(self, snap_id):
        """Get the image or video of a given snap
        Returns the image or a video of the given snap or None if
        data is invalid.
        :param snap_id: Snap id to fetch
        """
        now = str(timestamp())
        r = self._request('/bq/blob', {'id': snap_id, 'timestamp':now, 'username': self.username},
                          {'now': now, 'gauth': self._get_gauth()}, req_type='get')
        return r.content
    def send_events(self, events, data=None):
        """Send event data
        Returns true on success.
        :param events: List of events to send
        :param data: Additional data to send
        """
        now = str(timestamp())
        if data is None:
            data = {}
        r = self._request('/bq/update_snaps', {
            'events': json.dumps(events),
            'json': json.dumps(data),
            'username': self.username
        }, {'now': now,'gauth': self._get_gauth()})
        return len(r.content) == 0
    def mark_viewed(self, snap_id, view_duration=1):
        """Mark a snap as viewed
        Returns true on success.
        :param snap_id: Snap id to mark as viewed
        :param view_duration: Number of seconds snap was viewed
        """
        now = time()
        data = {snap_id: {u't': now, u'sv': view_duration}}
        # View event is back-dated by the view duration; expiry is "now".
        events = [
            {
                u'eventName': u'SNAP_VIEW', u'params': {u'id': snap_id},
                u'ts': int(round(now)) - view_duration
            },
            {
                u'eventName': u'SNAP_EXPIRED', u'params': {u'id': snap_id},
                u'ts': int(round(now))
            }
        ]
        return self.send_events(events, data)
    def mark_screenshot(self, snap_id, view_duration=1):
        """Mark a snap as screenshotted
        Returns true on success.
        :param snap_id: Snap id to mark as viewed
        :param view_duration: Number of seconds snap was viewed
        """
        now = time()
        # u'c': 3 is the status code sent for a screenshot — TODO confirm.
        data = {snap_id: {u't': now, u'sv': view_duration, u'c': 3}}
        events = [
            {
                u'eventName': u'SNAP_SCREENSHOT', u'params': {u'id': snap_id},
                u'ts': int(round(now)) - view_duration
            }
        ]
        return self.send_events(events, data)
    def update_privacy(self, friends_only):
        """Set privacy settings
        Returns true on success.
        :param friends_only: True to allow snaps from friends only
        """
        setting = lambda f: PRIVACY_FRIENDS if f else PRIVACY_EVERYONE
        r = self._request('settings', {
            'username': self.username,
            'action': 'updatePrivacy',
            'privacySetting': setting(friends_only)
        })
        return r.json().get('param') == str(setting(friends_only))
    def get_friends(self):
        """Get friends
        Returns a list of friends' usernames.
        """
        friends = []
        # NOTE(review): the .get(..., []) default is a list, so ['friends']
        # raises TypeError when 'friends_response' is absent — confirm intent.
        for friend in self.get_updates().get('friends_response', [])['friends']:
            friends.append(friend['name'])
        return friends
    def get_best_friends(self):
        """Get best friends
        Returns a list of best friends.
        """
        return self.get_updates().get('bests', [])
    def add_friend(self, username):
        """Add user as friend
        Returns JSON response.
        Expected messages:
            Success: '{username} is now your friend!'
            Pending: '{username} is private. Friend request sent.'
            Failure: 'Sorry! Couldn't find {username}'
        :param username: Username to add as a friend
        """
        now = str(timestamp())
        r = self._request('/bq/friend', {
            'action': 'add',
            'friend': username,
            'timestamp': now,
            'username': self.username,
            'added_by': 'ADDED_BY_USERNAME'
        }, {'now': now, 'gauth': self._get_gauth()})
        return r.json()
    def delete_friend(self, username):
        """Remove user from friends
        Returns true on success.
        :param username: Username to remove from friends
        """
        now = str(timestamp())
        r = self._request('/bq/friend', {
            'action': 'delete',
            'friend': username,
            'timestamp': now,
            'username': self.username
        }, {'now': now, 'gauth': self._get_gauth()})
        return r.json()
    def block(self, username):
        """Block a user
        Returns true on success.
        :param username: Username to block
        """
        now = (str(timestamp()))
        r = self._request('/bq/friend', {
            'action': 'block',
            'friend': username,
            'username': self.username,
            'features_map': '{}',
            'timestamp': now
        }, {'gauth': self._get_gauth()})
        return r.json().get('message') == '{0} was blocked'.format(username)
    def unblock(self, username):
        """Unblock a user
        Returns true on success.
        :param username: Username to unblock
        """
        r = self._request('friend', {
            'action': 'unblock',
            'friend': username,
            'username': self.username
        })
        return r.json().get('message') == '{0} was unblocked'.format(username)
    def get_blocked(self):
        """Find blocked users
        Returns a list of currently blocked users.
        """
        # NOTE(review): get_friends() returns name strings, so f['type']
        # raises TypeError — this method looks broken; confirm.
        return [f for f in self.get_friends() if f['type'] == FRIEND_BLOCKED]
    def get_requested(self):
        """Find friend requests
        Returns a list of users requesting a friendship.
        """
        requests = []
        # NOTE(review): same [] default + subscript hazard as get_friends().
        for request in self.get_updates().get('friends_response', [])['added_friends']:
            requests.append(request)
        return requests
    def upload(self, path):
        """Upload media
        Returns the media ID on success. The media ID is used when sending
        the snap.
        :param path: Path of the image or video file to upload
        """
        if not os.path.exists(path):
            raise ValueError('No such file: {0}'.format(path))
        with open(path, 'rb') as f:
            data = f.read()
        # Media type inferred from the file's magic bytes.
        media_type = get_media_type(data)
        if media_type is None:
            raise ValueError('Could not determine media type for given data')
        media_id = make_media_id(self.username)
        now = str(timestamp())
        r = self._request('/ph/upload', {
            'media_id': media_id,
            'type': media_type,
            'timestamp': now,
            'username': self.username,
            'zipped': '0'
        }, {'now': now, 'gauth': self._get_gauth()}, files={'data': data})
        # Empty response body signals success.
        return media_id if len(r.content) == 0 else None
    def send(self, media_id, recipients, time=5):
        """Send a snap. Requires a media_id returned by the upload method
        Returns true if the snap was sent successfully.
        :param media_id: Media ID from a previous upload()
        :param recipients: List of recipient usernames
        :param time: Seconds the snap is viewable (shadows the time builtin
                     import; kept for API compatibility)
        """
        now = str(timestamp())
        # Recipients are sent as a JSON-style array string.
        recipients = '["' + '","'.join(recipients) + '"]'
        r = self._request('/loq/send', {
            'media_id': media_id,
            'zipped': '0',
            'recipients': recipients,
            'username': self.username,
            'time': time,
            'timestamp': now,
            'features_map': '{}'
        }, {'now': now, 'gauth': self._get_gauth()})
        return len(r.content) == 0
    def send_to_story(self, media_id, time=5, media_type=0, is_zip=0):
        """Send a snap to your story. Requires a media_id returned by the upload method
        Returns the JSON response from the server.
        """
        now = str(timestamp())
        r = self._request('/bq/post_story', {
            'username': self.username,
            'media_id': media_id,
            'client_id': media_id,
            'time': time,
            'type': media_type,
            'zipped': is_zip
        }, {'now': now, 'gauth': self._get_gauth()})
        return r.json()
    def get_conversation_info(self, tos):
        # Post a 'presence' message for each recipient in `tos`.
        # NOTE(review): `messages` is overwritten on every loop iteration,
        # so only the last recipient's message is actually posted — confirm.
        messages = {}
        if not isinstance(tos, list):
            tos = [tos]
        for to in tos:
            auth_info = self._get_conversation_auth(to)
            if 'messaging_auth' in auth_info:
                payload = auth_info['messaging_auth']['payload']
                mac = auth_info['messaging_auth']['mac']
                conv_id = str(uuid1())
                messages = {
                    'presences': {self.username: True, to: False},
                    'receiving_video': False,
                    'supports_here': True,
                    'header': {
                        'auth': {
                            'mac': mac,
                            'payload': payload
                        },
                        'to': [to],
                        'conv_id': self.username + "~" + to,
                        'from': self.username,
                        'conn_sequence_number': 0
                    },
                    'retried': False,
                    'id': conv_id,
                    'type': 'presence'
                }
        now = str(timestamp())
        r = self._request('/loq/conversation_post_messages',{
            'auth_token': self._get_gauth(),
            'messages': messages,
            'timestamp': now,
            'username': self.username
        },{'now': now, 'gauth': self._get_gauth()})
        return r
    def clear_feed(self):
        """Clear the user's feed
        Returns true if feed was successfully cleared.
        """
        r = self._request('clear', {
            'username': self.username
        })
        return len(r.content) == 0
    def get_snaptag(self):
        """Get a QR code-like image used to add friends on Snapchat.
        Returns False if unable to get a QR code.
        """
        updates = self.get_updates()
        if not updates:
            return False
        else:
            qr_path = updates['updates_response']['qr_path']
        now = str(timestamp())
        r = self._request('/bq/snaptag_download', {
            'image': qr_path,
            'username': self.username,
            'timestamp': now
        }, {
            'now': now,
            'gauth': self._get_gauth()
        })
        return r.content
    def get_my_story(self):
        """Return the current user's own stories from /bq/stories."""
        now = str(timestamp())
        r = self._request('/bq/stories', {
            'timestamp': now,
            'screen_height_in': 4.527565,
            'screen_height_px': 1920,
            'screen_width_in': 2.5590599,
            'screen_width_px': 1080,
            'username': self.username,
            'features_map': {}
        })
        result = r.json()['my_stories']
        return result
| |
from collections import deque
import numpy as np
from scipy.stats import itemfreq
import theano
from theano import shared
import theano.tensor as T
import lasagne
from qtable import QAgent
from simple_envs import SimpleMaze
class QAgentNN(QAgent):
""" Neural-network-based Q Learning Agent
This agent replaces the Q table in a canonical q agent with a neural net.
Its inputs are the observed state and its outputs are the q values for
each action.
The training of the network is performed periodically with randomly
selected batch of past experiences. This technique is also known as
Experience Replay. The loss function is defined following the Bellman
iteration equation.
Different from the techniques presented in the original DeepMind paper.
We apply re-scaling on the reward to make it fit better into the value
range of output layer (e.g. (-1, +1)). This can be helpful for scenarios
in which the value of reward has a large dynamic range. Another
modification is that we employ separate buffers in the replay memory
for different actions. This can speed-up convergence in non-stationary
and highly-action-skewed cases.
QAgentNN reuses much of the interface methods of QAgent. The reset(),
imporove_translate_(), reinforce_(), update_table_(), lookup_table_(),
and act_() methods are redefined to overwrite/encapsulate the original
functionality.
"""
def __init__(self,
dim_state, range_state, # state
f_build_net=None, # network
batch_size=100, learning_rate=0.01, momentum=0.9, # SGD
update_period=1, freeze_period=1, # schedule
reward_scaling=1, reward_scaling_update='fixed', rs_period=1, # reward
memory_size=500, num_buffer=1, # memory
**kwargs):
"""Initialize NN-based Q Agent
Parameters
----------
dim_state : dimensions of observation. 3-D tensor.
range_state : lower and upper bound of each observation dimension.
4-D tensor (d1, d2, d3, 2).
f_build_net : Function handle for building DQN, and returns a
Lasagne output layer.
batch_size : batch size for mini-batch SGD.
learning_rate : step size of a single gradient descent step.
momentum : faction of old weight values kept during gradient descent.
reward_scaling : initial value for inverse reward scaling factor
reward_scaling_update : 'fixed' & 'adaptive'
update_period : number of epochs per SGD update.
freeze_period : number of SGD updates per target network sync.
memory_size : size of replay memory (each buffer).
num_buffer : number of buffers used in replay memory
kwargs :
Returns
-------
"""
super(QAgentNN, self).__init__(**kwargs)
self.DIM_STATE = dim_state # mush be in form (d1, d2, d3), i.e. three dimensions
if range_state is not None: # lower and upper bound on observation
self.STATE_MEAN = (np.array(range_state)[:, :, :, 1]+np.array(range_state)[:, :, :, 0])/2.0
self.STATE_MAG = (np.array(range_state)[:, :, :, 1]-np.array(range_state)[:, :, :, 0])/2.0
else:
self.STATE_MEAN = np.zeros(self.DIM_STATE)
self.STATE_MAG = np.ones(self.DIM_STATE)
self.FREEZE_PERIOD = freeze_period
self.MEMORY_SIZE = memory_size
self.BATCH_SIZE = batch_size
self.LEARNING_RATE = learning_rate
self.MOMENTUM = momentum
self.UPDATE_PERIOD = update_period
self.REWARD_SCALING_UPDATE = reward_scaling_update
self.RS_PERIOD = rs_period
self.fun_build_net = f_build_net if f_build_net is not None else QAgentNN.build_net_
self.rs = reward_scaling
self.freeze_counter = self.FREEZE_PERIOD - 1
self.update_counter = self.UPDATE_PERIOD - 1
self.rs_counter = self.RS_PERIOD - 1
# set q table as a NN
self.fun_train_qnn, self.fun_adapt_rs, self.fun_clone_target, self.fun_q_lookup, self.fun_rs_lookup = \
self.init_fun_(self.fun_build_net,
self.DIM_STATE, self.BATCH_SIZE, self.GAMMA,
self.LEARNING_RATE, self.MOMENTUM,
self.rs, self.REWARD_SCALING_UPDATE)
self.replay_memory = QAgentNN.ReplayMemory(memory_size, batch_size, dim_state, len(self.ACTIONS), num_buffer)
def reset(self, foget_table=False, foget_memory=False, **kwargs):
self.freeze_counter = self.FREEZE_PERIOD - 1
self.update_counter = self.UPDATE_PERIOD - 1
self.rs_counter = self.RS_PERIOD - 1
if foget_table:
self.fun_train_qnn, \
self.fun_adapt_rs, \
self.fun_clone_target, \
self.fun_q_lookup, \
self.fun_rs_lookup = self.init_fun_(
self.fun_build_net,
self.DIM_STATE, self.BATCH_SIZE, self.GAMMA, self.LEARNING_RATE, self.MOMENTUM,
self.rs, self.REWARD_SCALING_UPDATE
)
if foget_memory:
self.ReplayMemory.reset()
kwargs['foget_table'] = foget_table
super(QAgentNN, self).reset(**kwargs)
return
def improve_translate_(self, last_observation, last_action, last_reward, observation):
# try invoking super-class
return super(QAgentNN, self).improve_translate_(
last_observation, last_action, last_reward, observation
)
def reinforce_(self, last_state, last_action, last_reward, state):
"""Train the network
Put the latest experience into replay memory and eriodically train
the network with batch of random samples sampled from the replay
memory.
Freeze parameters of the target network during non-training epochs.
Will not update the network if the state or reward passes in is None
or the replay memory is yet to be filled up.
Parameters
----------
last_state : last agent state
last_action :
last_reward : reward from last action
state :
Returns : training loss or None
-------
"""
# Check if this is a valid experience
if last_state is None:
if self.verbose > 0:
print " "*4 + "QAgentNN.reinforce_():",
print "last_state is None."
return None
elif last_reward is None:
if self.verbose > 0:
print " "*4 + "QAgentNN.reinforce_():",
print "last_reward is None."
return None
elif state is None:
if self.verbose > 0:
print " "*4 + "QAgentNN.reinforce_():",
print "state is None."
return None
# Store latest experience into replay memory
idx_action = self.ACTIONS.index(last_action)
self.replay_memory.update(last_state, idx_action, last_reward, state)
# Update networks
if not self.replay_memory.isfilled():
if self.verbose > 0:
print " "*4 + "QAgentNN.reinforce_():",
print "unfull memory."
else:
# Log counter progress
if self.verbose > 0:
print " "*4 + "QAgentNN.reinforce_():",
print "update counter {}, freeze counter {}, rs counter {}.".format(
self.update_counter, self.freeze_counter, self.rs_counter)
loss = None
if self.update_counter == 0:
last_states, last_actions, last_rewards, states = self.replay_memory.sample_batch()
loss = self.update_table_(last_states, last_actions, last_rewards, states)
self.update_counter = self.UPDATE_PERIOD - 1
if self.verbose > 0:
print " "*4 + "QAgentNN.reinforce_():",
print "update loss is {}, reward_scaling is {}".format(
loss, self.rs
)
if self.verbose > 1:
freq = itemfreq(last_actions)
print " "*8 + "QAgentNN.reinforce_():",
print "batch action distribution: {}".format(
{self.ACTIONS[int(freq[i, 0])]: 1.0 * freq[i, 1] /
self.BATCH_SIZE for i in range(freq.shape[0])}
)
else:
self.update_counter -= 1
return loss
return None
def act_(self, state):
# Escalate to QAgent.act_().
# Pass None state if memory is not full to invoke random action.
if self.is_memory_filled():
return super(QAgentNN, self).act_(state)
else:
if self.verbose > 2:
print " "*8 + "QAgentNN.act_():",
print "unfull memory."
return super(QAgentNN, self).act_(None)
def update_table_(self, last_state, last_action, reward, current_state):
loss = self.fun_train_qnn(
self.rescale_state(last_state),
last_action,
reward,
self.rescale_state(current_state)
)
# Sync target network with non-target network
if self.freeze_counter == 0:
self.fun_clone_target()
self.freeze_counter = self.FREEZE_PERIOD - 1
else:
self.freeze_counter -= 1
# adjust reward scaling values
if self.REWARD_SCALING_UPDATE=='adaptive':
if self.rs_counter == 0:
loss = self.fun_adapt_rs(
self.rescale_state(last_state),
last_action,
reward,
self.rescale_state(current_state)
)
self.rs_counter = self.RS_PERIOD - 1
self.rs = self.fun_rs_lookup()
else:
self.rs_counter -= 1
return loss
def lookup_table_(self, state):
state_var = np.zeros(tuple([1]+list(self.DIM_STATE)), dtype=np.float32)
state_var[0, :] = state
return self.fun_q_lookup(self.rescale_state(state_var)).ravel().tolist()
def is_memory_filled(self):
return self.replay_memory.isfilled()
def rescale_state(self, states):
return (states-self.STATE_MEAN)/self.STATE_MAG
@staticmethod
def build_net_(input_var=None, input_shape=None, num_outputs=None):
if input_shape is None or num_outputs is None:
raise ValueError('State or Action dimension not given!')
l_in = lasagne.layers.InputLayer(shape=input_shape, input_var=input_var)
l_hid1 = lasagne.layers.DenseLayer(
l_in, num_units=500,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
l_hid2 = lasagne.layers.DenseLayer(
l_hid1, num_units=500,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
l_out = lasagne.layers.DenseLayer(
l_hid2, num_units=num_outputs,
nonlinearity=lasagne.nonlinearities.tanh)
return l_out
def init_fun_(self, f_build_net,
dim_state, batch_size, gamma,
learning_rate, momentum,
reward_scaling, reward_scaling_update):
"""Define and compile function to train and evaluate network
:param f_build_net: function to build dqn
:param dim_state: dimensions of a single state tensor
:param batch_size:
:param gamma: future reward discount factor
:param learning_rate:
:param momentum:
:param reward_scaling:
:param reward_scaling_update:
:return:
"""
self.qnn = f_build_net(
None, tuple([None]+list(self.DIM_STATE)), len(self.ACTIONS)
)
self.qnn_target = f_build_net(
None, tuple([None]+list(self.DIM_STATE)), len(self.ACTIONS)
)
if len(dim_state) != 3:
raise ValueError("We only support 3 dimensional states.")
# inputs
# state: (BATCH_SIZE, MEMORY_LENGTH, DIM_STATE[0], DIM_STATE[1])
old_states, new_states = T.tensor4s('old_states', 'new_states')
actions = T.ivector('actions') # (BATCH_SIZE, 1)
rewards = T.vector('rewards') # (BATCH_SIZE, 1)
rs = shared(value=reward_scaling*1.0, name='reward_scaling')
# intermediates
predict_q = lasagne.layers.get_output(
layer_or_layers=self.qnn, inputs=old_states
)
predict_q_inference = lasagne.layers.get_output(
layer_or_layers=self.qnn, inputs=old_states, deterministic=True
)
predict_next_q = lasagne.layers.get_output(
layer_or_layers=self.qnn_target, inputs=new_states
)
target_q = rewards/rs + gamma*T.max(predict_next_q, axis=1)
# penalty
singularity = 1+1e-4
penalty = T.mean(
1/T.pow(predict_q[T.arange(batch_size), actions]-singularity, 2) +
1/T.pow(predict_q[T.arange(batch_size), actions]+singularity, 2) -
2
)
# outputs
loss = T.mean(
(predict_q[T.arange(batch_size), actions] - target_q)**2
) + (1e-5)*penalty
# weight update formulas (mini-batch SGD with momentum)
params = lasagne.layers.get_all_params(self.qnn, trainable=True)
grads = T.grad(loss, params)
grads = lasagne.updates.total_norm_constraint(grads, 10)
updates = lasagne.updates.nesterov_momentum(
grads, params, learning_rate=learning_rate, momentum=momentum
)
updates_rs = lasagne.updates.nesterov_momentum(
loss, [rs], learning_rate=learning_rate, momentum=momentum
)
# functions
fun_train_qnn = theano.function(
[old_states, actions, rewards, new_states],
loss, updates=updates, allow_input_downcast=True
)
fun_adapt_rs = theano.function(
[old_states, actions, rewards, new_states],
loss, updates=updates_rs, allow_input_downcast=True
)
def fun_clone_target():
lasagne.layers.helper.set_all_param_values(
self.qnn_target,
lasagne.layers.helper.get_all_param_values(self.qnn)
)
fun_q_lookup = theano.function(
[old_states], predict_q_inference, allow_input_downcast=True
)
fun_rs_lookup = rs.get_value
return fun_train_qnn, fun_adapt_rs, fun_clone_target, fun_q_lookup, fun_rs_lookup
class ReplayMemory(object):
    """Circular experience buffer for (s, a, r, s') transitions.

    Keeps the most recent ``memory_size`` transitions per buffer (one or
    more buffers, selected by action index) and draws uniformly random
    mini-batches across all buffers.
    """
    def __init__(self, memory_size, batch_size, dim_state, num_actions, num_buffers=1):
        self.MEMORY_SIZE = memory_size
        self.BATCH_SIZE = batch_size
        self.DIM_STATE = dim_state
        self.NUM_ACTIONS = num_actions
        self.NUM_BUFFERS = num_buffers
        # Pre-allocate all storage up front; states are float32, actions int32.
        state_shape = (self.NUM_BUFFERS, memory_size) + tuple(self.DIM_STATE)
        scalar_shape = (self.NUM_BUFFERS, memory_size)
        self.buffer_old_state = np.zeros(state_shape, dtype=np.float32)
        self.buffer_action = np.zeros(scalar_shape, dtype=np.int32)
        self.buffer_reward = np.zeros(scalar_shape, dtype=np.float32)
        self.buffer_new_state = np.zeros(state_shape, dtype=np.float32)
        self.top = [-1] * self.NUM_BUFFERS        # index of the most recent write per buffer
        self.filled = [False] * self.NUM_BUFFERS  # True once a buffer has wrapped around
    def update(self, last_state, idx_action, last_reward, new_state):
        """Insert one transition; the action index selects the target buffer."""
        which = idx_action % self.NUM_BUFFERS
        slot = (self.top[which] + 1) % self.MEMORY_SIZE
        self.buffer_old_state[which, slot, :] = last_state
        self.buffer_action[which, slot] = idx_action
        self.buffer_reward[which, slot] = last_reward
        self.buffer_new_state[which, slot, :] = new_state
        # Reaching the last slot means the buffer has been written end to end.
        if slot == (self.MEMORY_SIZE - 1):
            self.filled[which] = True
        self.top[which] = slot
    def sample_batch(self):
        """Return a uniformly random batch (old_states, actions, rewards, new_states)."""
        slot_idx = np.random.randint(0, self.MEMORY_SIZE, (self.BATCH_SIZE,))
        which = np.random.randint(0, self.NUM_BUFFERS, (self.BATCH_SIZE,))
        return (self.buffer_old_state[which, slot_idx, :],
                self.buffer_action[which, slot_idx],
                self.buffer_reward[which, slot_idx],
                self.buffer_new_state[which, slot_idx, :])
    def isfilled(self):
        """True once every buffer has wrapped around at least once."""
        return all(self.filled)
    def reset(self):
        """Logically empty all buffers (stored arrays are left in place)."""
        self.top = [-1] * self.NUM_BUFFERS
        self.filled = [False] * self.NUM_BUFFERS
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description
# -----------
# Print information about Android DEX files
import sys
import os
import argparse
import traceback
import lief
from lief import DEX
EXIT_STATUS = 0
# Terminal geometry with a (100, 100) fallback. shutil.get_terminal_size
# works on all platforms and non-tty streams, whereas parsing `stty size`
# output spawns a subprocess, fails off-POSIX, and yields strings instead
# of the int defaults.
import shutil
_term = shutil.get_terminal_size(fallback=(100, 100))
terminal_rows, terminal_columns = _term.lines, _term.columns
class exceptions_handler(object):
    """Decorator that catches the given exception types from the wrapped
    function, reports them, and records the failure in EXIT_STATUS.

    Usage: ``@exceptions_handler(Exception)`` — the first __call__ receives
    the decorated function; subsequent calls invoke it. When an exception is
    caught the wrapped call returns None.
    """
    # Filled in on the first __call__ with the decorated function.
    func = None
    def __init__(self, exceptions, on_except_callback=None):
        # exceptions: exception class (or tuple of classes) to catch.
        # on_except_callback: optional callable invoked with the exception.
        self.exceptions = exceptions
        self.on_except_callback = on_except_callback
    def __call__(self, *args, **kwargs):
        if self.func is None:
            # Decoration step: capture the target function.
            self.func = args[0]
            return self
        try:
            return self.func(*args, **kwargs)
        except self.exceptions as e:
            global EXIT_STATUS
            # Record the failure in both reporting paths; previously only
            # the callback branch set EXIT_STATUS, so failures without a
            # callback still exited with status 0.
            EXIT_STATUS = 1
            if self.on_except_callback is not None:
                print("{} raised: {}".format(self.func.__name__, e))
                self.on_except_callback(e)
            else:
                print("-" * 60)
                print("Exception in {}: {}".format(self.func.__name__, e))
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_tb(exc_traceback)
                print("-" * 60)
@exceptions_handler(Exception)
def print_information(dexfile):
    """Print general information (the DEX format version) about the file."""
    print("== Information ==")
    print("DEX File version: {}".format(dexfile.version))
    print("")
@exceptions_handler(Exception)
def print_header(dexfile):
    """Print the DEX header structure."""
    print("== Header ==")
    print(dexfile.header)
@exceptions_handler(Exception)
def print_classes(dexfile):
    """Print every class defined in the DEX file, one per line."""
    print("== Classes ==")
    for cls in dexfile.classes:
        print(cls)
@exceptions_handler(Exception)
def print_fields(dexfile):
    """Print every field defined in the DEX file, one per line."""
    print("== Fields ==")
    for f in dexfile.fields:
        print(f)
@exceptions_handler(Exception)
def print_methods(dexfile):
    """Print every method defined in the DEX file, one per line."""
    print("== Methods ==")
    for m in dexfile.methods:
        print(m)
@exceptions_handler(Exception)
def print_strings(dexfile):
    """Dump the DEX string pool, one entry per line."""
    print("== Strings ==")
    for entry in dexfile.strings:
        print(entry)
@exceptions_handler(Exception)
def print_types(dexfile):
    """Dump the DEX type list, one entry per line."""
    print("== Types ==")
    for type_entry in dexfile.types:
        print(type_entry)
@exceptions_handler(Exception)
def print_prototypes(dexfile):
    """Dump the DEX method prototypes, one entry per line."""
    print("== Prototypes ==")
    for proto in dexfile.prototypes:
        print(proto)
@exceptions_handler(Exception)
def print_map(dexfile):
    """Print the DEX map section."""
    print("== Map ==")
    print(dexfile.map)
def main():
    """Parse command-line options, print the requested DEX sections, and
    exit with the accumulated EXIT_STATUS (non-zero if a printer raised)."""
    parser = argparse.ArgumentParser(usage='%(prog)s [options] DEX files')
    parser.add_argument('-a', '--all',
            action='store_true', dest='show_all',
            help='Show all information')
    parser.add_argument('-H', '--header',
            action='store_true', dest='show_header',
            help='Display header')
    parser.add_argument('-c', '--classes',
            action='store_true', dest='show_classes',
            help='Display classes')
    parser.add_argument('-f', '--fields',
            action='store_true', dest='show_fields',
            help='Display Fields')
    parser.add_argument('-m', '--methods',
            action='store_true', dest='show_methods',
            help='Display Methods')
    parser.add_argument('-s', '--strings',
            action='store_true', dest='show_strings',
            help='Display Strings')
    parser.add_argument('-t', '--types',
            action='store_true', dest='show_types',
            help='Display Types')
    parser.add_argument('-p', '--prototypes',
            action='store_true', dest='show_prototypes',
            help='Display Prototypes')
    parser.add_argument('-M', '--map',
            action='store_true', dest='show_map',
            help='Display Map')
    parser.add_argument("file",
            metavar="<dex-file>",
            help='Target DEX File')

    # Mutually exclusive logger verbosity flags; WARNING is the default.
    logger_group = parser.add_argument_group('Logger')
    verbosity = logger_group.add_mutually_exclusive_group()
    verbosity.add_argument('--debug',
            dest='main_verbosity',
            action='store_const',
            const=lief.logging.LOGGING_LEVEL.DEBUG)
    verbosity.add_argument('--trace',
            dest='main_verbosity',
            action='store_const',
            const=lief.logging.LOGGING_LEVEL.TRACE)
    verbosity.add_argument('--info',
            dest='main_verbosity',
            action='store_const',
            const=lief.logging.LOGGING_LEVEL.INFO)
    verbosity.add_argument('--warn',
            dest='main_verbosity',
            action='store_const',
            const=lief.logging.LOGGING_LEVEL.WARNING)
    verbosity.add_argument('--err',
            dest='main_verbosity',
            action='store_const',
            const=lief.logging.LOGGING_LEVEL.ERROR)
    verbosity.add_argument('--critical',
            dest='main_verbosity',
            action='store_const',
            const=lief.logging.LOGGING_LEVEL.CRITICAL)
    parser.set_defaults(main_verbosity=lief.logging.LOGGING_LEVEL.WARNING)

    args = parser.parse_args()
    lief.logging.set_level(args.main_verbosity)

    try:
        dexfile = DEX.parse(args.file)
    except lief.exception as e:
        print(e)
        sys.exit(1)

    # General information is always printed; each section is printed only
    # when requested (or with --all) and non-empty.
    print_information(dexfile)
    if args.show_header or args.show_all:
        print_header(dexfile)
    if (args.show_classes or args.show_all) and len(dexfile.classes) > 0:
        print_classes(dexfile)
    if (args.show_fields or args.show_all) and len(dexfile.fields) > 0:
        print_fields(dexfile)
    if (args.show_methods or args.show_all) and len(dexfile.methods) > 0:
        print_methods(dexfile)
    if (args.show_strings or args.show_all) and len(dexfile.strings) > 0:
        print_strings(dexfile)
    if (args.show_types or args.show_all) and len(dexfile.types) > 0:
        print_types(dexfile)
    if (args.show_prototypes or args.show_all) and len(dexfile.prototypes) > 0:
        print_prototypes(dexfile)
    if args.show_map or args.show_all:
        print_map(dexfile)
    sys.exit(EXIT_STATUS)
if __name__ == "__main__":
    main()
| |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
from keras.models import Model
from keras.layers import Dense, Embedding, Input , Activation
from keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout, GRU
from keras.preprocessing import text, sequence
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.layers import Flatten , Conv1D , GlobalMaxPooling1D , GlobalAveragePooling1D, MaxPooling1D
from keras.models import Sequential
import re , os
import logging, gensim , random
from gensim.models import word2vec
from keras.layers.merge import concatenate
import nltk
from collections import OrderedDict
import sys
from nltk.tokenize import word_tokenize
if sys.version_info < (3,):
    # Python 2: str.maketrans does not exist; the `string` module version is
    # needed. It was referenced without being imported, which raised
    # NameError on Python 2.
    import string
    maketrans = string.maketrans
else:
    maketrans = str.maketrans
def text_to_word_sequence(text,
                          filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                          lower=True, split=" "):
    """Converts a text to a sequence of (token, POS-tag) pairs.

    # Arguments
        text: Input text (string).
        filters: Sequence of characters to filter out (each replaced by
            `split` before tokenization).
        lower: Whether to convert the input to lowercase.
        split: Replacement string for filtered characters. Note: the final
            tokenization is on any whitespace, not on `split` itself.

    # Returns
        A list of (word, part-of-speech tag) tuples from `nltk.pos_tag`.
    """
    if lower:
        text = text.lower()
    if sys.version_info < (3,) and isinstance(text, unicode):
        translate_map = dict((ord(c), unicode(split)) for c in filters)
    else:
        translate_map = maketrans(filters, split * len(filters))
    text = text.translate(translate_map)
    # Tokenize on whitespace, then tag each token with its POS tag.
    seq = text.split()
    return nltk.pos_tag(seq)
class TokenizerPOS(text.Tokenizer):
    """Text tokenization utility that indexes words AND their POS tags.

    Extends `keras.preprocessing.text.Tokenizer`: each text is converted to
    (word, POS-tag) pairs, and two vocabularies are maintained — one for
    words (`word_index`) and one for part-of-speech tags (`word_index_pos`).
    `texts_to_sequences` therefore returns a pair of sequence lists.

    # Arguments
        num_words: the maximum number of words to keep, based
            on word frequency. Only the most common `num_words` words will
            be kept.
        filters: a string where each element is a character that will be
            filtered from the texts. The default is all punctuation, plus
            tabs and line breaks, minus the `'` character.
        lower: boolean. Whether to convert the texts to lowercase.
        split: character or string to use for token splitting.
        char_level: if True, every character will be treated as a token.
        oov_token: if given, it will be added to word_index and used to
            replace out-of-vocabulary words during text_to_sequence calls

    `0` is a reserved index that won't be assigned to any word.
    """
    def __init__(self, num_words=None,
                 filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                 lower=True,
                 split=' ',
                 char_level=False,
                 oov_token=None,
                 **kwargs):
        # Legacy support: `nb_words` was renamed `num_words`.
        if 'nb_words' in kwargs:
            import warnings  # local import: `warnings` is not imported at module level
            warnings.warn('The `nb_words` argument in `Tokenizer` '
                          'has been renamed `num_words`.')
            num_words = kwargs.pop('nb_words')
        if kwargs:
            raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
        self.word_counts = OrderedDict()  # word -> total frequency
        self.word_docs = {}               # word -> number of documents containing it
        self.pos_counts = OrderedDict()   # POS tag -> total frequency
        self.pos_docs = {}                # POS tag -> number of documents containing it
        self.filters = filters
        self.split = split
        self.lower = lower
        self.num_words = num_words
        self.document_count = 0
        self.char_level = char_level
        self.oov_token = oov_token
    def texts_to_sequences(self, texts):
        """Transforms each text in texts in a sequence of integers.

        Only top "num_words" most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.

        # Arguments
            texts: A list of texts (strings).

        # Returns
            A pair (word-id sequences, POS-id sequences).
        """
        res = []
        res_pos = []
        for vect, vect_pos in self.texts_to_sequences_generator(texts):
            res.append(vect)
            res_pos.append(vect_pos)
        return res, res_pos
    def texts_to_sequences_generator(self, texts):
        """Yields a (word-id sequence, POS-id sequence) pair per text.

        Only top "num_words" most frequent words will be taken into account.
        Only words known by the tokenizer will be taken into account.
        """
        num_words = self.num_words
        for text in texts:
            seq = text_to_word_sequence(text, self.filters, self.lower, self.split)
            vect = []
            res_pos = []
            for w, p in seq:
                i = self.word_index.get(w)
                j = self.word_index_pos.get(p)
                if i is not None:
                    if num_words and i >= num_words:
                        continue
                    else:
                        vect.append(i)
                        res_pos.append(j)
                elif self.oov_token is not None:
                    # NOTE(review): the POS lookup also uses oov_token, which
                    # is unlikely to be a real tag — j may be None here.
                    i = self.word_index.get(self.oov_token)
                    j = self.word_index_pos.get(self.oov_token)
                    if i is not None:
                        vect.append(i)
                        res_pos.append(j)
            yield vect, res_pos
    def fit_on_texts(self, texts):
        """Updates internal word and POS vocabularies based on a list of texts.

        Required before using `texts_to_sequences` or `texts_to_matrix`.

        # Arguments
            texts: can be a list of strings,
                or a generator of strings (for memory-efficiency)
        """
        self.document_count = 0
        print(len(texts))  # debug: corpus size (requires texts to support len())
        for text in texts:
            self.document_count += 1
            seq = text_to_word_sequence(text, self.filters, self.lower, self.split)
            # Total frequencies per word and per POS tag.
            for w, pos in seq:
                if w in self.word_counts:
                    self.word_counts[w] += 1
                else:
                    self.word_counts[w] = 1
                if pos in self.pos_counts:
                    self.pos_counts[pos] += 1
                else:
                    self.pos_counts[pos] = 1
            # Document frequencies: each word/tag counted once per document.
            for w in set(w for w, pos in seq):
                if w in self.word_docs:
                    self.word_docs[w] += 1
                else:
                    self.word_docs[w] = 1
            for pos in set(pos for w, pos in seq):
                if pos in self.pos_docs:
                    self.pos_docs[pos] += 1
                else:
                    self.pos_docs[pos] = 1
        # Most frequent entries get the smallest indices; 0 stays reserved.
        wcounts = list(self.word_counts.items())
        wcounts.sort(key=lambda x: x[1], reverse=True)
        sorted_voc = [wc[0] for wc in wcounts]
        self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))
        pcounts = list(self.pos_counts.items())
        pcounts.sort(key=lambda x: x[1], reverse=True)
        sorted_voc_pos = [wc[0] for wc in pcounts]
        self.word_index_pos = dict(list(zip(sorted_voc_pos, list(range(1, len(sorted_voc_pos) + 1)))))
        if self.oov_token is not None:
            i = self.word_index.get(self.oov_token)
            if i is None:
                self.word_index[self.oov_token] = len(self.word_index) + 1
            i = self.word_index_pos.get(self.oov_token)
            if i is None:
                self.word_index_pos[self.oov_token] = len(self.word_index_pos) + 1
        # Per-index document frequencies, for both vocabularies.
        self.index_docs = {}
        for w, c in list(self.word_docs.items()):
            self.index_docs[self.word_index[w]] = c
        self.index_docs_pos = {}
        for w, c in list(self.pos_docs.items()):
            self.index_docs_pos[self.word_index_pos[w]] = c
### --------------------> conf
# Target label columns of the toxic-comment dataset.
list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
max_features = 20000  # vocabulary size cap used by the tokenizer / embedding layers
######## ARMONY #####################################
# maxlen 200 (2x)
# EMBEDDING_DIM 100 (x) <---
# GRU 100 (layers = 1) (x)
# num_dense 100 (x)
#####################################################
maxlen = 600  # padded sequence length (tokens per comment)
EMBEDDING_DIM_1 = 300  # pre-trained GloVe word-vector size
we_fn_1='glove.840B.300d.txt'  # word-embedding file, expected under data/
EMBEDDING_DIM_2 = 100  # learned POS-embedding size
rate_drop_dense = 0.2  # dropout rate used in GRU and dense layers
num_dense = EMBEDDING_DIM_1 + EMBEDDING_DIM_2  # GRU / dense layer width
batch_size = 32
epochs = 10
### --------------------------> load data
train = pd.read_csv("data/train.csv")
#train = train[:2000]
test = pd.read_csv("data/test.csv")
#test = test[:2000]
train = train.sample(frac=1)  # shuffle the training rows
# pre-processing
def pre_process_pre_trained_embed(train, test):
    """Tokenize train/test comments and build the GloVe embedding matrix.

    # Arguments
        train: DataFrame with `comment_text` and the label columns.
        test: DataFrame with `comment_text`.

    # Returns
        (X_t, X_te, y, embedding_matrix_1, X_t_POS, X_te_POS): padded word-id
        sequences for train/test, the label matrix, the pre-trained word
        embedding matrix, and padded POS-id sequences for train/test.
    """
    print('>> Indexing word vectors ...')
    embeddings_index_1 = {}
    # `with` guarantees the vector file is closed even if parsing fails.
    with open(os.path.join('data', we_fn_1)) as f:
        for line in f:
            values = line.split(' ')
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index_1[word] = coefs
    print('Found %s word vectors. [1]' % len(embeddings_index_1))
    print(">> pre-processing ... ")
    list_sentences_train = train["comment_text"].fillna("__NA__").values
    y = train[list_classes].values
    list_sentences_test = test["comment_text"].fillna("__NA__").values
    # TokenizerPOS returns (word sequences, POS sequences).
    tokenizer = TokenizerPOS(num_words=max_features)
    tokenizer.fit_on_texts(list(list_sentences_train) + list(list_sentences_test))
    list_tokenized_train = tokenizer.texts_to_sequences(list(list_sentences_train))
    list_tokenized_test = tokenizer.texts_to_sequences(list(list_sentences_test))
    ### ------------ word
    word_index = tokenizer.word_index
    X_t = sequence.pad_sequences(list_tokenized_train[0], maxlen=maxlen)
    X_te = sequence.pad_sequences(list_tokenized_test[0], maxlen=maxlen)
    # prepare embedding matrix
    print('>> Preparing embedding matrix 1...')
    num_words = min(max_features, len(word_index))
    embedding_matrix_1 = np.zeros((num_words, EMBEDDING_DIM_1))
    for word, i in word_index.items():
        # Guard on the matrix size (num_words), not max_features: when the
        # vocabulary is smaller than max_features, index i == num_words
        # would overflow the matrix (word_index values start at 1).
        if i >= num_words:
            continue
        embedding_vector = embeddings_index_1.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix_1[i] = embedding_vector
    ### ------------ POS
    X_t_POS = sequence.pad_sequences(list_tokenized_train[1], maxlen=maxlen)
    X_te_POS = sequence.pad_sequences(list_tokenized_test[1], maxlen=maxlen)
    return X_t, X_te, y, embedding_matrix_1, X_t_POS, X_te_POS
def get_bidirectional(embed_size_1 = 200 , embedding_matrix_1 = None, embed_size_2 = 200 ,
                      rate_drop_dense = 0.1,
                      num_dense = 50):
    """Build the two-input (word ids + POS ids) bidirectional GRU classifier.

    The word branch is initialized from the pre-trained embedding matrix;
    the POS branch embedding is learned from scratch. Both are concatenated
    along the feature axis before the recurrent layer.
    """
    print(">> get_model_bidirectional_avg [pre-trained word embeddings]<<")
    words_in = Input(shape=(maxlen, ), dtype='int32')
    pos_in = Input(shape=(maxlen, ), dtype='int32')
    word_vectors = Embedding(max_features, embed_size_1, weights=[embedding_matrix_1], input_length=maxlen)(words_in)
    pos_vectors = Embedding(max_features, embed_size_2, input_length=maxlen)(pos_in)
    features = concatenate([word_vectors, pos_vectors], axis=2)
    features = Bidirectional(GRU(num_dense, return_sequences=True, dropout=rate_drop_dense, recurrent_dropout=rate_drop_dense, trainable=True))(features)
    features = GlobalMaxPool1D()(features)
    features = Dense(num_dense, activation="relu")(features)
    features = Dropout(rate_drop_dense)(features)
    predictions = Dense(6, activation="sigmoid")(features)
    model = Model(inputs=[words_in, pos_in], outputs=predictions)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
# train: pre-process data, build the model, and fit with checkpointing.
X_t, X_te, y , embedding_matrix_1 , X_t_POS , X_te_POS = pre_process_pre_trained_embed(train=train,test=test)
model = get_bidirectional(embed_size_1 = EMBEDDING_DIM_1 , embedding_matrix_1 = embedding_matrix_1, embed_size_2 = EMBEDDING_DIM_2 , rate_drop_dense = rate_drop_dense,num_dense = num_dense)
print(model.summary())
# Keep only the checkpoint with the lowest validation loss; stop as soon as
# validation loss stops improving (patience=0).
file_path="weights_base.best.hdf5"
checkpoint = ModelCheckpoint(file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor="val_loss", mode="min", patience=0)
callbacks_list = [checkpoint, early] #early
model.fit([X_t,X_t_POS], y, batch_size=batch_size, epochs=epochs, validation_split=0.1, callbacks=callbacks_list, shuffle=True)
# predict: restore the best checkpoint before scoring the test set.
print(">>> predicting on test set ... ")
model.load_weights(file_path)
y_test = model.predict([X_te,X_te_POS])
#sub: write the gzip-compressed submission file.
sample_submission = pd.read_csv("data/sample_submission.csv")
sample_submission[list_classes] = y_test
sample_submission.to_csv("sub_gru11_Embed_POS_dropout02_2.csv.gz", index=False , compression='gzip')
| |
from __future__ import unicode_literals
import logging
from django.core import exceptions as django_exceptions
from django.db import transaction, IntegrityError
from waldur_core.core.models import StateMixin
from waldur_core.structure import models as structure_models
from ..openstack import models as openstack_models, apps as openstack_apps
from . import apps, log, models, utils
logger = logging.getLogger(__name__)
def _log_scheduled_action(resource, action, action_details):
    """Emit an event recording that `action` has been scheduled for `resource`."""
    kind = resource.__class__.__name__.lower()
    message = _get_action_message(action, action_details)
    log.event_logger.openstack_resource_action.info(
        'Operation "%s" has been scheduled for %s "%s"' % (message, kind, resource.name),
        event_type=_get_action_event_type(action, 'scheduled'),
        event_context={'resource': resource, 'action_details': action_details},
    )
def _log_succeeded_action(resource, action, action_details):
    """Emit an event recording that `action` completed successfully."""
    kind = resource.__class__.__name__.lower()
    message = _get_action_message(action, action_details)
    log.event_logger.openstack_resource_action.info(
        'Successfully executed "%s" operation for %s "%s"' % (message, kind, resource.name),
        event_type=_get_action_event_type(action, 'succeeded'),
        event_context={'resource': resource, 'action_details': action_details},
    )
def _log_failed_action(resource, action, action_details):
    """Emit a warning-level event recording that `action` failed."""
    kind = resource.__class__.__name__.lower()
    message = _get_action_message(action, action_details)
    log.event_logger.openstack_resource_action.warning(
        'Failed to execute "%s" operation for %s "%s"' % (message, kind, resource.name),
        event_type=_get_action_event_type(action, 'failed'),
        event_context={'resource': resource, 'action_details': action_details},
    )
def _get_action_message(action, action_details):
return action_details.pop('message', action)
def _get_action_event_type(action, event_state):
return 'resource_%s_%s' % (action.replace(' ', '_').lower(), event_state)
def log_action(sender, instance, created=False, **kwargs):
    """ Log any resource action.
    Example of logged volume extend action:
    {
        'event_type': 'volume_extend_succeeded',
        'message': 'Successfully executed "Extend volume from 1024 MB to 2048 MB" operation for volume "pavel-test"',
        'action_details': {'old_size': 1024, 'new_size': 2048}
    }
    """
    resource = instance
    # Only react when an existing resource's action actually changed.
    if created or not resource.tracker.has_changed('action'):
        return
    states = StateMixin.States
    if resource.state == states.UPDATE_SCHEDULED:
        _log_scheduled_action(resource, resource.action, resource.action_details)
    if resource.state == states.OK:
        _log_succeeded_action(resource,
                              resource.tracker.previous('action'),
                              resource.tracker.previous('action_details'))
    elif resource.state == states.ERRED:
        _log_failed_action(resource,
                           resource.tracker.previous('action'),
                           resource.tracker.previous('action_details'))
def log_snapshot_schedule_creation(sender, instance, created=False, **kwargs):
    """Emit an event when a snapshot schedule is first created."""
    if not created:
        return
    schedule = instance
    log.event_logger.openstack_snapshot_schedule.info(
        'Snapshot schedule "%s" has been created' % schedule.name,
        event_type='resource_snapshot_schedule_created',
        event_context={'resource': schedule.source_volume, 'snapshot_schedule': schedule},
    )
def log_snapshot_schedule_action(sender, instance, created=False, **kwargs):
    """Emit activation/deactivation events on `is_active` transitions."""
    schedule = instance
    if created or not schedule.tracker.has_changed('is_active'):
        return
    context = {'resource': schedule.source_volume, 'snapshot_schedule': schedule}
    if schedule.is_active:
        log.event_logger.openstack_snapshot_schedule.info(
            'Snapshot schedule "%s" has been activated' % schedule.name,
            event_type='resource_snapshot_schedule_activated',
            event_context=context,
        )
        return
    # Deactivated: mention the error when one was recorded.
    if schedule.error_message:
        message = 'Snapshot schedule "%s" has been deactivated because of error: %s' % (
            schedule.name, schedule.error_message)
    else:
        message = 'Snapshot schedule "%s" has been deactivated' % schedule.name
    log.event_logger.openstack_snapshot_schedule.info(
        message,
        event_type='resource_snapshot_schedule_deactivated',
        event_context=context,
    )
def log_snapshot_schedule_deletion(sender, instance, **kwargs):
    """Emit an event when a snapshot schedule is deleted."""
    schedule = instance
    log.event_logger.openstack_snapshot_schedule.info(
        'Snapshot schedule "%s" has been deleted' % schedule.name,
        event_type='resource_snapshot_schedule_deleted',
        event_context={'resource': schedule.source_volume, 'snapshot_schedule': schedule},
    )
def log_backup_schedule_creation(sender, instance, created=False, **kwargs):
    """Emit an event when a backup schedule is first created."""
    if not created:
        return
    schedule = instance
    log.event_logger.openstack_backup_schedule.info(
        'Backup schedule "%s" has been created' % schedule.name,
        event_type='resource_backup_schedule_created',
        event_context={'resource': schedule.instance, 'backup_schedule': schedule},
    )
def log_backup_schedule_action(sender, instance, created=False, **kwargs):
    """Emit activation/deactivation events on `is_active` transitions."""
    schedule = instance
    if created or not schedule.tracker.has_changed('is_active'):
        return
    context = {'resource': schedule.instance, 'backup_schedule': schedule}
    if schedule.is_active:
        log.event_logger.openstack_backup_schedule.info(
            'Backup schedule "%s" has been activated' % schedule.name,
            event_type='resource_backup_schedule_activated',
            event_context=context,
        )
        return
    # Deactivated: mention the error when one was recorded.
    if schedule.error_message:
        message = 'Backup schedule "%s" has been deactivated because of error: %s' % (
            schedule.name, schedule.error_message)
    else:
        message = 'Backup schedule "%s" has been deactivated' % schedule.name
    log.event_logger.openstack_backup_schedule.info(
        message,
        event_type='resource_backup_schedule_deactivated',
        event_context=context,
    )
def log_backup_schedule_deletion(sender, instance, **kwargs):
    """Emit an event when a backup schedule is deleted."""
    schedule = instance
    log.event_logger.openstack_backup_schedule.info(
        'Backup schedule "%s" has been deleted' % schedule.name,
        event_type='resource_backup_schedule_deleted',
        event_context={'resource': schedule.instance, 'backup_schedule': schedule},
    )
def update_service_settings_credentials(sender, instance, created=False, **kwargs):
    """
    Updates service settings credentials on tenant user_password or user_username change.

    A tenant's user password can be changed; since the derived service
    settings copy the tenant credentials at creation time, they must follow
    the change.
    """
    if created:
        return
    tenant = instance
    tracker = tenant.tracker
    if not (tracker.has_changed('user_password') or tracker.has_changed('user_username')):
        return
    service_settings = structure_models.ServiceSettings.objects.filter(scope=tenant).first()
    if service_settings is None:
        return
    service_settings.username = tenant.user_username
    service_settings.password = tenant.user_password
    service_settings.save()
class BaseSynchronizationHandler(object):
    """
    This class provides signal handlers for synchronization of OpenStack properties
    when parent OpenStack resource are created, updated or deleted.
    Security groups, floating IPs, networks and subnets are implemented as
    resources in openstack application. However they are implemented as service properties
    in the openstack_tenant application.
    """
    # Subclasses set the openstack_tenant property model, the source
    # openstack resource model, and the resource fields copied on sync.
    property_model = None
    resource_model = None
    fields = []
    def get_tenant(self, resource):
        # Default: the resource links to its tenant directly; subclasses
        # override this when the tenant is reached indirectly (e.g. subnet
        # -> network -> tenant).
        return resource.tenant
    def get_service_settings(self, resource):
        """Return the tenant-scoped service settings for `resource`,
        or None if absent or ambiguous."""
        try:
            return structure_models.ServiceSettings.objects.get(scope=self.get_tenant(resource),
                                                                type=apps.OpenStackTenantConfig.service_name)
        except (django_exceptions.ObjectDoesNotExist, django_exceptions.MultipleObjectsReturned):
            return
    def get_service_property(self, resource, settings):
        """Return the service property mirroring `resource`,
        or None if absent or ambiguous."""
        try:
            return self.property_model.objects.get(settings=settings, backend_id=resource.backend_id)
        except (django_exceptions.ObjectDoesNotExist, django_exceptions.MultipleObjectsReturned):
            return
    def map_resource_to_dict(self, resource):
        # Copy only the fields declared by the subclass.
        return {field: getattr(resource, field) for field in self.fields}
    def create_service_property(self, resource, settings):
        """Create (or fetch) the mirroring service property.

        NOTE: returns the (object, created) pair from get_or_create, or
        None when a concurrent update triggers IntegrityError — callers
        must handle both shapes.
        """
        defaults = dict(name=resource.name, **self.map_resource_to_dict(resource))
        try:
            with transaction.atomic():
                return self.property_model.objects.get_or_create(
                    settings=settings,
                    backend_id=resource.backend_id,
                    defaults=defaults
                )
        except IntegrityError:
            logger.warning('Could not create %s with backend ID %s '
                           'and service settings %s due to concurrent update.',
                           self.property_model, resource.backend_id, settings)
    def update_service_property(self, resource, settings):
        """Copy the declared fields and the name onto the existing service
        property; returns it, or None when it does not exist."""
        service_property = self.get_service_property(resource, settings)
        if not service_property:
            return
        params = self.map_resource_to_dict(resource)
        for key, value in params.items():
            setattr(service_property, key, value)
        service_property.name = resource.name
        service_property.save()
        return service_property
    def create_handler(self, sender, instance, name, source, target, **kwargs):
        """
        Creates service property on resource transition from 'CREATING' state to 'OK'.
        """
        if source == StateMixin.States.CREATING and target == StateMixin.States.OK:
            settings = self.get_service_settings(instance)
            if settings and not self.get_service_property(instance, settings):
                self.create_service_property(instance, settings)
    def update_handler(self, sender, instance, name, source, target, **kwargs):
        """
        Updates service property on resource transition from 'UPDATING' state to 'OK'.
        """
        if source == StateMixin.States.UPDATING and target == StateMixin.States.OK:
            settings = self.get_service_settings(instance)
            if settings:
                self.update_service_property(instance, settings)
    def delete_handler(self, sender, instance, **kwargs):
        """
        Deletes service property on resource deletion
        """
        settings = self.get_service_settings(instance)
        if not settings:
            return
        service_property = self.get_service_property(instance, settings)
        if not service_property:
            return
        service_property.delete()
class FloatingIPHandler(BaseSynchronizationHandler):
    # Mirrors openstack.FloatingIP resources as openstack_tenant FloatingIP
    # service properties; only the listed fields are copied on sync.
    property_model = models.FloatingIP
    resource_model = openstack_models.FloatingIP
    fields = ('address', 'backend_network_id', 'runtime_state')
class SecurityGroupHandler(BaseSynchronizationHandler):
    """Mirrors openstack.SecurityGroup resources as openstack_tenant
    SecurityGroup service properties, including their rules."""
    property_model = models.SecurityGroup
    resource_model = openstack_models.SecurityGroup
    fields = ('description',)
    def map_rules(self, security_group, openstack_security_group):
        """Build unsaved rule copies attached to `security_group`."""
        return [models.SecurityGroupRule(
            protocol=rule.protocol,
            from_port=rule.from_port,
            to_port=rule.to_port,
            cidr=rule.cidr,
            backend_id=rule.backend_id,
            security_group=security_group,
        ) for rule in openstack_security_group.rules.iterator()]
    def create_service_property(self, resource, settings):
        result = super(SecurityGroupHandler, self).create_service_property(resource, settings)
        # The base implementation returns None when a concurrent update
        # triggers IntegrityError; unpacking it unguarded raised TypeError.
        if result is None:
            return
        service_property, _ = result
        if resource.rules.count() > 0:
            group_rules = self.map_rules(service_property, resource)
            service_property.rules.bulk_create(group_rules)
        return service_property
    def update_service_property(self, resource, settings):
        service_property = super(SecurityGroupHandler, self).update_service_property(resource, settings)
        if not service_property:
            return
        # Rules have no change tracking; replace them wholesale.
        service_property.rules.all().delete()
        group_rules = self.map_rules(service_property, resource)
        service_property.rules.bulk_create(group_rules)
        return service_property
class NetworkHandler(BaseSynchronizationHandler):
    # Mirrors openstack.Network resources as openstack_tenant Network
    # service properties; only the listed fields are copied on sync.
    property_model = models.Network
    resource_model = openstack_models.Network
    fields = ('is_external', 'segmentation_id', 'type')
class SubNetHandler(BaseSynchronizationHandler):
    """Mirrors openstack.SubNet resources as openstack_tenant SubNet
    service properties. A subnet has no direct tenant link, so the tenant
    is reached through the parent network, and the synced payload also
    carries the matching Network service property."""
    property_model = models.SubNet
    resource_model = openstack_models.SubNet
    fields = ('allocation_pools', 'cidr', 'dns_nameservers', 'enable_dhcp', 'ip_version')
    def get_tenant(self, resource):
        # Reach the tenant via the subnet's parent network.
        return resource.network.tenant
    def map_resource_to_dict(self, resource):
        params = super(SubNetHandler, self).map_resource_to_dict(resource)
        network = models.Network.objects.get(backend_id=resource.network.backend_id)
        params['network'] = network
        return params
# Singleton handler instances; their create/update/delete handlers are
# connected to resource signals elsewhere.
resource_handlers = (
    FloatingIPHandler(),
    SecurityGroupHandler(),
    NetworkHandler(),
    SubNetHandler(),
)
def sync_certificates_between_openstack_service_with_openstacktenant_service(sender, instance, action, **kwargs):
    """
    Copies certifications links in original service settings to derived openstack tenant service settings.
    Handling works only for OpenStack service settings and ignored for all others.
    """
    service_settings = instance
    # Only m2m mutations on OpenStack-type settings are relevant.
    if action not in ('post_add', 'post_remove', 'post_clear'):
        return
    if service_settings.type != openstack_apps.OpenStackConfig.service_name:
        return
    tenants = openstack_models.Tenant.objects.filter(
        service_project_link__service__settings=service_settings)
    if not tenants:
        return
    openstack_settings = structure_models.ServiceSettings.objects.filter(scope__in=tenants)
    with transaction.atomic():
        # Replace each derived settings' certifications with the parent's.
        for settings in openstack_settings:
            settings.certifications.clear()
            settings.certifications.add(*service_settings.certifications.all())
def copy_certifications_from_openstack_service_to_openstacktenant_service(sender, instance, created=False, **kwargs):
    """On creation of tenant-type service settings, inherit the certification
    links of the admin (parent OpenStack) service settings."""
    if not created:
        return
    if instance.type != apps.OpenStackTenantConfig.service_name:
        return
    tenant = instance.scope
    if not isinstance(tenant, openstack_models.Tenant):
        return
    admin_settings = tenant.service_project_link.service.settings
    with transaction.atomic():
        instance.certifications.clear()
        instance.certifications.add(*admin_settings.certifications.all())
def copy_flavor_exclude_regex_to_openstacktenant_service_settings(sender, instance, created=False, **kwargs):
    """Propagate the admin settings' flavor_exclude_regex option to newly
    created tenant service settings."""
    if not (created and instance.type == apps.OpenStackTenantConfig.service_name):
        return
    tenant = instance.scope
    if not isinstance(tenant, openstack_models.Tenant):
        return
    parent_options = tenant.service_project_link.service.settings.options
    instance.options['flavor_exclude_regex'] = parent_options.get('flavor_exclude_regex', '')
    instance.save(update_fields=['options'])
def create_service_from_tenant(sender, instance, created=False, **kwargs):
    """Create the derived OpenStackTenant service (settings, service object
    and project link) for every newly created tenant that lacks one."""
    if not created:
        return
    tenant = instance
    already_exists = structure_models.ServiceSettings.objects.filter(
        scope=tenant,
        type=apps.OpenStackTenantConfig.service_name,
    ).exists()
    if already_exists:
        return
    spl = tenant.service_project_link
    admin_settings = spl.service.settings
    customer = spl.project.customer
    # Derived settings reuse the admin backend URL/domain but authenticate
    # with the tenant-scoped user credentials.
    tenant_settings = structure_models.ServiceSettings.objects.create(
        name=tenant.name,
        scope=tenant,
        customer=customer,
        type=apps.OpenStackTenantConfig.service_name,
        backend_url=admin_settings.backend_url,
        username=tenant.user_username,
        password=tenant.user_password,
        domain=admin_settings.domain,
        options={
            'availability_zone': tenant.availability_zone,
            'tenant_id': tenant.backend_id,
        },
    )
    service = models.OpenStackTenantService.objects.create(
        settings=tenant_settings,
        customer=customer,
    )
    models.OpenStackTenantServiceProjectLink.objects.create(
        service=service,
        project=spl.project,
    )
def update_service_settings(sender, instance, created=False, **kwargs):
    """Keep derived tenant service settings in sync when the tenant's name
    or external_network_id changes."""
    tenant = instance
    if created:
        return
    tracked_fields = {'external_network_id', 'name'}
    if not tracked_fields.intersection(tenant.tracker.changed()):
        return
    try:
        service_settings = structure_models.ServiceSettings.objects.get(
            scope=tenant, type=apps.OpenStackTenantConfig.service_name)
    except structure_models.ServiceSettings.DoesNotExist:
        # No derived settings for this tenant — nothing to update.
        return
    service_settings.options['external_network_id'] = tenant.external_network_id
    service_settings.name = tenant.name
    service_settings.save()
def sync_price_list_item_for_flavor(sender, instance, created=False, **kwargs):
    """Create/sync a price list item for every newly created flavor."""
    if not created:
        return
    utils.sync_price_list_item(instance)
| |
from datetime import datetime, timedelta
from django.contrib.contenttypes.models import ContentType
from nose.tools import eq_
from kitsune.access.tests import permission
from kitsune.flagit.models import FlaggedObject
from kitsune.forums import POSTS_PER_PAGE
from kitsune.forums.events import NewPostEvent, NewThreadEvent
from kitsune.forums.models import Forum, Thread, Post
from kitsune.forums.tests import ForumTestCase, forum, thread, post
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import user
# Reference timestamp used throughout these tests to create "older" posts.
YESTERDAY = datetime.now() - timedelta(days=1)
class ForumModelTestCase(ForumTestCase):
    """Tests for Forum/Thread/Post URLs, paging and deletion side effects."""
    def test_forum_absolute_url(self):
        """A forum's absolute URL is /forums/<slug>."""
        f = forum(save=True)
        eq_('/forums/%s' % f.slug,
            f.get_absolute_url())
    def test_thread_absolute_url(self):
        """A thread's absolute URL is /forums/<forum slug>/<thread id>."""
        t = thread(save=True)
        eq_('/forums/%s/%s' % (t.forum.slug, t.id),
            t.get_absolute_url())
    def test_post_absolute_url(self):
        """A post URL carries its anchor and, beyond page 1, a page param."""
        t = thread(save=True)
        # Fill out the first page with posts from yesterday.
        p1 = post(thread=t, created=YESTERDAY, save=True)
        for i in range(POSTS_PER_PAGE - 1):
            post(thread=t, created=YESTERDAY, save=True)
        # Second page post from today.
        p2 = post(thread=t, save=True)
        url = reverse('forums.posts',
                      kwargs={'forum_slug': p1.thread.forum.slug,
                              'thread_id': p1.thread.id})
        eq_(urlparams(url, hash='post-%s' % p1.id), p1.get_absolute_url())
        url = reverse('forums.posts',
                      kwargs={'forum_slug': p2.thread.forum.slug,
                              'thread_id': p2.thread.id})
        exp_ = urlparams(url, hash='post-%s' % p2.id, page=2)
        eq_(exp_, p2.get_absolute_url())
    def test_post_page(self):
        """Post.page reflects which paginated page the post falls on."""
        t = thread(save=True)
        # Fill out the first page with posts from yesterday.
        page1 = []
        for i in range(POSTS_PER_PAGE):
            page1.append(post(thread=t, created=YESTERDAY, save=True))
        # Second page post from today.
        p2 = post(thread=t, save=True)
        for p in page1:
            eq_(1, p.page)
        eq_(2, p2.page)
    def test_delete_post_removes_flag(self):
        """Deleting a post also removes the flags on that post."""
        p = post(save=True)
        u = user(save=True)
        FlaggedObject.objects.create(
            status=0, content_object=p, reason='language', creator_id=u.id)
        eq_(1, FlaggedObject.objects.count())
        p.delete()
        eq_(0, FlaggedObject.objects.count())
    def test_thread_last_post_url(self):
        """The last-post URL names the forum slug, thread id, anchor and last param."""
        t = thread(save=True)
        post(thread=t, save=True)
        lp = t.last_post
        f = t.forum
        url = t.get_last_post_url()
        assert f.slug in url
        assert str(t.id) in url
        assert '#post-%s' % lp.id in url
        assert 'last=%s' % lp.id in url
    def test_last_post_updated(self):
        # Adding/Deleting the last post in a thread and forum should
        # update the last_post field
        orig_post = post(created=YESTERDAY, save=True)
        t = orig_post.thread
        # add a new post, then check that last_post is updated
        new_post = post(thread=t, content="test", save=True)
        f = Forum.objects.get(id=t.forum_id)
        t = Thread.objects.get(id=t.id)
        eq_(f.last_post.id, new_post.id)
        eq_(t.last_post.id, new_post.id)
        # delete the new post, then check that last_post is updated
        new_post.delete()
        f = Forum.objects.get(id=f.id)
        t = Thread.objects.get(id=t.id)
        eq_(f.last_post.id, orig_post.id)
        eq_(t.last_post.id, orig_post.id)
    def test_public_access(self):
        # Assert Forums think they're publicly viewable and postable
        # at appropriate times.
        # By default, users have access to forums that aren't restricted.
        u = user(save=True)
        f = forum(save=True)
        assert f.allows_viewing_by(u)
        assert f.allows_posting_by(u)
    def test_access_restriction(self):
        """Assert Forums are inaccessible to the public when restricted."""
        # If a forum has 'forums_forum.view_in_forum' permission defined,
        # then it isn't public by default. If it has
        # 'forums_forum.post_in_forum', then it isn't postable to by default.
        f = forum(save=True)
        ct = ContentType.objects.get_for_model(f)
        permission(codename='forums_forum.view_in_forum', content_type=ct,
                   object_id=f.id, save=True)
        permission(codename='forums_forum.post_in_forum', content_type=ct,
                   object_id=f.id, save=True)
        unprivileged_user = user(save=True)
        assert not f.allows_viewing_by(unprivileged_user)
        assert not f.allows_posting_by(unprivileged_user)
    def test_move_updates_last_posts(self):
        # Moving the thread containing a forum's last post to a new
        # forum should update the last_post of both
        # forums. Consequently, deleting the last post shouldn't
        # delete the old forum. [bug 588994]
        # Setup forum to move latest thread from.
        old_forum = forum(save=True)
        t1 = thread(forum=old_forum, save=True)
        p1 = post(thread=t1, created=YESTERDAY, save=True)
        t2 = thread(forum=old_forum, save=True)
        p2 = post(thread=t2, save=True)  # Newest post of all.
        # Setup forum to move latest thread to.
        new_forum = forum(save=True)
        t3 = thread(forum=new_forum, save=True)
        p3 = post(thread=t3, created=YESTERDAY, save=True)
        # Verify the last_post's are correct.
        eq_(p2, Forum.objects.get(id=old_forum.id).last_post)
        eq_(p3, Forum.objects.get(id=new_forum.id).last_post)
        # Move the t2 thread.
        t2 = Thread.objects.get(id=t2.id)
        t2.forum = new_forum
        t2.save()
        # Old forum's last_post updated?
        eq_(p1.id, Forum.objects.get(id=old_forum.id).last_post_id)
        # New forum's last_post updated?
        eq_(p2.id, Forum.objects.get(id=new_forum.id).last_post_id)
        # Delete the post, and both forums should still exist:
        p2.delete()
        eq_(1, Forum.objects.filter(id=old_forum.id).count())
        eq_(1, Forum.objects.filter(id=new_forum.id).count())
    def test_delete_removes_watches(self):
        """Deleting a forum removes any NewThreadEvent watches on it."""
        f = forum(save=True)
        NewThreadEvent.notify('me@me.com', f)
        assert NewThreadEvent.is_notifying('me@me.com', f)
        f.delete()
        assert not NewThreadEvent.is_notifying('me@me.com', f)
    def test_last_post_creator_deleted(self):
        """Delete the creator of the last post and verify forum survives."""
        # Create a post and verify it is the last one in the forum.
        post_ = post(content="test", save=True)
        forum_ = post_.thread.forum
        eq_(forum_.last_post.id, post_.id)
        # Delete the post creator, then check the forum still exists
        post_.author.delete()
        forum_ = Forum.objects.get(id=forum_.id)
        eq_(forum_.last_post, None)
class ThreadModelTestCase(ForumTestCase):
    """Tests for side effects of deleting threads."""

    def test_delete_thread_with_last_forum_post(self):
        """Deleting the thread holding a forum's last post updates the forum."""
        initial_thread = thread(save=True)
        post(thread=initial_thread, save=True)
        f = initial_thread.forum
        previous_last_post = f.last_post
        # A newer thread/post pair becomes the forum's last_post.
        newer_thread = thread(title="test", forum=f, save=True)
        newer_post = post(thread=newer_thread, content="test",
                          author=newer_thread.creator, save=True)
        f = Forum.objects.get(id=f.id)
        eq_(f.last_post.id, newer_post.id)
        # Removing the newer thread must roll last_post back.
        newer_thread.delete()
        f = Forum.objects.get(id=f.id)
        eq_(f.last_post.id, previous_last_post.id)
        eq_(Thread.objects.filter(pk=newer_thread.id).count(), 0)

    def test_delete_removes_watches(self):
        """Deleting a thread removes NewPostEvent watches on it."""
        watched = thread(save=True)
        NewPostEvent.notify('me@me.com', watched)
        assert NewPostEvent.is_notifying('me@me.com', watched)
        watched.delete()
        assert not NewPostEvent.is_notifying('me@me.com', watched)

    def test_delete_last_and_only_post_in_thread(self):
        """Deleting the only post in a thread should delete the thread"""
        lone_thread = thread(save=True)
        post(thread=lone_thread, save=True)
        eq_(1, lone_thread.post_set.count())
        lone_thread.delete()
        eq_(0, Thread.objects.filter(pk=lone_thread.id).count())
class SaveDateTestCase(ForumTestCase):
    """
    Test that Thread and Post save methods correctly handle created
    and updated dates.
    """
    # Tolerated skew between "now" and a freshly saved timestamp.
    # NOTE(review): milliseconds=3000 equals 3 seconds — presumably generous
    # slack for slow test runs; confirm it wasn't meant to be seconds=3.
    delta = timedelta(milliseconds=3000)
    def setUp(self):
        super(SaveDateTestCase, self).setUp()
        self.user = user(save=True)
        self.thread = thread(save=True)
        self.forum = self.thread.forum
    def assertDateTimeAlmostEqual(self, a, b, delta, msg=None):
        """Assert that two datetime objects are within `range` (a timedelta).
        """
        diff = abs(a - b)
        assert diff < abs(delta), msg or '%s ~= %s' % (a, b)
    def test_save_thread_no_created(self):
        """Saving a new thread should behave as if auto_add_now was set."""
        t = thread(forum=self.forum, title='foo', creator=self.user,
                   save=True)
        t.save()
        now = datetime.now()
        self.assertDateTimeAlmostEqual(now, t.created, self.delta)
    def test_save_thread_created(self):
        # Saving a new thread that already has a created date should
        # respect that created date.
        created = datetime(1992, 1, 12, 9, 48, 23)
        t = thread(forum=self.forum, title='foo', creator=self.user,
                   created=created, save=True)
        t.save()
        eq_(created, t.created)
    def test_save_old_thread_created(self):
        """Saving an old thread should not change its created date."""
        t = thread(created=YESTERDAY, save=True)
        t = Thread.objects.get(id=t.id)
        created = t.created
        # Now make an update to the thread and resave. Created shouldn't
        # change.
        t.title = 'new title'
        t.save()
        t = Thread.objects.get(id=t.id)
        eq_(created, t.created)
    def test_save_new_post_no_timestamps(self):
        # Saving a new post should behave as if auto_add_now was set on
        # created and auto_now set on updated.
        p = post(thread=self.thread, content='bar', author=self.user,
                 save=True)
        now = datetime.now()
        self.assertDateTimeAlmostEqual(now, p.created, self.delta)
        self.assertDateTimeAlmostEqual(now, p.updated, self.delta)
    def test_save_old_post_no_timestamps(self):
        """Saving an existing post should update the updated date."""
        created = datetime(2010, 5, 4, 14, 4, 22)
        updated = datetime(2010, 5, 4, 14, 4, 31)
        p = post(thread=self.thread, created=created, updated=updated,
                 save=True)
        eq_(updated, p.updated)
        p.content = 'baz'
        p.updated_by = self.user
        p.save()
        now = datetime.now()
        self.assertDateTimeAlmostEqual(now, p.updated, self.delta)
        eq_(created, p.created)
    def test_save_new_post_timestamps(self):
        # Saving a new post should allow you to override auto_add_now-
        # and auto_now-like functionality.
        created_ = datetime(1992, 1, 12, 10, 12, 32)
        p = Post(thread=self.thread, content='bar', author=self.user,
                 created=created_, updated=created_)
        p.save()
        eq_(created_, p.created)
        eq_(created_, p.updated)
    def test_content_parsed_sanity(self):
        """The content_parsed field is populated."""
        p = post(thread=self.thread, content='yet another post', save=True)
        eq_('<p>yet another post\n</p>', p.content_parsed)
| |
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2013-2014 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2014-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2015 Yann Sionneau <ys@m-labs.hk>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform
from litex.build.openocd import OpenOCD
# IOs ----------------------------------------------------------------------------------------------
# Single-ended and differential I/O definitions for the board.
_io = [
    # Clk / Rst
    ("clk200", 0,
        Subsignal("p", Pins("AD12"), IOStandard("LVDS")),
        Subsignal("n", Pins("AD11"), IOStandard("LVDS"))
    ),
    ("clk156", 0,
        Subsignal("p", Pins("K28"), IOStandard("LVDS_25")),
        Subsignal("n", Pins("K29"), IOStandard("LVDS_25"))
    ),
    ("cpu_reset", 0, Pins("AB7"), IOStandard("LVCMOS15")),
    # Leds
    ("user_led", 0, Pins("AB8"), IOStandard("LVCMOS15")),
    ("user_led", 1, Pins("AA8"), IOStandard("LVCMOS15")),
    ("user_led", 2, Pins("AC9"), IOStandard("LVCMOS15")),
    ("user_led", 3, Pins("AB9"), IOStandard("LVCMOS15")),
    ("user_led", 4, Pins("AE26"), IOStandard("LVCMOS25")),
    ("user_led", 5, Pins("G19"), IOStandard("LVCMOS25")),
    ("user_led", 6, Pins("E18"), IOStandard("LVCMOS25")),
    ("user_led", 7, Pins("F16"), IOStandard("LVCMOS25")),
    # Buttons
    ("user_btn_c", 0, Pins("G12"), IOStandard("LVCMOS25")),
    ("user_btn_n", 0, Pins("AA12"), IOStandard("LVCMOS15")),
    ("user_btn_s", 0, Pins("AB12"), IOStandard("LVCMOS15")),
    ("user_btn_w", 0, Pins("AC6"), IOStandard("LVCMOS15")),
    ("user_btn_e", 0, Pins("AG5"), IOStandard("LVCMOS15")),
    # Switches
    ("user_dip_btn", 0, Pins("Y29"), IOStandard("LVCMOS25")),
    ("user_dip_btn", 1, Pins("W29"), IOStandard("LVCMOS25")),
    ("user_dip_btn", 2, Pins("AA28"), IOStandard("LVCMOS25")),
    ("user_dip_btn", 3, Pins("Y28"), IOStandard("LVCMOS25")),
    # SMA
    ("user_sma_clock", 0,
        Subsignal("p", Pins("L25"), IOStandard("LVDS_25"),
            Misc("DIFF_TERM=TRUE")),
        Subsignal("n", Pins("K25"), IOStandard("LVDS_25"),
            Misc("DIFF_TERM=TRUE"))
    ),
    # Same SMA pins also exposed as single-ended LVCMOS25 signals.
    ("user_sma_clock_p", 0, Pins("L25"), IOStandard("LVCMOS25")),
    ("user_sma_clock_n", 0, Pins("K25"), IOStandard("LVCMOS25")),
    ("user_sma_gpio_p", 0, Pins("Y23"), IOStandard("LVCMOS25")),
    ("user_sma_gpio_n", 0, Pins("Y24"), IOStandard("LVCMOS25")),
    # I2C
    ("i2c", 0,
        Subsignal("scl", Pins("K21")),
        Subsignal("sda", Pins("L21")),
        IOStandard("LVCMOS25")),
    # Serial
    ("serial", 0,
        Subsignal("cts", Pins("L27")),
        Subsignal("rts", Pins("K23")),
        Subsignal("tx", Pins("K24")),
        Subsignal("rx", Pins("M19")),
        IOStandard("LVCMOS25")
    ),
    # DDR3 SDRAM
    ("ddram", 0,
        Subsignal("a", Pins(
            "AH12 AG13 AG12 AF12 AJ12 AJ13 AJ14 AH14",
            "AK13 AK14 AF13 AE13 AJ11 AH11 AK10 AK11"),
            IOStandard("SSTL15")),
        Subsignal("ba", Pins("AH9 AG9 AK9"), IOStandard("SSTL15")),
        Subsignal("ras_n", Pins("AD9"), IOStandard("SSTL15")),
        Subsignal("cas_n", Pins("AC11"), IOStandard("SSTL15")),
        Subsignal("we_n", Pins("AE9"), IOStandard("SSTL15")),
        Subsignal("cs_n", Pins("AC12"), IOStandard("SSTL15")),
        Subsignal("dm", Pins(
            "Y16 AB17 AF17 AE16 AK5 AJ3 AF6 AC7"),
            IOStandard("SSTL15")),
        Subsignal("dq", Pins(
            "AA15 AA16 AC14 AD14 AA17 AB15 AE15 Y15",
            "AB19 AD16 AC19 AD17 AA18 AB18 AE18 AD18",
            "AG19 AK19 AG18 AF18 AH19 AJ19 AE19 AD19",
            "AK16 AJ17 AG15 AF15 AH17 AG14 AH15 AK15",
            "AK8 AK6 AG7 AF7 AF8 AK4 AJ8 AJ6",
            "AH5 AH6 AJ2 AH2 AH4 AJ4 AK1 AJ1",
            "AF1 AF2 AE4 AE3 AF3 AF5 AE1 AE5",
            "AC1 AD3 AC4 AC5 AE6 AD6 AC2 AD4"),
            IOStandard("SSTL15_T_DCI")),
        Subsignal("dqs_p", Pins("AC16 Y19 AJ18 AH16 AH7 AG2 AG4 AD2"),
            IOStandard("DIFF_SSTL15")),
        Subsignal("dqs_n", Pins("AC15 Y18 AK18 AJ16 AJ7 AH1 AG3 AD1"),
            IOStandard("DIFF_SSTL15")),
        Subsignal("clk_p", Pins("AG10"), IOStandard("DIFF_SSTL15")),
        Subsignal("clk_n", Pins("AH10"), IOStandard("DIFF_SSTL15")),
        Subsignal("cke", Pins("AF10"), IOStandard("SSTL15")),
        Subsignal("odt", Pins("AD8"), IOStandard("SSTL15")),
        Subsignal("reset_n", Pins("AK3"), IOStandard("LVCMOS15")),
        Misc("SLEW=FAST"),
        Misc("VCCAUX_IO=HIGH")
    ),
    # SPIFlash
    ("spiflash", 0,  # clock needs to be accessed through STARTUPE2
        Subsignal("cs_n", Pins("U19")),
        Subsignal("dq", Pins("P24", "R25", "R20", "R21")),
        IOStandard("LVCMOS25")
    ),
    # SDCard
    ("spisdcard", 0,
        Subsignal("clk", Pins("AB23")),
        Subsignal("cs_n", Pins("AC21")),
        Subsignal("mosi", Pins("AB22"), Misc("PULLUP")),
        Subsignal("miso", Pins("AC20"), Misc("PULLUP")),
        Misc("SLEW=FAST"),
        IOStandard("LVCMOS25")
    ),
    # NOTE(review): spisdcard above uses Misc("PULLUP") while sdcard below
    # uses Misc("PULLUP True") — confirm which form the constraint writer
    # expects and whether the two should match.
    ("sdcard", 0,
        Subsignal("clk", Pins("AB23")),
        Subsignal("cmd", Pins("AB22"), Misc("PULLUP True")),
        Subsignal("data", Pins("AC20 AA23 AA22 AC21"), Misc("PULLUP True")),
        Misc("SLEW=FAST"),
        IOStandard("LVCMOS25")
    ),
    # GMII Ethernet
    ("eth_clocks", 0,
        Subsignal("tx", Pins("M28")),
        Subsignal("gtx", Pins("K30")),
        Subsignal("rx", Pins("U27")),
        IOStandard("LVCMOS25")
    ),
    ("eth", 0,
        Subsignal("rst_n", Pins("L20")),
        Subsignal("int_n", Pins("N30")),
        Subsignal("mdio", Pins("J21")),
        Subsignal("mdc", Pins("R23")),
        Subsignal("rx_dv", Pins("R28")),
        Subsignal("rx_er", Pins("V26")),
        Subsignal("rx_data", Pins("U30 U25 T25 U28 R19 T27 T26 T28")),
        Subsignal("tx_en", Pins("M27")),
        Subsignal("tx_er", Pins("N29")),
        Subsignal("tx_data", Pins("N27 N25 M29 L28 J26 K26 L30 J28")),
        Subsignal("col", Pins("W19")),
        Subsignal("crs", Pins("R30")),
        IOStandard("LVCMOS25")
    ),
    # LCD
    ("lcd", 0,
        Subsignal("db", Pins("AA13 AA10 AA11 Y10")),
        Subsignal("e", Pins("AB10")),
        Subsignal("rs", Pins("Y11")),
        Subsignal("rw", Pins("AB13")),
        IOStandard("LVCMOS15")
    ),
    # Rotary Encoder
    ("rotary", 0,
        Subsignal("a", Pins("Y26")),
        Subsignal("b", Pins("Y25")),
        Subsignal("push", Pins("AA26")),
        IOStandard("LVCMOS25")
    ),
    # HDMI
    ("hdmi", 0,
        Subsignal("d", Pins(
            "B23 A23 E23 D23 F25 E25 E24 D24",
            "F26 E26 G23 G24 J19 H19 L17 L18",
            "K19 K20")),
        Subsignal("de", Pins("H17")),
        Subsignal("clk", Pins("K18")),
        Subsignal("vsync", Pins("H20")),
        Subsignal("hsync", Pins("J18")),
        Subsignal("int", Pins("AH24")),
        Subsignal("spdif", Pins("J17")),
        Subsignal("spdif_out", Pins("G20")),
        IOStandard("LVCMOS25")
    ),
    # PCIe
    ("pcie_x1", 0,
        Subsignal("rst_n", Pins("G25"), IOStandard("LVCMOS25")),
        Subsignal("clk_p", Pins("U8")),
        Subsignal("clk_n", Pins("U7")),
        Subsignal("rx_p", Pins("M6")),
        Subsignal("rx_n", Pins("M5")),
        Subsignal("tx_p", Pins("L4")),
        Subsignal("tx_n", Pins("L3"))
    ),
    ("pcie_x2", 0,
        Subsignal("rst_n", Pins("G25"), IOStandard("LVCMOS25")),
        Subsignal("clk_p", Pins("U8")),
        Subsignal("clk_n", Pins("U7")),
        Subsignal("rx_p", Pins("M6 P6")),
        Subsignal("rx_n", Pins("M5 P5")),
        Subsignal("tx_p", Pins("L4 M2")),
        Subsignal("tx_n", Pins("L3 M1"))
    ),
    ("pcie_x4", 0,
        Subsignal("rst_n", Pins("G25"), IOStandard("LVCMOS25")),
        Subsignal("clk_p", Pins("U8")),
        Subsignal("clk_n", Pins("U7")),
        Subsignal("rx_p", Pins("M6 P6 R4 T6")),
        Subsignal("rx_n", Pins("M5 P5 R3 T5")),
        Subsignal("tx_p", Pins("L4 M2 N4 P2")),
        Subsignal("tx_n", Pins("L3 M1 N3 P1"))
    ),
    ("pcie_x8", 0,
        Subsignal("rst_n", Pins("G25"), IOStandard("LVCMOS25")),
        Subsignal("clk_p", Pins("U8")),
        Subsignal("clk_n", Pins("U7")),
        Subsignal("rx_p", Pins("M6 P6 R4 T6 V6 W4 Y6 AA4")),
        Subsignal("rx_n", Pins("M5 P5 R3 T5 V5 W3 Y5 AA3")),
        Subsignal("tx_p", Pins("L4 M2 N4 P2 T2 U4 V2 Y2")),
        Subsignal("tx_n", Pins("L3 M1 N3 P1 T1 U3 V1 Y1"))
    ),
    # SGMII Clk
    ("sgmii_clock", 0,
        Subsignal("p", Pins("G8")),
        Subsignal("n", Pins("G7"))
    ),
    # SMA
    ("user_sma_mgt_refclk", 0,
        Subsignal("p", Pins("J8")),
        Subsignal("n", Pins("J7"))
    ),
    ("user_sma_mgt_tx", 0,
        Subsignal("p", Pins("K2")),
        Subsignal("n", Pins("K1"))
    ),
    ("user_sma_mgt_rx", 0,
        Subsignal("p", Pins("K6")),
        Subsignal("n", Pins("K5"))
    ),
    # SFP
    ("sfp", 0,  # inverted prior to HW rev 1.1
        Subsignal("txp", Pins("H2")),
        Subsignal("txn", Pins("H1")),
        Subsignal("rxp", Pins("G4")),
        Subsignal("rxn", Pins("G3")),
    ),
    ("sfp_tx", 0,  # inverted prior to HW rev 1.1
        Subsignal("p", Pins("H2")),
        Subsignal("n", Pins("H1"))
    ),
    ("sfp_rx", 0,  # inverted prior to HW rev 1.1
        Subsignal("p", Pins("G4")),
        Subsignal("n", Pins("G3"))
    ),
    ("sfp_tx_disable_n", 0, Pins("Y20"), IOStandard("LVCMOS25")),
    ("sfp_rx_los", 0, Pins("P19"), IOStandard("LVCMOS25")),
    # SI5324
    ("si5324", 0,
        Subsignal("rst_n", Pins("AE20"), IOStandard("LVCMOS25")),
        Subsignal("int", Pins("AG24"), IOStandard("LVCMOS25"))
    ),
    ("si5324_clkin", 0,
        Subsignal("p", Pins("W27"), IOStandard("LVDS_25")),
        Subsignal("n", Pins("W28"), IOStandard("LVDS_25"))
    ),
    ("si5324_clkout", 0,
        Subsignal("p", Pins("L8")),
        Subsignal("n", Pins("L7"))
    ),
    # Others
    ("vadj_on_b", 0, Pins("J27"), IOStandard("LVCMOS25")),
]
# Connectors ---------------------------------------------------------------------------------------
# Expansion connector pin maps (FMC HPC/LPC mezzanine connectors and the
# XADC header), keyed by the connector's signal names.
_connectors = [
    ("HPC", {
        "DP1_M2C_P" : "D6",
        "DP1_M2C_N" : "D5",
        "DP2_M2C_P" : "B6",
        "DP2_M2C_N" : "B5",
        "DP3_M2C_P" : "A8",
        "DP3_M2C_N" : "A7",
        "DP1_C2M_P" : "C4",
        "DP1_C2M_N" : "C3",
        "DP2_C2M_P" : "B2",
        "DP2_C2M_N" : "B1",
        "DP3_C2M_P" : "A4",
        "DP3_C2M_N" : "A3",
        "DP0_C2M_P" : "D2",
        "DP0_C2M_N" : "D1",
        "DP0_M2C_P" : "E4",
        "DP0_M2C_N" : "E3",
        "LA06_P" : "H30",
        "LA06_N" : "G30",
        "LA10_P" : "D29",
        "LA10_N" : "C30",
        "LA14_P" : "B28",
        "LA14_N" : "A28",
        "LA18_CC_P" : "F21",
        "LA18_CC_N" : "E21",
        "LA27_P" : "C19",
        "LA27_N" : "B19",
        "HA01_CC_P" : "H14",
        "HA01_CC_N" : "G14",
        "HA05_P" : "F15",
        "HA05_N" : "E16",
        "HA09_P" : "F12",
        "HA09_N" : "E13",
        "HA13_P" : "L16",
        "HA13_N" : "K16",
        "HA16_P" : "L15",
        "HA16_N" : "K15",
        "HA20_P" : "K13",
        "HA20_N" : "J13",
        "CLK1_M2C_P" : "D17",
        "CLK1_M2C_N" : "D18",
        "LA00_CC_P" : "C25",
        "LA00_CC_N" : "B25",
        "LA03_P" : "H26",
        "LA03_N" : "H27",
        "LA08_P" : "E29",
        "LA08_N" : "E30",
        "LA12_P" : "C29",
        "LA12_N" : "B29",
        "LA16_P" : "B27",
        "LA16_N" : "A27",
        "LA20_P" : "E19",
        "LA20_N" : "D19",
        "LA22_P" : "C20",
        "LA22_N" : "B20",
        "LA25_P" : "G17",
        "LA25_N" : "F17",
        "LA29_P" : "C17",
        "LA29_N" : "B17",
        "LA31_P" : "G22",
        "LA31_N" : "F22",
        "LA33_P" : "H21",
        "LA33_N" : "H22",
        "HA03_P" : "C12",
        "HA03_N" : "B12",
        "HA07_P" : "B14",
        "HA07_N" : "A15",
        "HA11_P" : "B13",
        "HA11_N" : "A13",
        "HA14_P" : "J16",
        "HA14_N" : "H16",
        "HA18_P" : "K14",
        "HA18_N" : "J14",
        "HA22_P" : "L11",
        "HA22_N" : "K11",
        "GBTCLK1_M2C_P" : "E8",
        "GBTCLK1_M2C_N" : "E7",
        "GBTCLK0_M2C_P" : "C8",
        "GBTCLK0_M2C_N" : "C7",
        "LA01_CC_P" : "D26",
        "LA01_CC_N" : "C26",
        "LA05_P" : "G29",
        "LA05_N" : "F30",
        "LA09_P" : "B30",
        "LA09_N" : "A30",
        "LA13_P" : "A25",
        "LA13_N" : "A26",
        "LA17_CC_P" : "F20",
        "LA17_CC_N" : "E20",
        "LA23_P" : "B22",
        "LA23_N" : "A22",
        "LA26_P" : "B18",
        "LA26_N" : "A18",
        "PG_M2C" : "J29",
        "HA00_CC_P" : "D12",
        "HA00_CC_N" : "D13",
        "HA04_P" : "F11",
        "HA04_N" : "E11",
        "HA08_P" : "E14",
        "HA08_N" : "E15",
        "HA12_P" : "C15",
        "HA12_N" : "B15",
        "HA15_P" : "H15",
        "HA15_N" : "G15",
        "HA19_P" : "H11",
        "HA19_N" : "H12",
        "PRSNT_M2C_B" : "M20",
        "CLK0_M2C_P" : "D27",
        "CLK0_M2C_N" : "C27",
        "LA02_P" : "H24",
        "LA02_N" : "H25",
        "LA04_P" : "G28",
        "LA04_N" : "F28",
        "LA07_P" : "E28",
        "LA07_N" : "D28",
        "LA11_P" : "G27",
        "LA11_N" : "F27",
        "LA15_P" : "C24",
        "LA15_N" : "B24",
        "LA19_P" : "G18",
        "LA19_N" : "F18",
        "LA21_P" : "A20",
        "LA21_N" : "A21",
        "LA24_P" : "A16",
        "LA24_N" : "A17",
        "LA28_P" : "D16",
        "LA28_N" : "C16",
        "LA30_P" : "D22",
        "LA30_N" : "C22",
        "LA32_P" : "D21",
        "LA32_N" : "C21",
        "HA02_P" : "D11",
        "HA02_N" : "C11",
        "HA06_P" : "D14",
        "HA06_N" : "C14",
        "HA10_P" : "A11",
        "HA10_N" : "A12",
        "HA17_CC_P" : "G13",
        "HA17_CC_N" : "F13",
        "HA21_P" : "J11",
        "HA21_N" : "J12",
        "HA23_P" : "L12",
        "HA23_N" : "L13",
        }
    ),
    ("LPC", {
        "GBTCLK0_M2C_P" : "N8",
        "GBTCLK0_M2C_N" : "N7",
        "DP0_C2M_P" : "F2",
        "DP0_C2M_N" : "F1",
        "DP0_M2C_P" : "F6",
        "DP0_M2C_N" : "F5",
        "LA01_CC_P" : "AE23",
        "LA01_CC_N" : "AF23",
        "LA05_P" : "AG22",
        "LA05_N" : "AH22",
        "LA09_P" : "AK23",
        "LA09_N" : "AK24",
        "LA13_P" : "AB24",
        "LA13_N" : "AC25",
        "LA17_CC_P" : "AB27",
        "LA17_CC_N" : "AC27",
        "LA23_P" : "AH26",
        "LA23_N" : "AH27",
        "LA26_P" : "AK29",
        "LA26_N" : "AK30",
        "CLK0_M2C_P" : "AF22",
        "CLK0_M2C_N" : "AG23",
        "LA02_P" : "AF20",
        "LA02_N" : "AF21",
        "LA04_P" : "AH21",
        "LA04_N" : "AJ21",
        "LA07_P" : "AG25",
        "LA07_N" : "AH25",
        "LA11_P" : "AE25",
        "LA11_N" : "AF25",
        "LA15_P" : "AC24",
        "LA15_N" : "AD24",
        "LA19_P" : "AJ26",
        "LA19_N" : "AK26",
        "LA21_P" : "AG27",
        "LA21_N" : "AG28",
        "LA24_P" : "AG30",
        "LA24_N" : "AH30",
        "LA28_P" : "AE30",
        "LA28_N" : "AF30",
        "LA30_P" : "AB29",
        "LA30_N" : "AB30",
        "LA32_P" : "Y30",
        "LA32_N" : "AA30",
        "LA06_P" : "AK20",
        "LA06_N" : "AK21",
        "LA10_P" : "AJ24",
        "LA10_N" : "AK25",
        "LA14_P" : "AD21",
        "LA14_N" : "AE21",
        "LA18_CC_P" : "AD27",
        "LA18_CC_N" : "AD28",
        "LA27_P" : "AJ28",
        "LA27_N" : "AJ29",
        "CLK1_M2C_P" : "AG29",
        "CLK1_M2C_N" : "AH29",
        "LA00_CC_P" : "AD23",
        "LA00_CC_N" : "AE24",
        "LA03_P" : "AG20",
        "LA03_N" : "AH20",
        "LA08_P" : "AJ22",
        "LA08_N" : "AJ23",
        "LA12_P" : "AA20",
        "LA12_N" : "AB20",
        "LA16_P" : "AC22",
        "LA16_N" : "AD22",
        "LA20_P" : "AF26",
        "LA20_N" : "AF27",
        "LA22_P" : "AJ27",
        "LA22_N" : "AK28",
        "LA25_P" : "AC26",
        "LA25_N" : "AD26",
        "LA29_P" : "AE28",
        "LA29_N" : "AF28",
        "LA31_P" : "AD29",
        "LA31_N" : "AE29",
        "LA33_P" : "AC29",
        "LA33_N" : "AC30",
        }
    ),
    ("XADC", {
        "GPIO0" : "AB25",
        "GPIO1" : "AA25",
        "GPIO2" : "AB28",
        "GPIO3" : "AA27",
        "VAUX0_N" : "J24",
        "VAUX0_P" : "J23",
        "VAUX8_N" : "L23",
        "VAUX8_P" : "L22",
        }
    ),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
    """Board platform for the Kintex-7 xc7k325t-ffg900-2.

    Provides the default 156.5 MHz clock, bitstream/flash generation
    settings, an OpenOCD programmer and board timing/DCI constraints.
    """
    default_clk_name   = "clk156"
    default_clk_period = 1e9/156.5e6

    def __init__(self, toolchain="vivado"):
        XilinxPlatform.__init__(self, "xc7k325t-ffg900-2", _io, _connectors, toolchain=toolchain)
        # Board powers the config bank from VCCO at 2.5V.
        self.add_platform_command("""
set_property CFGBVS VCCO [current_design]
set_property CONFIG_VOLTAGE 2.5 [current_design]
""")
        # QSPI flash bitstream + binary image for flashing.
        self.toolchain.bitstream_commands = ["set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]"]
        self.toolchain.additional_commands = ["write_cfgmem -force -format bin -interface spix4 -size 16 -loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"]

    def create_programmer(self):
        # The bscan_spi proxy bitstream must match the FPGA on the board:
        # this board carries a Kintex-7 xc7k325t, not an Artix "xc7a325t"
        # (which does not exist), so the previous proxy name could never load.
        return OpenOCD("openocd_xc7_ft2232.cfg", "bscan_spi_xc7k325t.bit")

    def do_finalize(self, fragment):
        XilinxPlatform.do_finalize(self, fragment)
        # Clock period constraints for the 200 MHz system clock and the
        # 125 MHz GMII RX/TX clocks (loose: only applied when requested).
        self.add_period_constraint(self.lookup_request("clk200", loose=True), 1e9/200e6)
        self.add_period_constraint(self.lookup_request("eth_clocks:rx", loose=True), 1e9/125e6)
        self.add_period_constraint(self.lookup_request("eth_clocks:tx", loose=True), 1e9/125e6)
        # DCI cascade for the DDR3 banks.
        self.add_platform_command("set_property DCI_CASCADE {{32 34}} [get_iobanks 33]")
| |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""U-MOOC test suite.
This script runs all functional and units test in the U-MOOC project.
Here is how to use the script:
- download WebTest Python package from a URL below and put
the files in a folder of your choice, for example: tmp/webtest:
http://pypi.python.org/packages/source/W/WebTest/WebTest-1.4.2.zip
- update your Python path:
PYTHONPATH=$PYTHONPATH:/tmp/webtest
- invoke this test suite from the command line:
# Automatically find and run all Python tests in tests/*.
python tests/suite.py
# Run only tests matching shell glob *_functional_test.py in tests/*.
python tests/suite.py --pattern *_functional_test.py
# Run test method baz in unittest.TestCase Bar found in tests/foo.py.
python tests/suite.py --test_class_name tests.foo.Bar.baz
- review the output to make sure there are no errors or warnings
Good luck!
"""
__author__ = 'Sean Lip'
import argparse
import base64
import os
import shutil
import signal
import subprocess
import sys
import time
import unittest
# The following import is needed in order to add third-party libraries.
import appengine_config # pylint: disable-msg=unused-import
import webtest
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import deferred
from google.appengine.ext import testbed
# Command-line interface for the test runner.
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
    '--pattern', default='*.py',
    help='shell pattern for discovering files containing tests', type=str)
_PARSER.add_argument(
    '--test_class_name',
    help='optional dotted module name of the test(s) to run', type=str)
_PARSER.add_argument(
    '--integration_server_start_cmd',
    help='script to start an external CB server', type=str)
# Base filesystem location for test data.
TEST_DATA_BASE = '/tmp/experimental/coursebuilder/test-data/'
def empty_environ():
    """Populate os.environ with a minimal, anonymous request environment."""
    defaults = {
        'AUTH_DOMAIN': 'example.com',
        'SERVER_NAME': 'localhost',
        'HTTP_HOST': 'localhost',
        'SERVER_PORT': '8080',
        'USER_EMAIL': '',
        'USER_ID': '',
    }
    for key, value in defaults.items():
        os.environ[key] = value
def iterate_tests(test_suite_or_case):
    """Iterate through all of the test cases in 'test_suite_or_case'."""
    try:
        members = iter(test_suite_or_case)
    except TypeError:
        # Not iterable: the argument is a bare test case.
        yield test_suite_or_case
        return
    # A suite: recursively flatten each member.
    for member in members:
        for leaf in iterate_tests(member):
            yield leaf
class TestBase(unittest.TestCase):
    """Base class for all U-MOOC tests."""

    REQUIRES_INTEGRATION_SERVER = 1
    INTEGRATION_SERVER_BASE_URL = 'http://localhost:8081'

    def setUp(self):
        super(TestBase, self).setUp()
        # Map of object -> {symbol_string: original_value}
        self._originals = {}

    def tearDown(self):
        self._unswap_all()
        super(TestBase, self).tearDown()

    def swap(self, source, symbol, new):
        """Swaps out source.symbol for a new value.

        Allows swapping of members and methods:

            myobject.foo = 'original_foo'
            self.swap(myobject, 'foo', 'bar')
            self.assertEqual('bar', myobject.foo)

            myobject.baz()  # -> 'original_baz'
            self.swap(myobject, 'baz', lambda: 'quux')
            self.assertEqual('quux', myobject.baz())

        Swaps are automatically undone in tearDown().

        Args:
            source: object. The source object to swap from.
            symbol: string. The name of the symbol to swap.
            new: object. The new value to swap in.
        """
        if source not in self._originals:
            self._originals[source] = {}
        # Record the original only on the first swap of this symbol. The
        # previous truthiness check (.get(symbol, None)) re-recorded
        # originals whose value was falsy (0, '', None), so a second swap
        # captured the already-swapped value and _unswap_all() could not
        # restore the true original.
        if symbol not in self._originals[source]:
            self._originals[source][symbol] = getattr(source, symbol)
        setattr(source, symbol, new)

    # Allow protected method names. pylint: disable-msg=g-bad-name
    def _unswap_all(self):
        # items() instead of iteritems() so this works under both
        # Python 2 and Python 3.
        for source, symbol_to_value in self._originals.items():
            for symbol, value in symbol_to_value.items():
                setattr(source, symbol, value)

    def shortDescription(self):
        """Additional information logged during unittest invocation."""
        # Suppress default logging of docstrings. Instead log name/status only.
        return None
class FunctionalTestBase(TestBase):
    """Base class for functional tests."""

    def setUp(self):
        super(FunctionalTestBase, self).setUp()
        # e.g. TEST_DATA_BASE/tests/functional/tests/MyTestCase.
        module_path = self.__class__.__module__.replace('.', os.sep)
        self.test_tempdir = os.path.join(
            TEST_DATA_BASE, module_path, self.__class__.__name__)
        self.reset_filesystem()

    def tearDown(self):
        self.reset_filesystem(remove_only=True)
        super(FunctionalTestBase, self).tearDown()

    def reset_filesystem(self, remove_only=False):
        """Delete the per-test scratch dir; recreate it unless remove_only."""
        if os.path.exists(self.test_tempdir):
            shutil.rmtree(self.test_tempdir)
        if not remove_only:
            os.makedirs(self.test_tempdir)
class AppEngineTestBase(FunctionalTestBase):
    """Base class for tests that require App Engine services."""
    def getApp(self): # pylint: disable-msg=g-bad-name
        """Returns the main application to be tested."""
        raise Exception('Not implemented.')
    def setUp(self): # pylint: disable-msg=g-bad-name
        """Builds a webtest wrapper around getApp() and activates GAE stubs."""
        super(AppEngineTestBase, self).setUp()
        empty_environ()
        # setup an app to be tested
        self.testapp = webtest.TestApp(self.getApp())
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        # configure datastore policy to emulate instantaneously and globally
        # consistent HRD; we also patch dev_appserver in main.py to run under
        # the same policy
        policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
            probability=1)
        # declare any relevant App Engine service stubs here
        self.testbed.init_user_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_datastore_v3_stub(consistency_policy=policy)
        self.testbed.init_taskqueue_stub()
        # Keep a handle on the task queue stub so tests can drain it.
        self.taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
    def tearDown(self): # pylint: disable-msg=g-bad-name
        # Deactivate stubs before the base class tears down the filesystem.
        self.testbed.deactivate()
        super(AppEngineTestBase, self).tearDown()
    def execute_all_deferred_tasks(self, queue_name='default'):
        """Executes all pending deferred tasks."""
        # Task bodies are base64-encoded deferred payloads.
        for task in self.taskq.GetTasks(queue_name):
            deferred.run(base64.b64decode(task['body']))
def create_test_suite(parsed_args):
    """Loads all requested test suites.

    By default, loads all unittest.TestCases found under the project root's
    tests/ directory.

    Args:
        parsed_args: argparse.Namespace. Processed command-line arguments.

    Returns:
        unittest.TestSuite. The test suite populated with all tests to run.
    """
    loader = unittest.TestLoader()
    requested_name = parsed_args.test_class_name
    if requested_name:
        # A specific test class or module was requested on the command line.
        return loader.loadTestsFromName(requested_name)
    # Otherwise, discover everything matching the pattern under this file's
    # directory.
    return loader.discover(
        os.path.dirname(__file__), pattern=parsed_args.pattern)
def start_integration_server(integration_server_start_cmd):
    """Starts the external integration server process.

    Args:
        integration_server_start_cmd: command (list or string) used to launch
            the server via subprocess.Popen.

    Returns:
        subprocess.Popen. Handle to the started server process.
    """
    # Parenthesized print form: valid syntax in both Python 2 and Python 3,
    # unlike the bare `print x` statement used previously.
    print('Starting external server: %s' % integration_server_start_cmd)
    server = subprocess.Popen(integration_server_start_cmd)
    time.sleep(3)  # Wait for server to start up
    return server
def stop_integration_server(server):
    """Stops the integration server and its orphaned runtime process.

    Args:
        server: subprocess.Popen. Handle returned by start_integration_server.
    """
    server.kill()  # dev_appserver.py itself.

    # The new dev appserver starts a _python_runtime.py process that isn't
    # captured by start_integration_server and so doesn't get killed. Until it's
    # done, our tests will never complete so we kill it manually.
    output = subprocess.Popen(
        ['pgrep', '-f', '_python_runtime.py'], stdout=subprocess.PIPE
    ).communicate()[0].strip()
    # pgrep prints nothing when no process matches (e.g. the runtime already
    # exited); int('') would raise ValueError, so only kill when a pid exists.
    if output:
        os.kill(int(output), signal.SIGKILL)
def fix_sys_path():
    """Fix the sys.path to include GAE extra paths."""
    # Imported lazily: dev_appserver is only available once the GAE SDK is on
    # the path, which may not be true at module import time.
    import dev_appserver  # pylint: disable=C6204

    # dev_appserver.fix_sys_path() prepends GAE paths to sys.path and hides
    # our classes like 'tests' behind other modules that have 'tests'.
    # Here, unlike dev_appserver, we append the path instead of prepending it,
    # so that our classes come first.
    sys.path += dev_appserver.EXTRA_PATHS[:]
def main():
    """Starts in-process server and runs all test cases in this module."""
    fix_sys_path()
    parsed_args = _PARSER.parse_args()
    test_suite = create_test_suite(parsed_args)

    # Union of all TAGS declared by the tests to run; used to decide whether
    # the external integration server must be started.
    all_tags = set()
    for test in iterate_tests(test_suite):
        if hasattr(test, 'TAGS'):
            all_tags.update(test.TAGS)

    server = None
    if TestBase.REQUIRES_INTEGRATION_SERVER in all_tags:
        server = start_integration_server(
            parsed_args.integration_server_start_cmd)

    result = unittest.TextTestRunner(verbosity=2).run(test_suite)

    # Stop the server before checking results so it is shut down even when
    # the suite failed.
    if server:
        stop_integration_server(server)

    if result.errors or result.failures:
        raise Exception(
            'Test suite failed: %s errors, %s failures of '
            ' %s tests run.' % (
                len(result.errors), len(result.failures), result.testsRun))

    # Only reached on success. Imported here to avoid a circular import at
    # module load time.
    import tests.functional.actions as actions  # pylint: disable-msg=g-import-not-at-top

    count = len(actions.UNIQUE_URLS_FOUND.keys())
    result.stream.writeln('INFO: Unique URLs found: %s' % count)
    result.stream.writeln('INFO: All %s tests PASSED!' % result.testsRun)
if __name__ == '__main__':
    # Force ASCII as the default encoding before running any tests so the
    # test environment matches the production App Engine configuration.
    appengine_config.gcb_force_default_encoding('ascii')
    main()
| |
import datetime
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.test.client import Client
import mock
from mock import patch
from nose.tools import eq_, ok_
from remo.base.tests import RemoTestCase, requires_login, requires_permission
from remo.profiles.tests import FunctionalAreaFactory, UserFactory
from remo.reports import ACTIVITY_EVENT_CREATE
from remo.reports.models import NGReport, NGReportComment
from remo.reports.tests import NGReportFactory, NGReportCommentFactory
class EditNGReportTests(RemoTestCase):
    """Tests related to New Generation Reports edit View."""

    def test_new_report_initial_data(self):
        # The new-report form should be pre-populated from the user's profile
        # location and coordinates.
        user = UserFactory.create(groups=['Mentor'], userprofile__city='City',
                                  userprofile__region='Region',
                                  userprofile__country='Country',
                                  userprofile__lat=0,
                                  userprofile__lon=90)
        with self.login(user) as client:
            response = client.get(reverse('reports_new_ng_report'), user=user)
        initial = response.context['report_form'].initial
        eq_(initial['location'], 'City, Region, Country')
        eq_(initial['latitude'], 0)
        eq_(initial['longitude'], 90)

    def test_get_as_owner(self):
        # The report owner can open the edit page.
        report = NGReportFactory.create()
        with self.login(report.user) as client:
            response = client.get(report.get_absolute_edit_url(), user=report.user)
        eq_(response.context['report'], report)
        self.assertJinja2TemplateUsed(response, 'edit_ng_report.jinja')

    def test_get_as_mentor(self):
        # Mentors may edit any report.
        user = UserFactory.create(groups=['Mentor'])
        report = NGReportFactory.create()
        with self.login(user) as client:
            response = client.get(report.get_absolute_edit_url(), user=user)
        eq_(response.context['report'], report)
        self.assertJinja2TemplateUsed(response, 'edit_ng_report.jinja')

    def test_get_as_admin(self):
        # Admins may edit any report.
        user = UserFactory.create(groups=['Admin'])
        report = NGReportFactory.create()
        with self.login(user) as client:
            response = client.get(report.get_absolute_edit_url(), user=user)
        eq_(response.context['report'], report)
        self.assertJinja2TemplateUsed(response, 'edit_ng_report.jinja')

    @requires_permission()
    def test_get_as_other_rep(self):
        # A Rep who is not the owner is denied (asserted by the decorator).
        user = UserFactory.create()
        report = NGReportFactory.create()
        with self.login(user) as client:
            client.get(report.get_absolute_edit_url(), user=user)

    @requires_login()
    def test_get_as_anonymous(self):
        # Anonymous access redirects to login (asserted by the decorator).
        report = NGReportFactory.create()
        client = Client()
        client.get(report.get_absolute_edit_url())

    @patch('remo.reports.views.messages.warning')
    def test_get_uneditable(self, messages_mock):
        # Reports for passive activities (e.g. 'Month recap') cannot be edited.
        report = NGReportFactory.create(activity__name='Month recap')
        with self.login(report.user) as client:
            client.get(report.get_absolute_edit_url(), user=report.user, follow=True)
        messages_mock.assert_called_with(mock.ANY, 'You cannot edit this report.')

    # NOTE: patch decorators apply bottom-up, so mock arguments arrive in
    # reverse order of the decorator list.
    @patch('remo.reports.views.messages.success')
    @patch('remo.reports.views.redirect', wraps=redirect)
    @patch('remo.reports.views.forms.NGReportForm')
    @patch('remo.reports.models.NGReport.get_absolute_url')
    def test_create_new_report(self, get_absolute_url_mock, form_mock,
                               redirect_mock, messages_mock):
        form_mock.is_valid.return_value = True
        get_absolute_url_mock.return_value = 'main'
        user = UserFactory.create()
        with self.login(user) as client:
            response = client.post(reverse('reports_new_ng_report'), user=user, follow=True)
        eq_(response.status_code, 200)
        messages_mock.assert_called_with(mock.ANY, 'Report successfully created.')
        redirect_mock.assert_called_with('main')
        ok_(form_mock().save.called)

    @patch('remo.reports.views.messages.success')
    @patch('remo.reports.views.redirect', wraps=redirect)
    @patch('remo.reports.views.forms.NGReportForm')
    @patch('remo.reports.models.NGReport.get_absolute_url')
    def test_update_report(self, get_absolute_url_mock, form_mock,
                           redirect_mock, messages_mock):
        form_mock.is_valid.return_value = True
        get_absolute_url_mock.return_value = 'main'
        report = NGReportFactory.create()
        with self.login(report.user) as client:
            response = client.post(report.get_absolute_edit_url(), user=report.user, follow=True)
        eq_(response.status_code, 200)
        messages_mock.assert_called_with(mock.ANY, 'Report successfully updated.')
        redirect_mock.assert_called_with('main')
        ok_(form_mock().save.called)

    def test_get_non_existing_report(self):
        # A URL referencing a report that does not exist must 404.
        user = UserFactory.create()
        url = (reverse('reports_ng_edit_report',
                       kwargs={'display_name': user.userprofile.display_name,
                               'year': 3000,
                               'month': 'March',
                               'day': 1,
                               'id': 1}))
        with self.login(user) as client:
            response = client.get(url, user=user)
        self.assertJinja2TemplateUsed(response, '404.jinja')
        eq_(response.status_code, 404)
class DeleteNGReportTests(RemoTestCase):
    """Tests for who may delete a NG report and the resulting redirects."""

    @patch('remo.reports.views.redirect', wraps=redirect)
    def test_as_owner(self, redirect_mock):
        # Owners may delete their reports; redirected to their own profile.
        report = NGReportFactory.create()
        with self.login(report.user) as client:
            client.post(report.get_absolute_delete_url(), user=report.user)
        ok_(not NGReport.objects.filter(pk=report.id).exists())
        redirect_mock.assert_called_with('profiles_view_my_profile')

    @requires_login()
    def test_as_anonymous(self):
        # Anonymous users are bounced to login; the report survives.
        report = NGReportFactory.create()
        client = Client()
        client.post(report.get_absolute_delete_url(), data={})
        ok_(NGReport.objects.filter(pk=report.id).exists())

    @requires_permission()
    def test_as_other_rep(self):
        # A Rep who does not own the report is denied; the report survives.
        user = UserFactory.create()
        report = NGReportFactory.create()
        with self.login(user) as client:
            client.post(report.get_absolute_delete_url(), user=user)
        ok_(NGReport.objects.filter(pk=report.id).exists())

    def test_get(self):
        # Deletion requires POST; a GET must leave the report untouched.
        report = NGReportFactory.create()
        client = Client()
        client.get(report.get_absolute_delete_url(), user=report.user)
        ok_(NGReport.objects.filter(pk=report.id).exists())

    @patch('remo.reports.views.redirect', wraps=redirect)
    def test_as_mentor(self, redirect_mock):
        # Mentors may delete; redirected to the owner's profile page.
        user = UserFactory.create(groups=['Mentor'])
        report = NGReportFactory.create()
        with self.login(user) as client:
            client.post(report.get_absolute_delete_url(), user=user)
        ok_(not NGReport.objects.filter(pk=report.id).exists())
        redirect_mock.assert_called_with(
            'profiles_view_profile',
            display_name=report.user.userprofile.display_name)

    @patch('remo.reports.views.redirect', wraps=redirect)
    def test_as_admin(self, redirect_mock):
        # Admins may delete; redirected to the owner's profile page.
        user = UserFactory.create(groups=['Admin'])
        report = NGReportFactory.create()
        with self.login(user) as client:
            client.post(report.get_absolute_delete_url(), user=user)
        ok_(not NGReport.objects.filter(pk=report.id).exists())
        redirect_mock.assert_called_with('profiles_view_profile',
                                         display_name=report.user.userprofile.display_name)
class ViewNGReportTests(RemoTestCase):
    """Tests related to New Generation Reports view_ng_report View."""

    def test_get(self):
        report = NGReportFactory.create()
        with self.login(report.user) as client:
            response = client.get(report.get_absolute_url(), user=report.user)
        eq_(response.context['report'], report)
        self.assertJinja2TemplateUsed(response, 'view_ng_report.jinja')

    # NOTE: patch decorators apply bottom-up, so mock arguments arrive in
    # reverse order of the decorator list.
    @patch('remo.reports.views.messages.success')
    @patch('remo.reports.views.forms.NGReportCommentForm')
    def test_post_a_comment(self, form_mock, messages_mock):
        # A logged-in user may comment on a report.
        user = UserFactory.create()
        report = NGReportFactory.create(user=user)
        form_mock.is_valid.return_value = True
        with self.login(user) as client:
            response = client.post(report.get_absolute_url(),
                                   user=user,
                                   data={'comment': 'This is a comment'})
        eq_(response.status_code, 200)
        messages_mock.assert_called_with(
            mock.ANY, 'Comment saved successfully.')
        ok_(form_mock().save.called)
        eq_(response.context['report'], report)
        self.assertJinja2TemplateUsed(response, 'view_ng_report.jinja')

    @patch('remo.reports.views.messages.success')
    @patch('remo.reports.views.forms.NGVerifyReportForm')
    def test_verify_report(self, form_mock, messages_mock):
        # Mentors may verify an activity on a report.
        user = UserFactory.create(groups=['Mentor'])
        report = NGReportFactory.create(user=user)
        form_mock.is_valid.return_value = True
        with self.login(user) as client:
            response = client.post(report.get_absolute_url(),
                                   user=user,
                                   data={'verified_activity': 'on'})
        eq_(response.status_code, 200)
        messages_mock.assert_called_with(
            mock.ANY, 'Activity verified successfully.')
        ok_(form_mock().save.called)
        eq_(response.context['report'], report)
        self.assertJinja2TemplateUsed(response, 'view_ng_report.jinja')

    @patch('remo.reports.views.messages.error')
    @patch('remo.reports.views.forms.NGVerifyReportForm')
    @patch('remo.reports.views.redirect', wraps=redirect)
    def test_verify_report_without_permissions(self, redirect_mock, form_mock,
                                               messages_mock):
        # Plain Reps may not verify activities; they are sent back to main.
        user = UserFactory.create(groups=['Rep'])
        report = NGReportFactory.create(user=user)
        form_mock.is_valid.return_value = True
        with self.login(user) as client:
            response = client.post(report.get_absolute_url(),
                                   user=user,
                                   data={'verified_activity': 'on'},
                                   follow=True)
        eq_(response.status_code, 200)
        ok_(not form_mock().save.called)
        messages_mock.assert_called_with(mock.ANY, 'Permission denied.')
        redirect_mock.assert_called_with('main')
        self.assertJinja2TemplateUsed(response, 'main.jinja')

    @patch('remo.reports.views.messages.error')
    @patch('remo.reports.views.forms.NGReportCommentForm')
    @patch('remo.reports.views.redirect', wraps=redirect)
    def test_post_a_comment_anonymous(self, redirect_mock, form_mock,
                                      messages_mock):
        # Anonymous users cannot comment; no comment object is created.
        form_mock.is_valid.return_value = True
        report = NGReportFactory.create()
        c = Client()
        c.post(report.get_absolute_url(), data={})
        ok_(not NGReportComment.objects.filter(report=report).exists())
        messages_mock.assert_called_with(mock.ANY, 'Permission denied.')
        redirect_mock.assert_called_with('main')

    def test_get_uneditable(self):
        # Auto-generated reports (event creation) are flagged non-editable.
        report = NGReportFactory.create(activity__name=ACTIVITY_EVENT_CREATE)
        with self.login(report.user) as client:
            response = client.get(report.get_absolute_url(), user=report.user)
        ok_(not response.context['editable'])
class DeleteNGReportCommentTests(RemoTestCase):
    """Tests related to comment deletion."""

    @patch('remo.reports.views.redirect', wraps=redirect)
    def test_as_owner(self, redirect_mock):
        # The report owner may delete comments on their report.
        report = NGReportFactory.create()
        report_comment = NGReportCommentFactory.create(report=report)
        with self.login(report.user) as client:
            client.post(report_comment.get_absolute_delete_url(), user=report.user)
        ok_(not NGReportComment.objects.filter(pk=report_comment.id).exists())
        redirect_mock.assert_called_with(report.get_absolute_url())

    @requires_login()
    def test_as_anonymous(self):
        # Anonymous users are bounced to login; the comment survives.
        report = NGReportFactory.create()
        report_comment = NGReportCommentFactory.create(report=report)
        client = Client()
        client.post(report_comment.get_absolute_delete_url(), data={})
        ok_(NGReportComment.objects.filter(pk=report_comment.id).exists())

    @requires_permission()
    def test_as_other_rep(self):
        # An unrelated Rep is denied; the comment survives.
        user = UserFactory.create()
        report = NGReportFactory.create()
        report_comment = NGReportCommentFactory.create(report=report)
        with self.login(user) as client:
            client.post(report_comment.get_absolute_delete_url(), user=user)
        ok_(NGReportComment.objects.filter(pk=report_comment.id).exists())

    def test_get(self):
        # Deletion requires POST; a GET must leave the comment untouched.
        report = NGReportFactory.create()
        report_comment = NGReportCommentFactory.create(report=report)
        with self.login(report.user) as client:
            client.get(report_comment.get_absolute_delete_url(), user=report.user)
        ok_(NGReportComment.objects.filter(pk=report_comment.id).exists())

    @patch('remo.reports.views.redirect', wraps=redirect)
    def test_as_mentor(self, redirect_mock):
        # Mentors may delete any comment.
        user = UserFactory.create(groups=['Mentor'])
        report = NGReportFactory.create()
        report_comment = NGReportCommentFactory.create(report=report)
        with self.login(user) as client:
            client.post(report_comment.get_absolute_delete_url(), user=user)
        ok_(not NGReportComment.objects.filter(pk=report_comment.id).exists())
        redirect_mock.assert_called_with(report.get_absolute_url())

    @patch('remo.reports.views.redirect', wraps=redirect)
    def test_as_admin(self, redirect_mock):
        # Admins may delete any comment.
        user = UserFactory.create(groups=['Admin'])
        report = NGReportFactory.create()
        report_comment = NGReportCommentFactory.create(report=report)
        with self.login(user) as client:
            client.post(report_comment.get_absolute_delete_url(), user=user)
        ok_(not NGReportComment.objects.filter(pk=report_comment.id).exists())
        redirect_mock.assert_called_with(report.get_absolute_url())
class ListNGReportTests(RemoTestCase):
    """Tests related to report listing."""

    def test_list(self):
        """Test view report list page."""
        mentor = UserFactory.create(groups=['Mentor'])
        report = NGReportFactory.create(mentor=mentor)
        response = Client().get(reverse('list_ng_reports'))
        self.assertJinja2TemplateUsed(response, 'list_ng_reports.jinja')
        eq_(response.context['pageheader'], 'Activities for Reps')
        eq_(response.status_code, 200)
        eq_(set(response.context['objects'].object_list), set([report]))

    def test_list_rep(self):
        """Test page header context for rep."""
        user = UserFactory.create(groups=['Rep'], first_name='Foo', last_name='Bar')
        name = user.userprofile.display_name
        report = NGReportFactory.create(user=user)
        # A report by a different Rep must NOT show up on this Rep's page.
        NGReportFactory.create()
        with self.login(user) as client:
            response = client.get(reverse('list_ng_reports_rep', kwargs={'rep': name}),
                                  user=user)
        eq_(response.context['pageheader'], 'Activities for Foo Bar')
        eq_(set(response.context['objects'].object_list),
            set([report]), 'Other Rep reports are listed')

    def test_list_mentor(self):
        """Test page header context for mentor."""
        mentor = UserFactory.create(groups=['Mentor'], first_name='Foo',
                                    last_name='Bar')
        name = mentor.userprofile.display_name
        report_1 = NGReportFactory.create(mentor=mentor)
        report_2 = NGReportFactory.create(mentor=mentor)
        # A report under a different mentor must be filtered out.
        NGReportFactory.create()
        response = Client().get(reverse('list_ng_reports_mentor',
                                        kwargs={'mentor': name}), user=mentor)
        msg = 'Activities for Reps mentored by Foo Bar'
        eq_(response.context['pageheader'], msg)
        eq_(set(response.context['objects'].object_list),
            set([report_1, report_2]), 'Other Mentor reports are listed')

    def test_get_invalid_order(self):
        """Test get invalid sort order."""
        # An unknown sort key falls back to the default ordering.
        response = Client().get(reverse('list_ng_reports'), data={'sort_key': 'invalid'})
        eq_(response.context['sort_key'], 'created_date_desc')

    def test_future_not_listed(self):
        # Reports dated in the future are excluded from the listing.
        report = NGReportFactory.create()
        NGReportFactory.create(report_date=datetime.date(2999, 1, 1))
        response = Client().get(reverse('list_ng_reports'))
        eq_(set(response.context['objects'].object_list), set([report]))

    def test_functional_area_list(self):
        # Listing by functional area includes only reports in that area.
        functional_area_1 = FunctionalAreaFactory.create()
        functional_area_2 = FunctionalAreaFactory.create()
        report = NGReportFactory.create(functional_areas=[functional_area_1])
        NGReportFactory.create(functional_areas=[functional_area_2])
        url = reverse('list_ng_reports_functional_area',
                      kwargs={'functional_area_slug': functional_area_1.slug})
        response = Client().get(url)
        eq_(set(response.context['objects'].object_list), set([report]))

    def test_rep_functional_area_list(self):
        # Filter by both Rep and functional area.
        user = UserFactory.create(groups=['Rep'])
        functional_area = FunctionalAreaFactory.create()
        report = NGReportFactory.create(user=user, functional_areas=[functional_area])
        NGReportFactory.create(functional_areas=[functional_area])
        url = reverse('list_ng_reports_rep_functional_area',
                      kwargs={'functional_area_slug': functional_area.slug,
                              'rep': user.userprofile.display_name})
        response = Client().get(url)
        eq_(set(response.context['objects'].object_list), set([report]))

    def test_mentor_functional_area_list(self):
        # Filter by both mentor and functional area.
        mentor = UserFactory.create(groups=['Mentor'])
        functional_area = FunctionalAreaFactory.create()
        report = NGReportFactory.create(mentor=mentor, functional_areas=[functional_area])
        NGReportFactory.create(functional_areas=[functional_area])
        url = reverse('list_ng_reports_mentor_functional_area',
                      kwargs={'functional_area_slug': functional_area.slug,
                              'mentor': mentor.userprofile.display_name})
        response = Client().get(url)
        eq_(set(response.context['objects'].object_list), set([report]))
class LegacyReportingTests(RemoTestCase):
    """Tests for redirects from legacy (monthly) report URLs."""

    def test_old_report_redirect(self):
        """Test old report url redirects to list of reports for that month."""
        user = UserFactory.create(groups=['Rep'])
        # Plain decimal literals: the original `01, 05` leading-zero form is
        # Python-2-only octal syntax and a SyntaxError under Python 3.
        report_date = datetime.date(2011, 1, 5)
        NGReportFactory.create_batch(3, user=user, report_date=report_date)
        display_name = user.userprofile.display_name
        url = reverse('reports_ng_view_report',
                      kwargs={'display_name': display_name,
                              'month': 'January',
                              'year': 2011})
        response = Client().get(url, follow=True)
        expected_redirect_url = '/reports/rep/{}/'.format(display_name)
        self.assertEqual(response.status_code, 200)
        redirect_full_url, redirect_code = response.redirect_chain[0]
        self.assertEqual(redirect_code, 302)
        # Query parameter order is not guaranteed, so compare as a set.
        # (A duplicated status_code assertion was removed here.)
        redirect_url, redirect_params = redirect_full_url.split('?')
        self.assertTrue(redirect_url.endswith(expected_redirect_url))
        self.assertEqual(set(redirect_params.split('&')),
                         set(['year=2011', 'month=January']))
        eq_(response.context['number_of_reports'], 3)
| |
# Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Keras Model to Estimator example code for proto formats stored in TFRecord.
The supported proto formats are listed at ../python/data.py.
--------------------------------------------------------------------------------
Sample command lines:
MODEL_DIR=/tmp/output && \
TRAIN=tensorflow_ranking/examples/data/train_elwc.tfrecord && \
EVAL=tensorflow_ranking/examples/data/eval_elwc.tfrecord && \
VOCAB=tensorflow_ranking/examples/data/vocab.txt && \
WEIGHT_FEATURE_NAME="doc_weight" && \
rm -rf $MODEL_DIR && \
bazel build -c opt \
tensorflow_ranking/examples/keras/keras_m2e_tfrecord_py_binary && \
./bazel-bin/tensorflow_ranking/examples/keras/keras_m2e_tfrecord_py_binary \
--train_path=$TRAIN \
--eval_path=$EVAL \
--vocab_path=$VOCAB \
--model_dir=$MODEL_DIR \
--data_format=example_list_with_context \
--weights_feature_name=$WEIGHT_FEATURE_NAME
You can use TensorBoard to display the training results stored in $MODEL_DIR.
Notes:
* Use --alsologtostderr if the output is not printed into screen.
"""
from absl import flags
import tensorflow as tf
import tensorflow_ranking as tfr
# Command-line flags controlling data format, I/O paths and model
# hyperparameters. See the module docstring for sample invocations.
flags.DEFINE_enum(
    "data_format", "example_list_with_context",
    ["example_list_with_context", "example_in_example", "sequence_example"],
    "Data format defined in data.py.")
flags.DEFINE_string("train_path", None, "Input file path used for training.")
flags.DEFINE_string("eval_path", None, "Input file path used for eval.")
flags.DEFINE_string("vocab_path", None,
                    "Vocabulary path for query and document tokens.")
flags.DEFINE_string("model_dir", None, "Output directory for models.")
flags.DEFINE_integer("batch_size", 32, "The batch size for train.")
flags.DEFINE_integer("num_train_steps", 15000, "Number of steps for train.")
flags.DEFINE_float("learning_rate", 0.05, "Learning rate for optimizer.")
flags.DEFINE_float("dropout_rate", 0.8, "The dropout rate before output layer.")
flags.DEFINE_list("hidden_layer_dims", ["64", "32", "16"],
                  "Sizes for hidden layers.")
flags.DEFINE_integer(
    "list_size", None,
    "List size used for training. Use None for dynamic list size.")
flags.DEFINE_integer("group_size", 1, "Group size used in score function.")
flags.DEFINE_string("loss", "approx_ndcg_loss",
                    "The RankingLossKey for the loss function.")
flags.DEFINE_string(
    "weights_feature_name", None,
    "The name of the feature where unbiased learning-to-rank "
    "weights are stored.")

FLAGS = flags.FLAGS

# Name of the per-example relevance label feature in the input records.
_LABEL_FEATURE = "relevance"
# Label value assigned to padded (invalid) examples within a list.
_PADDING_LABEL = -1
# Embedding dimension shared by query and document token columns.
_EMBEDDING_DIMENSION = 20
# Name of the generated feature holding each example list's true size.
_SIZE = "example_list_size"
def _get_feature_columns():
  """Returns context and example feature columns.

  Returns:
    A tuple of dicts (context_feature_columns, example_feature_columns), where
    the dicts are a mapping from feature name to feature column.
  """

  def _embedded_column(key):
    """Builds an embedding column over a categorical token feature."""
    # Prefer the vocabulary file when one is supplied; otherwise hash the
    # tokens into a fixed number of buckets.
    if FLAGS.vocab_path:
      categorical = tf.feature_column.categorical_column_with_vocabulary_file(
          key=key, vocabulary_file=FLAGS.vocab_path)
    else:
      categorical = tf.feature_column.categorical_column_with_hash_bucket(
          key=key, hash_bucket_size=100)
    return tf.feature_column.embedding_column(categorical,
                                              _EMBEDDING_DIMENSION)

  context_feature_columns = {
      "query_tokens": _embedded_column("query_tokens")
  }
  example_feature_columns = {
      "document_tokens": _embedded_column("document_tokens")
  }
  return context_feature_columns, example_feature_columns
def _get_example_weight_feature_column():
  """Returns the per-example weight column, or None when weights are unused."""
  if not FLAGS.weights_feature_name:
    return None
  # Default weight of 1.0 keeps examples without a weight feature unweighted.
  return tf.feature_column.numeric_column(
      FLAGS.weights_feature_name, dtype=tf.float32, default_value=1.)
def make_input_fn(file_pattern,
                  batch_size,
                  randomize_input=True,
                  num_epochs=None):
  """Returns `Estimator` `input_fn` for TRAIN and EVAL.

  Args:
    file_pattern: (string) file pattern for the TFRecord input data.
    batch_size: (int) number of input examples to process per batch.
    randomize_input: (bool) if true, randomize input example order. It should
      almost always be true except for unittest/debug purposes.
    num_epochs: (int) Number of times the input dataset must be repeated. None
      to repeat the data indefinitely.

  Returns:
    An `input_fn` for `Estimator`.
  """
  tf.compat.v1.logging.info("FLAGS.data_format={}".format(FLAGS.data_format))

  def _input_fn():
    """Defines the input_fn."""
    context_feature_columns, example_feature_columns = _get_feature_columns()
    context_feature_spec = tf.feature_column.make_parse_example_spec(
        list(context_feature_columns.values()))
    # Labels are parsed alongside the example features; padded slots in a
    # list receive _PADDING_LABEL so the loss can ignore them.
    label_column = tf.feature_column.numeric_column(
        _LABEL_FEATURE, dtype=tf.int64, default_value=_PADDING_LABEL)
    weight_column = _get_example_weight_feature_column()
    # Include the optional weight column only when it is configured.
    example_fc_list = (
        list(example_feature_columns.values()) + [label_column] +
        ([weight_column] if weight_column else []))
    example_feature_spec = tf.feature_column.make_parse_example_spec(
        example_fc_list)
    dataset = tfr.data.build_ranking_dataset(
        file_pattern=file_pattern,
        data_format=FLAGS.data_format,
        batch_size=batch_size,
        list_size=FLAGS.list_size,
        context_feature_spec=context_feature_spec,
        example_feature_spec=example_feature_spec,
        reader=tf.data.TFRecordDataset,
        shuffle=randomize_input,
        num_epochs=num_epochs,
        size_feature_name=_SIZE)
    features = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
    # Pop the label out of the feature dict and drop its trailing size-1
    # axis; cast to float32 as expected by the ranking losses.
    label = tf.squeeze(features.pop(_LABEL_FEATURE), axis=2)
    label = tf.cast(label, tf.float32)
    return features, label

  return _input_fn
def make_serving_input_fn():
  """Returns serving input fn."""
  context_cols, example_cols = _get_feature_columns()
  # The parsing specs for serving are derived directly from the columns;
  # no label or weight columns are involved at serving time.
  make_spec = tf.feature_column.make_parse_example_spec
  return tfr.data.build_ranking_serving_input_receiver_fn(
      data_format=FLAGS.data_format,
      context_feature_spec=make_spec(context_cols.values()),
      example_feature_spec=make_spec(example_cols.values()),
      size_feature_name=_SIZE)
def get_estimator():
  """Create Keras ranking estimator."""
  context_feature_columns, example_feature_columns = _get_feature_columns()
  # To build your own custom ranking network, look at how canned
  # DNNRankingNetwork is implemented. You can subclass
  # tfr.keras.network.UnivariateRankingNetwork, or the more generic
  # tfr.keras.network.RankingNetwork to build your own network.
  network = tfr.keras.canned.DNNRankingNetwork(
      context_feature_columns=context_feature_columns,
      example_feature_columns=example_feature_columns,
      hidden_layer_dims=[int(d) for d in FLAGS.hidden_layer_dims],
      activation=tf.nn.relu,
      dropout=FLAGS.dropout_rate,
      use_batch_norm=True,
      batch_norm_moment=0.99,
      name="dnn_ranking_model")
  # Loss is selected by RankingLossKey string (e.g. "approx_ndcg_loss").
  loss = tfr.keras.losses.get(
      FLAGS.loss, reduction=tf.compat.v2.losses.Reduction.SUM_OVER_BATCH_SIZE)
  metrics = tfr.keras.metrics.default_keras_metrics()
  optimizer = tf.keras.optimizers.Adagrad(learning_rate=FLAGS.learning_rate)
  config = tf.estimator.RunConfig(save_checkpoints_steps=1000)
  ranker = tfr.keras.model.create_keras_model(
      network=network,
      loss=loss,
      metrics=metrics,
      optimizer=optimizer,
      size_feature_name=_SIZE)
  # Wrap the Keras model as an Estimator; per-example weights (if any) are
  # read from the named feature.
  estimator = tfr.keras.estimator.model_to_estimator(
      model=ranker,
      model_dir=FLAGS.model_dir,
      config=config,
      weights_feature_name=FLAGS.weights_feature_name)
  return estimator
def train_and_eval():
  """Train and Evaluate."""
  estimator = get_estimator()

  train_spec = tf.estimator.TrainSpec(
      input_fn=make_input_fn(FLAGS.train_path, FLAGS.batch_size),
      max_steps=FLAGS.num_train_steps)

  # Evaluation reads the eval set exactly once, in file order.
  eval_input_fn = make_input_fn(
      FLAGS.eval_path, FLAGS.batch_size, randomize_input=False, num_epochs=1)
  eval_spec = tf.estimator.EvalSpec(
      name="eval",
      input_fn=eval_input_fn,
      steps=1,
      exporters=tf.estimator.LatestExporter(
          "saved_model_exporter",
          serving_input_receiver_fn=make_serving_input_fn()),
      start_delay_secs=0,
      throttle_secs=15)

  # Train and validate.
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def main(_):
  """Entry point: seed TF, enable INFO logging, then train and evaluate."""
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
  # Fixed seed for reproducible runs.
  tf.compat.v1.set_random_seed(1234)
  train_and_eval()
if __name__ == "__main__":
flags.mark_flag_as_required("train_path")
flags.mark_flag_as_required("eval_path")
flags.mark_flag_as_required("model_dir")
tf.compat.v1.app.run()
| |
from billing import Integration, IntegrationNotConfigured
from billing.models import GCNewOrderNotification
from django.conf import settings
from xml.dom import minidom
import hmac
import hashlib
import base64
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.http import HttpResponse, QueryDict
from billing import signals
from django.conf.urls import patterns
from django.utils.decorators import method_decorator
from django.core.exceptions import PermissionDenied
# Google Checkout API endpoints, parameterized by merchant id.
SANDBOX_URL = 'https://sandbox.google.com/checkout/api/checkout/v2/checkout/Merchant/%s'
PROD_URL = 'https://checkout.google.com/api/checkout/v2/checkout/Merchant/%s'

# Checkout button images served by Google, parameterized by merchant id and
# button dimensions.
BUTTON_SANDBOX_URL = 'https://sandbox.google.com/checkout/buttons/checkout.gif?merchant_id=%(merchant_id)s&w=%(width)s&h=%(height)s&style=white&variant=text&loc=en_US'
BUTTON_URL = 'https://checkout.google.com/buttons/checkout.gif?merchant_id=%(merchant_id)s&w=%(width)s&h=%(height)s&style=white&variant=text&loc=en_US'

# Method-decorator forms for use on class-based view methods.
csrf_exempt_m = method_decorator(csrf_exempt)
require_POST_m = method_decorator(require_POST)
class GoogleCheckoutIntegration(Integration):
display_name = 'Google Checkout'
template = "billing/google_checkout.html"
def __init__(self, options=None):
    """Configure the integration from settings.MERCHANT_SETTINGS.

    Raises IntegrationNotConfigured when the 'google_checkout' section or
    its MERCHANT_ID/MERCHANT_KEY entries are missing.
    """
    options = options or {}
    super(GoogleCheckoutIntegration, self).__init__(options=options)
    merchant_settings = getattr(settings, "MERCHANT_SETTINGS")
    google_checkout_settings = (
        merchant_settings.get("google_checkout") if merchant_settings else None)
    if not google_checkout_settings:
        raise IntegrationNotConfigured("The '%s' integration is not correctly "
                                       "configured." % self.display_name)
    self.merchant_id = google_checkout_settings['MERCHANT_ID']
    self.merchant_key = google_checkout_settings['MERCHANT_KEY']
    self._signature = None
@property
def service_url(self):
    """Checkout API URL for this merchant (sandbox when in test mode)."""
    base = SANDBOX_URL if self.test_mode else PROD_URL
    return base % self.merchant_id
def button_image_url(self):
    """URL of the Google Checkout button image for this merchant."""
    template = BUTTON_SANDBOX_URL if self.test_mode else BUTTON_URL
    # Width/height come from the configured fields (with defaults).
    return template % {"merchant_id": self.merchant_id,
                       "width": self.button_width,
                       "height": self.button_height}
@property
def button_width(self):
    """Button image width in pixels; overridable via the 'button_width' field."""
    return self.fields.get("button_width", 180)
@property
def button_height(self):
    """Button image height in pixels; overridable via the 'button_height' field."""
    return self.fields.get("button_height", 46)
def _add_nodes(self, doc, parent_node, child_node_name,
               child_subnode_name, child_node_values):
    """Helper method that makes it easy to add a bunch of like child nodes
    to a parent node.

    For each value, appends
    <child_node_name><child_subnode_name>value</...></...> to parent_node.
    Does nothing when child_node_values is empty or None.
    """
    # NOTE: `unicode` makes this Python-2-only code.
    if child_node_values:
        for value in child_node_values:
            child_node = doc.createElement(unicode(child_node_name))
            child_sub_node = doc.createElement(unicode(child_subnode_name))
            child_node.appendChild(child_sub_node)
            child_sub_node.appendChild(doc.createTextNode(value))
            parent_node.appendChild(child_node)
def _shipping_allowed_excluded(self, doc, parent_node, data):
    """Build the nodes for the allowed-areas, excluded-areas
    for shipping-restrictions and address-filters.

    `data` is a dict which may contain 'us-state-area', 'us-zip-area',
    'us-country-area', 'world-area' and 'postal-area' entries describing
    the geographic areas.
    """
    if not data:
        return
    states = data.get('us-state-area', None)
    zips = data.get('us-zip-area', None)
    country = data.get('us-country-area', None)
    world = data.get('world-area', False)
    postal = data.get('postal-area', None)

    # <us-state-area><state>XX</state></us-state-area>, one per state.
    self._add_nodes(doc, parent_node, 'us-state-area', 'state', states)
    # <us-zip-area><zip-pattern>NNNNN</zip-pattern></us-zip-area>, one per zip.
    self._add_nodes(doc, parent_node, 'us-zip-area', 'zip-pattern', zips)

    if country:
        # us-country-area carries the area as an attribute, not a child node.
        us_country_area = doc.createElement('us-country-area')
        us_country_area.setAttribute('country-area', unicode(country))
        parent_node.appendChild(us_country_area)

    if world:
        # world-area is an empty marker element.
        parent_node.appendChild(doc.createElement('world-area'))

    if postal:
        # Each postal area may restrict by a country code and/or a list of
        # postal-code patterns.
        for post in postal:
            p_country_code = post.get('country-code', None)
            p_pattern = post.get('postal-code-pattern', None)
            postal_area = doc.createElement('postal-area')
            if p_country_code:
                c_code = doc.createElement('country-code')
                c_code.appendChild(doc.createTextNode(unicode(p_country_code)))
                postal_area.appendChild(c_code)
            if p_pattern:
                for pp in p_pattern:
                    p_p = doc.createElement('postal-code-pattern')
                    p_p.appendChild(doc.createTextNode(unicode(pp)))
                    postal_area.appendChild(p_p)
            parent_node.appendChild(postal_area)
def _shipping_restrictions_filters(self, doc, parent_node, data):
""" process the shipping restriction and address-filter sections for
the shipping method merchant-calculated-shipping and flat-rate-shipping
"""
the_allowed_areas = data.get('allowed-areas', None)
the_excluded_areas = data.get('excluded-areas', None)
allow_us_po_box = data.get('allow-us-po-box', None)
if allow_us_po_box is not None:
allow_po_box = doc.createElement('allow-us-po-box')
allow_po_box.appendChild(
doc.createTextNode(str(allow_us_po_box).lower()))
parent_node.appendChild(allow_po_box)
if the_allowed_areas:
allowed_areas = doc.createElement('allowed-areas')
parent_node.appendChild(allowed_areas)
self._shipping_allowed_excluded(doc,
allowed_areas,
the_allowed_areas)
if the_excluded_areas:
excluded_areas = doc.createElement('excluded-areas')
parent_node.appendChild(excluded_areas)
self._shipping_allowed_excluded(doc,
excluded_areas,
the_excluded_areas)
def _process_tax_rule(self, doc, parent_node, node_name, data, show_shipping_tax=True):
""" process a tax rule default_tax_rule, and alternative_tax_rule"""
tax_rule = doc.createElement(node_name)
parent_node.appendChild(tax_rule)
shipping_taxed = data.get('shipping-taxed', False)
rate = data.get('rate', 0)
tax_area = data.get('tax-area', {})
zips = tax_area.get('us-zip-area', [])
states = tax_area.get('us-state-area', [])
postal = tax_area.get('postal-area', [])
country = tax_area.get('us-country-area', None)
word_area = tax_area.get('world-area', False)
if shipping_taxed is not None and show_shipping_tax:
shippingtaxed_node = doc.createElement('shipping-taxed')
shippingtaxed_node.appendChild(
doc.createTextNode(str(shipping_taxed).lower()))
tax_rule.appendChild(shippingtaxed_node)
rate_node = doc.createElement('rate')
rate_node.appendChild(
doc.createTextNode(str(rate)))
tax_rule.appendChild(rate_node)
# if there is more then one area then the tag switches from
# tax-area to tax-areas.
total_areas = len(zips) + len(states) + len(postal)
if word_area:
total_areas += 1
if country is not None:
total_areas += 1
if total_areas == 1:
tax_area_label = 'tax-area'
else:
tax_area_label = 'tax-areas'
tax_area_node = doc.createElement(tax_area_label)
tax_rule.appendChild(tax_area_node)
self._add_nodes(doc, tax_area_node, 'us-state-area', 'state', states)
self._add_nodes(doc, tax_area_node, 'us-zip-area', 'zip-pattern', zips)
if country is not None:
us_country_area = doc.createElement('us-country-area')
us_country_area.setAttribute('country-area', unicode(country))
tax_area_node.appendChild(us_country_area)
if word_area:
tax_area_node.appendChild(doc.createElement('world-area'))
if postal:
for post in postal:
p_country_code = post.get('country-code', None)
p_pattern = post.get('postal-code-pattern', None)
postal_area = doc.createElement('postal-area')
if p_country_code:
c_code = doc.createElement('country-code')
c_code.appendChild(doc.createTextNode(unicode(p_country_code)))
postal_area.appendChild(c_code)
if p_pattern:
for pp in p_pattern:
p_p = doc.createElement('postal-code-pattern')
p_p.appendChild(doc.createTextNode(unicode(pp)))
postal_area.appendChild(p_p)
tax_area_node.appendChild(postal_area)
def _alt_tax_tables(self, doc, parent_node, data):
""" Alternative Tax tables """
alt_tax_tables = data.get('alternate-tax-tables', None)
if not alt_tax_tables:
return
alt_tax_tables_node = doc.createElement('alternate-tax-tables')
parent_node.appendChild(alt_tax_tables_node)
for alt_tax_table in alt_tax_tables:
alt_tax_table_node = doc.createElement('alternate-tax-table')
alt_tax_table_node.setAttribute('name', unicode(alt_tax_table.get('name')))
alt_tax_table_node.setAttribute('standalone', unicode(str(alt_tax_table.get('standalone', False)).lower()))
alt_tax_tables_node.appendChild(alt_tax_table_node)
# if there are no rules we still want to show the element <alternate-tax-rules/>
alt_tax_rules = alt_tax_table.get('alternative-tax-rules', [])
alt_tax_rules_node = doc.createElement('alternate-tax-rules')
alt_tax_table_node.appendChild(alt_tax_rules_node)
for tax_rule in alt_tax_rules:
self._process_tax_rule(doc, alt_tax_rules_node, 'alternate-tax-rule', tax_rule, show_shipping_tax=False)
def _default_tax_table(self, doc, parent_node, data):
""" process default tax table """
default_tax_table_node = doc.createElement('default-tax-table')
parent_node.appendChild(default_tax_table_node)
tax_rules_node = doc.createElement('tax-rules')
default_tax_table_node.appendChild(tax_rules_node)
default_tax_table = data.get('default-tax-table', None)
if default_tax_table:
tax_rules = default_tax_table.get('tax-rules', [])
for tax_rule in tax_rules:
self._process_tax_rule(doc, tax_rules_node, 'default-tax-rule', tax_rule)
def _taxes(self, doc, parent_node, data):
""" Process the taxes section """
tax_tables = doc.createElement('tax-tables')
parent_node.appendChild(tax_tables)
self._default_tax_table(doc, tax_tables, data)
self._alt_tax_tables(doc, tax_tables, data)
def _process_item(self, doc, parent, item, item_tag_name="item"):
it = doc.createElement(item_tag_name)
parent.appendChild(it)
it_name = doc.createElement("item-name")
it_name.appendChild(doc.createTextNode(unicode(item["name"])))
it.appendChild(it_name)
it_descr = doc.createElement('item-description')
it_descr.appendChild(doc.createTextNode(unicode(item["description"])))
it.appendChild(it_descr)
it_price = doc.createElement("unit-price")
it_price.setAttribute("currency", unicode(item["currency"]))
it_price.appendChild(doc.createTextNode(unicode(item["amount"])))
it.appendChild(it_price)
it_qty = doc.createElement("quantity")
it_qty.appendChild(doc.createTextNode(unicode(item["quantity"])))
it.appendChild(it_qty)
it_unique_id = doc.createElement("merchant-item-id")
it_unique_id.appendChild(doc.createTextNode(unicode(item["id"])))
it.appendChild(it_unique_id)
if 'private-item-data' in item:
it_private = doc.createElement("merchant-private-item-data")
it.appendChild(it_private)
it_data = unicode(item.get('private-item-data', ""))
it_private.appendChild(doc.createTextNode(it_data))
if 'subscription' in item:
subscription = item['subscription']
it_subscription = doc.createElement("subscription")
if "type" in subscription:
it_subscription.setAttribute('type', unicode(subscription["type"]))
if "period" in subscription:
it_subscription.setAttribute('period', unicode(subscription["period"]))
if "start-date" in subscription:
it_subscription.setAttribute('start-date', unicode(subscription["start-date"]))
if "no-charge-after" in subscription:
it_subscription.setAttribute('no-charge-after', unicode(subscription["no-charge-after"]))
it.appendChild(it_subscription)
if "payments" in subscription:
it_payments = doc.createElement("payments")
it_subscription.appendChild(it_payments)
payment_items = subscription["payments"]
for payment in payment_items:
it_subscription_payment = doc.createElement("subscription-payment")
it_payments.appendChild(it_subscription_payment)
if 'times' in payment:
it_subscription_payment.setAttribute('times', unicode(payment["times"]))
maximum_charge = doc.createElement("maximum-charge")
maximum_charge.setAttribute("currency", unicode(payment["currency"]))
it_subscription_payment.appendChild(maximum_charge)
maximum_charge.appendChild(doc.createTextNode(unicode(payment["maximum-charge"])))
if "recurrent-items" in subscription:
recurrent_items = subscription["recurrent-items"]
for recurrent_item in recurrent_items:
self._process_item(doc, it_subscription, recurrent_item, item_tag_name="recurrent-item")
if "digital-content" in item:
digital_content = item['digital-content']
it_dc = doc.createElement("digital-content")
it.appendChild(it_dc)
if "display-disposition" in digital_content:
dc_dd = doc.createElement('display-disposition')
dc_dd.appendChild(doc.createTextNode(unicode(digital_content["display-disposition"])))
it_dc.appendChild(dc_dd)
if "description" in digital_content:
dc_descr = doc.createElement('description')
dc_descr.appendChild(doc.createTextNode(unicode(digital_content["description"])))
it_dc.appendChild(dc_descr)
if "email-delivery" in digital_content:
dc_email = doc.createElement('email-delivery')
dc_email.appendChild(doc.createTextNode(unicode(digital_content["email-delivery"])))
it_dc.appendChild(dc_email)
if "key" in digital_content:
dc_key = doc.createElement('key')
dc_key.appendChild(doc.createTextNode(unicode(digital_content["key"])))
it_dc.appendChild(dc_key)
if "url" in digital_content:
dc_url = doc.createElement('url')
dc_url.appendChild(doc.createTextNode(unicode(digital_content["url"])))
it_dc.appendChild(dc_url)
if 'tax-table-selector' in item:
tax_table_selector_node = doc.createElement('tax-table-selector')
it.appendChild(tax_table_selector_node)
it_tax_table = unicode(item.get('tax-table-selector', ""))
tax_table_selector_node.appendChild(doc.createTextNode(it_tax_table))
def build_xml(self):
""" Build up the Cart XML. Seperate method for easier unit testing """
doc = minidom.Document()
root = doc.createElement('checkout-shopping-cart')
root.setAttribute('xmlns', 'http://checkout.google.com/schema/2')
doc.appendChild(root)
cart = doc.createElement('shopping-cart')
root.appendChild(cart)
items = doc.createElement('items')
cart.appendChild(items)
merchant_private_data = doc.createElement('merchant-private-data')
cart.appendChild(merchant_private_data)
private_data = unicode(self.fields.get("private_data", ""))
merchant_private_data.appendChild(doc.createTextNode(private_data))
ip_items = self.fields.get("items", [])
for item in ip_items:
self._process_item(doc, items, item)
checkout_flow = doc.createElement('checkout-flow-support')
root.appendChild(checkout_flow)
merchant_checkout_flow = doc.createElement('merchant-checkout-flow-support')
checkout_flow.appendChild(merchant_checkout_flow)
return_url = doc.createElement('continue-shopping-url')
return_url.appendChild(doc.createTextNode(self.fields["return_url"]))
merchant_checkout_flow.appendChild(return_url)
# supports: flat-rate-shipping, merchant-calculated-shipping, pickup
# No support for carrier-calculated-shipping yet
shipping = self.fields.get("shipping-methods", [])
if shipping:
shipping_methods = doc.createElement('shipping-methods')
merchant_checkout_flow.appendChild(shipping_methods)
for ship_method in shipping:
# don't put dict.get() because we want these to fail if
# they aren't here because they are required.
shipping_type = doc.createElement(unicode(ship_method["shipping_type"]))
shipping_type.setAttribute('name', unicode(ship_method["name"]))
shipping_methods.appendChild(shipping_type)
shipping_price = doc.createElement('price')
shipping_price.setAttribute('currency', unicode(ship_method["currency"]))
shipping_type.appendChild(shipping_price)
shipping_price_text = doc.createTextNode(unicode(ship_method["price"]))
shipping_price.appendChild(shipping_price_text)
restrictions = ship_method.get('shipping-restrictions', None)
if restrictions:
shipping_restrictions = doc.createElement('shipping-restrictions')
shipping_type.appendChild(shipping_restrictions)
self._shipping_restrictions_filters(doc,
shipping_restrictions,
restrictions)
address_filters = ship_method.get('address-filters', None)
if address_filters:
address_filters_node = doc.createElement('address-filters')
shipping_type.appendChild(address_filters_node)
self._shipping_restrictions_filters(doc,
address_filters_node,
address_filters)
# add support for taxes.
# both default-tax-table and alternate-tax-tables is supported.
taxes = self.fields.get("tax-tables", None)
if taxes:
self._taxes(doc, merchant_checkout_flow, taxes)
return doc.toxml(encoding="utf-8")
def generate_cart_xml(self):
cart_xml = self.build_xml()
hmac_signature = hmac.new(self.merchant_key, cart_xml, hashlib.sha1).digest()
self._signature = base64.b64encode(hmac_signature)
return base64.b64encode(cart_xml)
def signature(self):
if not self._signature:
self.generate_cart_xml()
return self._signature
@csrf_exempt_m
@require_POST_m
def gc_notify_handler(self, request):
#get the Authorization string from the Google POST header
auth_string = request.META.get("HTTP_AUTHORIZATION", "")
if auth_string:
#decode the Authorization string and remove Basic portion
plain_string = base64.b64decode(auth_string.lstrip('Basic '))
#split the decoded string at the ':'
split_string = plain_string.split(':')
merchant_id = split_string[0]
merchant_key = split_string[1]
if self.check_auth(merchant_id, merchant_key):
data = self.parse_response(request.body)
type = data.get('type', "")
serial_number = data.get('serial-number', "").strip()
if type == 'new-order-notification':
self.gc_new_order_notification(data)
elif type == 'order-state-change-notification':
self.gc_order_state_change_notification(data)
elif type == 'charge-amount-notification':
self.gc_charge_amount_notification(data)
# Create Response
doc = minidom.Document()
notification_acknowledgment = doc.createElement("notification-acknowledgment")
notification_acknowledgment.setAttribute("xmlns","http://checkout.google.com/schema/2")
notification_acknowledgment.setAttribute("serial-number", serial_number)
doc.appendChild(notification_acknowledgment)
ack = doc.toxml(encoding="utf-8")
return HttpResponse(content=ack, content_type="text/xml; charset=UTF-8")
else:
raise PermissionDenied
else:
raise PermissionDenied
def gc_cart_items_blob(self, post_data):
items = post_data.getlist('shopping-cart.items')
cart_blob = ''
for item in items:
item_id = post_data.get('%s.merchant-item-id' % (item), '')
item_name = post_data.get('%s.item-name' % (item), '')
item_desc = post_data.get('%s.item-description' % (item), '')
item_price = post_data.get('%s.unit-price' % (item), '')
item_price_currency = post_data.get('%s.unit-price.currency' % (item), '')
item_quantity = post_data.get('%s.quantity' % (item), '')
item_private_data = post_data.get('%s.merchant-private-item-data' % (item), '')
cart_blob += '%(item_id)s\t%(item_name)s\t%(item_desc)s\t%(item_price)s\t%(item_price_currency)s\t%(item_quantity)s\t%(item_private_data)s\n\n' % ({"item_id": item_id,
"item_name": item_name,
"item_desc": item_desc,
"item_price": item_price,
"item_price_currency": item_price_currency,
"item_quantity": item_quantity,
"item_private_data": item_private_data,
})
return cart_blob
def gc_new_order_notification(self, post_data):
data = {}
resp_fields = {
"type": "notify_type",
"serial-number": "serial_number",
"google-order-number": "google_order_number",
"buyer-id": "buyer_id",
"buyer-shipping-address.contact-name": "shipping_contact_name",
"buyer-shipping-address.address1": "shipping_address1",
"buyer-shipping-address.address2": "shipping_address2",
"buyer-shipping-address.city": "shipping_city",
"buyer-shipping-address.postal-code": "shipping_postal_code",
"buyer-shipping-address.region": "shipping_region",
"buyer-shipping-address.country-code": "shipping_country_code",
"buyer-shipping-address.email": "shipping_email",
"buyer-shipping-address.company-name": "shipping_company_name",
"buyer-shipping-address.fax": "shipping_fax",
"buyer-shipping-address.phone": "shipping_phone",
"buyer-billing-address.contact-name": "billing_contact_name",
"buyer-billing-address.address1": "billing_address1",
"buyer-billing-address.address2": "billing_address2",
"buyer-billing-address.city": "billing_city",
"buyer-billing-address.postal-code": "billing_postal_code",
"buyer-billing-address.region": "billing_region",
"buyer-billing-address.country-code": "billing_country_code",
"buyer-billing-address.email": "billing_email",
"buyer-billing-address.company-name": "billing_company_name",
"buyer-billing-address.fax": "billing_fax",
"buyer-billing-address.phone": "billing_phone",
"buyer-marketing-preferences.email-allowed": "marketing_email_allowed",
"order-adjustment.total-tax": "total_tax",
"order-adjustment.total-tax.currency": "total_tax_currency",
"order-adjustment.adjustment-total": "adjustment_total",
"order-adjustment.adjustment-total.currency": "adjustment_total_currency",
"order-total": "order_total",
"order-total.currency": "order_total_currency",
"financial-order-state": "financial_order_state",
"fulfillment-order-state": "fulfillment_order_state",
"timestamp": "timestamp",
"shopping-cart.merchant-private-data": "private_data",
}
for (key, val) in resp_fields.iteritems():
data[val] = post_data.get(key, '')
data['num_cart_items'] = len(post_data.getlist('shopping-cart.items'))
data['cart_items'] = self.gc_cart_items_blob(post_data)
resp = GCNewOrderNotification.objects.create(**data)
def gc_order_state_change_notification(self, post_data):
order = GCNewOrderNotification.objects.get(google_order_number=post_data['google-order-number'])
order.financial_order_state = post_data['new-financial-order-state']
order.fulfillment_order_state = post_data['new-fulfillment-order-state']
order.save()
def gc_charge_amount_notification(self, post_data):
order = GCNewOrderNotification.objects.get(google_order_number=post_data['google-order-number'])
post_data['local_order'] = order
signals.transaction_was_successful.send(sender=self.__class__,
type="purchase",
response=post_data)
def get_urls(self):
urlpatterns = patterns('',
(r'^gc-notify-handler/$', self.gc_notify_handler),
)
return urlpatterns
def check_auth(self, merchant_id, merchant_key):
"Check to ensure valid Google notification."
if merchant_id == self.merchant_id and merchant_key == self.merchant_key:
return True
else: return False
def parse_response(self, response):
dom = minidom.parseString(response)
response_type = dom.childNodes[0].localName #get the reaponse type
#use this dictionary to determine which items will be taken from the reaponse
result = QueryDict("", mutable=True)
result['type'] = response_type
# load root values
result.update(self.load_child_nodes(dom.childNodes[0], is_root=True, ignore_nodes=["items"]))
# load items
items_arr = []
items_node = dom.getElementsByTagName('items')
if items_node:
n = 0
for item in items_node[0].childNodes:
if item.localName:
# load root item values
item_name = 'item-%s' % n
for key, value in self.load_child_nodes(item, is_root=True, ignore_nodes=['subscription', 'digital-content']).items():
result['%s.%s' % (item_name, key)] = value
n += 1
items_arr.append(item_name)
result.setlist('shopping-cart.items', items_arr)
return result
def load_child_nodes(self, node, load_attributes=True, load_complex_nodes=True, is_root=False, ignore_nodes=[]):
result={}
if node:
if is_root:
for key, value in node.attributes.items():
result[str(key)] = value
for n in node.childNodes:
if n.localName and n.localName not in ignore_nodes:
if load_attributes:
for key, value in n.attributes.items():
if is_root:
result['%s.%s' % (str(n.localName), str(key))] = value
else:
result['%s.%s.%s' % (str(node.localName), str(n.localName), str(key))] = value
if len(n.childNodes) > 1 and load_complex_nodes:
for key, value in self.load_child_nodes(n, ignore_nodes=ignore_nodes).items():
if is_root:
result[key] = value
else:
result['%s.%s' % (str(node.localName), str(key))] = value
elif n.firstChild:
if is_root:
result[str(n.localName)] = n.firstChild.data
else:
result['%s.%s' % (str(node.localName), str(n.localName))] = n.firstChild.data
else:
if is_root:
result[str(n.localName)] = ""
else:
result['%s.%s' % (str(node.localName), str(n.localName))] = ""
return result
| |
import mdtraj
import numpy as np
from subprocess import call, PIPE
def write_cpptraj_script(traj, top, frame1=1, frame2=1,
outfile=None, write=True, run=False):
"""
Create a cpptraj script to load specific range of frames from a trajectory and write them out to a file
:param traj: str, Location in disk of trajectories to load
:param top: str, Location in disk of the topology file
:param frame1: int, The first frame to load
:param frame2: int, The last frame to load
:param outfile: str, Name (with file format extension) of the output trajectory
:param write: bool, Whether to write the script to a file in disk
:param run: bool, Whether to run the script after writing it to disk
:return cmds: str, the string representing the cpptraj script
"""
if run and not write:
raise ValueError('Cannot call the script without writing it to disk')
if outfile is None:
outfile = 'pdbs/' + traj.split('.')[0] + '.pdb'
commands = [
'parm {}'.format(top),
'trajin {} {} {}'.format(traj, frame1, frame2),
'trajout {}'.format(outfile),
'run'
]
cmds = '\n'.join(commands)
if write:
with open('script.cpptraj', 'w') as f:
f.write(cmds)
if run:
call(['cpptraj', '-i', 'script.cpptraj'], stdout=PIPE)
return cmds
def load_Trajs(trajfiles_list, prmtop_file, stride=1, chunk=1000):
"""
Iteratively loads a list of NetCDF files and returns them
as a list of mdtraj.Trajectory objects
Parameters
----------
trajfiles_list: list of str
List with the names of trajectory files
prmtop_file: str
Name of the prmtop file
stride: int
Frames to be used when loading the trajectories
chunk: int
Number of frames to load at once from disk per iteration.
If 0, load all.
Returns
-------
list_chunks: list
List of mdtraj.Trajectory objects, each of 'chunk' lenght
"""
list_chunks = []
for traj in trajfiles_list:
for frag in mdtraj.iterload(traj, chunk=chunk, top=prmtop_file,
stride=stride):
list_chunks.append(frag)
return(list_chunks)
def load_Trajs_generator(trajfiles_list, prmtop_file, stride, chunk):
"""
Iteratively loads a list of NetCDF files and returns them
as an iterable of mdtraj.Trajectory objects
Parameters
----------
trajfiles_list: list of str
List with the names of trajectory files
prmtop_file: str
Name of the prmtop file
stride: int
Frames to be used when loading the trajectories
chunk: int
Number of frames to load at once from disk per iteration.
If 0, load all.
Yields
------
frag: mdtraj.Trajectory
"""
try:
for traj in trajfiles_list:
for frag in mdtraj.iterload(traj, chunk=chunk, top=prmtop_file,
stride=stride):
yield frag
except OSError:
# User passed a single long trajectory as a string
# so there's no need to iterate through it.
for frag in mdtraj.iterload(trajfiles_list,
chunk=chunk,
top=prmtop_file,
stride=stride):
yield frag
def traj_list_to_dict(trajfiles_list, prmtop_file, stride=1):
"""
Loads a list of trajs passed as a list of strings into a
dictionary with keys as integers from 0
"""
trajs_dict = {}
for i, traj in enumerate(trajfiles_list):
trajs_dict[i] = mdtraj.load(traj, top=prmtop_file, stride=stride)
return trajs_dict
def split_trajs_by_type(traj_dict, meta):
"""
Find the kind of types of simulations inside the meta object
and build a dictionary that has them as keys. Then, build a dictionary
of the trajs inside traj_dict that belong to each type.
"""
if len(traj_dict) != len(meta):
raise ValueError('Lengths of traj_dict and meta do not match.')
type_set = set(meta['type'])
# dict which stores each subtype dict of trajs
type_dict = dict.fromkeys(type_set)
for t in type_set:
new_dict = {}
for i, row in meta.iterrows():
if row['type'] == t:
new_dict[i] = traj_dict[i]
type_dict[t] = new_dict
return type_dict
def trim_centers_by_region(clusterer, x1=None, x2=None,
y1=None, y2=None, obs=(0, 1)):
"""
Find the cluster centers that fall within a user-defined region.
:param clusterer: an msmbuilder cluster object
:param x1: float The low limit of the x axis
:param x2: float The high limit of the x axis
:param y1: float The low limit of the y axis
:param y2: float The high limit of the y axis
:param obs: tuple, the dimensions to sample
:return trimmed: np.array, Cluster centers that are within the region
"""
if not hasattr(clusterer, 'cluster_centers_'):
raise AttributeError('The provided clusterer object has no cluster_centers_ property.')
centers = clusterer.cluster_centers_
pruned = centers[:, obs]
if x1 is None:
x1 = np.min(pruned[:, 0])
if y1 is None:
y1 = np.min(pruned[:, 1])
if x2 is None:
x2 = np.max(pruned[:, 0])
if y2 is None:
y2 = np.max(pruned[:, 1])
trimmed = centers[
((pruned[:, 0] > x1) & (pruned[:, 0] < x2)) &
((pruned[:, 1] > y1) & (pruned[:, 1] < y2))
]
return trimmed
def cartesian_product(x, y):
return np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
def generate_traj_from_stateinds(inds, meta, atom_selection='all'):
"""
Concatenate several frames from different trajectories to create a new one.
Parameters
----------
inds: list of tuples, Each element of the list has to be a 2D tuple of ints
(traj_index, frame_index)
meta: a metadata object
atom_selection: str, Which atoms to load
Returns
-------
traj: mdtraj.Trajectory
"""
frame_list = []
for traj_i, frame_i in inds:
top = mdtraj.load_prmtop(meta.loc[traj_i]['top_fn'])
atoms = top.select(atom_selection)
frame_list.append(
mdtraj.load_frame(meta.loc[traj_i]['traj_fn'], atom_indices=atoms,
index=frame_i, top=meta.loc[traj_i]['top_fn'])
)
traj = mdtraj.join(frame_list, check_topology=False)
traj.center_coordinates()
traj.superpose(traj, 0)
return traj
def load_in_vmd(dirname, inds):
k = len(inds[0])
templ = [
'# Defaults',
'mol default material AOChalky',
'mol default representation NewCartoon',
'color Display {Background} white',
'axes location off',
]
for i in range(k):
templ += [
'# State {}'.format(i),
'mol new {}/{:03d}.pdb'.format(dirname, i),
'mol rename top State-{}'.format(i),
'mol modcolor 0 top ColorID {}'.format(i),
'mol drawframes top 0 0:{k}'.format(k=k),
'mol modselect 0 top resid 1 to 161',
'mol modcolor 0 top ColorID 0',
'mol addrep top',
'mol modselect 1 top resid 162 to 248',
'mol modcolor 1 top ColorID 7',
'mol addrep top',
'mol modselect 2 top resid 249 to 419',
'mol modcolor 2 top ColorID 1',
'mol addrep top',
'mol modselect 3 top not protein and not resname CAL',
'mol modstyle 3 top Licorice',
'mol addrep top',
'mol modselect 4 top resname CAL',
'mol modstyle 4 top VDW',
'mol modcolor 4 top ColorID 6'
'',
]
return '\n'.join(templ)
def get_source_sink(msm, clusterer, eigenvector, out_naming='msm'):
"""
Get the source and sink of a given eigenvector, in cluster naming of clusterer object
:param msm:
:param clusterer:
:param eigenvector:
:return:
"""
source_msm_naming = np.argmin(msm.left_eigenvectors_[:, eigenvector])
sink_msm_naming = np.argmax(msm.left_eigenvectors_[:, eigenvector])
source_clusterer_naming = msm.state_labels_[source_msm_naming]
sink_clusterer_naming = msm.state_labels_[sink_msm_naming]
assert msm.mapping_[source_clusterer_naming] == source_msm_naming
assert msm.mapping_[sink_clusterer_naming] == sink_msm_naming
if out_naming == 'msm':
return source_msm_naming, sink_msm_naming
elif out_naming == 'clusterer':
return source_clusterer_naming, sink_clusterer_naming
else:
raise ValueError('out_naming is not valid')
| |
"""Combination of multiple media players for a universal controller."""
from copy import copy
import logging
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_PLAYLIST,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
SERVICE_CLEAR_PLAYLIST,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOURCE,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ENTITY_PICTURE,
ATTR_SUPPORTED_FEATURES,
CONF_NAME,
CONF_STATE,
CONF_STATE_TEMPLATE,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_SHUFFLE_SET,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import EVENT_HOMEASSISTANT_START, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.service import async_call_from_config
_LOGGER = logging.getLogger(__name__)
ATTR_ACTIVE_CHILD = "active_child"
ATTR_DATA = "data"
CONF_ATTRS = "attributes"
CONF_CHILDREN = "children"
CONF_COMMANDS = "commands"
CONF_SERVICE = "service"
CONF_SERVICE_DATA = "service_data"
OFF_STATES = [STATE_IDLE, STATE_OFF, STATE_UNAVAILABLE]
ATTRS_SCHEMA = cv.schema_with_slug_keys(cv.string)
CMD_SCHEMA = cv.schema_with_slug_keys(cv.SERVICE_SCHEMA)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_CHILDREN, default=[]): cv.entity_ids,
vol.Optional(CONF_COMMANDS, default={}): CMD_SCHEMA,
vol.Optional(CONF_ATTRS, default={}): vol.Or(
cv.ensure_list(ATTRS_SCHEMA), ATTRS_SCHEMA
),
vol.Optional(CONF_STATE_TEMPLATE): cv.template,
},
extra=vol.REMOVE_EXTRA,
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the universal media players."""
await async_setup_reload_service(hass, "universal", ["media_player"])
player = UniversalMediaPlayer(
hass,
config.get(CONF_NAME),
config.get(CONF_CHILDREN),
config.get(CONF_COMMANDS),
config.get(CONF_ATTRS),
config.get(CONF_STATE_TEMPLATE),
)
async_add_entities([player])
class UniversalMediaPlayer(MediaPlayerEntity):
"""Representation of an universal media player."""
def __init__(self, hass, name, children, commands, attributes, state_template=None):
"""Initialize the Universal media device."""
self.hass = hass
self._name = name
self._children = children
self._cmds = commands
self._attrs = {}
for key, val in attributes.items():
attr = val.split("|", 1)
if len(attr) == 1:
attr.append(None)
self._attrs[key] = attr
self._child_state = None
self._state_template_result = None
self._state_template = state_template
async def async_added_to_hass(self):
"""Subscribe to children and template state changes."""
@callback
def _async_on_dependency_update(*_):
"""Update ha state when dependencies update."""
self.async_schedule_update_ha_state(True)
@callback
def _async_on_template_update(event, template, last_result, result):
"""Update ha state when dependencies update."""
if isinstance(result, TemplateError):
self._state_template_result = None
else:
self._state_template_result = result
self.async_schedule_update_ha_state(True)
if self._state_template is not None:
result = self.hass.helpers.event.async_track_template_result(
self._state_template, _async_on_template_update
)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, callback(lambda _: result.async_refresh())
)
self.async_on_remove(result.async_remove)
depend = copy(self._children)
for entity in self._attrs.values():
depend.append(entity[0])
self.async_on_remove(
self.hass.helpers.event.async_track_state_change_event(
list(set(depend)), _async_on_dependency_update
)
)
def _entity_lkp(self, entity_id, state_attr=None):
"""Look up an entity state."""
state_obj = self.hass.states.get(entity_id)
if state_obj is None:
return
if state_attr:
return state_obj.attributes.get(state_attr)
return state_obj.state
def _override_or_child_attr(self, attr_name):
"""Return either the override or the active child for attr_name."""
if attr_name in self._attrs:
return self._entity_lkp(
self._attrs[attr_name][0], self._attrs[attr_name][1]
)
return self._child_attr(attr_name)
def _child_attr(self, attr_name):
"""Return the active child's attributes."""
active_child = self._child_state
return active_child.attributes.get(attr_name) if active_child else None
    async def _async_call_service(
        self, service_name, service_data=None, allow_override=False
    ):
        """Call either a specified or active child's service.

        With ``allow_override=True`` and a configured command for
        *service_name*, the configured action is executed instead of
        forwarding to the active child.  Otherwise the call is forwarded
        to the active child player; it is a no-op when no child is active.
        """
        if service_data is None:
            service_data = {}
        if allow_override and service_name in self._cmds:
            # Run the user-configured command in place of the child call.
            await async_call_from_config(
                self.hass,
                self._cmds[service_name],
                variables=service_data,
                blocking=True,
                validate_config=False,
            )
            return
        active_child = self._child_state
        if active_child is None:
            # No child to call service on
            return
        # Retarget the service call at the active child entity.
        service_data[ATTR_ENTITY_ID] = active_child.entity_id
        await self.hass.services.async_call(
            DOMAIN, service_name, service_data, blocking=True, context=self._context
        )
    @property
    def should_poll(self):
        """No polling needed."""
        # State is pushed via the listeners set up in async_added_to_hass.
        return False
    @property
    def master_state(self):
        """Return the master state for entity or None.

        Priority: cached state-template result, then the configured
        "state" attribute override (coerced to STATE_OFF when falsy),
        else None.
        """
        if self._state_template is not None:
            return self._state_template_result
        if CONF_STATE in self._attrs:
            master_state = self._entity_lkp(
                self._attrs[CONF_STATE][0], self._attrs[CONF_STATE][1]
            )
            return master_state if master_state else STATE_OFF
        return None
    @property
    def name(self):
        """Return the name of universal player."""
        return self._name
    @property
    def state(self):
        """Return the current state of media player.
        Off if master state is off
        else Status of first active child
        else master state or off
        """
        master_state = self.master_state  # avoid multiple lookups
        # A state template, when configured, is authoritative.
        if (master_state == STATE_OFF) or (self._state_template is not None):
            return master_state
        active_child = self._child_state
        if active_child:
            return active_child.state
        return master_state if master_state else STATE_OFF
    @property
    def volume_level(self):
        """Volume level of entity specified in attributes or active child."""
        # Override attributes may produce strings or None; coerce defensively.
        try:
            return float(self._override_or_child_attr(ATTR_MEDIA_VOLUME_LEVEL))
        except (TypeError, ValueError):
            return None
    @property
    def is_volume_muted(self):
        """Boolean if volume is muted."""
        # Override entities report "on"/"off"; child players report True/False.
        return self._override_or_child_attr(ATTR_MEDIA_VOLUME_MUTED) in [True, STATE_ON]
    # The media_* / app_* properties below simply mirror the corresponding
    # attribute of the currently active child player (no override support).
    @property
    def media_content_id(self):
        """Return the content ID of current playing media."""
        return self._child_attr(ATTR_MEDIA_CONTENT_ID)
    @property
    def media_content_type(self):
        """Return the content type of current playing media."""
        return self._child_attr(ATTR_MEDIA_CONTENT_TYPE)
    @property
    def media_duration(self):
        """Return the duration of current playing media in seconds."""
        return self._child_attr(ATTR_MEDIA_DURATION)
    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._child_attr(ATTR_ENTITY_PICTURE)
    @property
    def entity_picture(self):
        """
        Return image of the media playing.
        The universal media player doesn't use the parent class logic, since
        the url is coming from child entity pictures which have already been
        sent through the API proxy.
        """
        return self.media_image_url
    @property
    def media_title(self):
        """Title of current playing media."""
        return self._child_attr(ATTR_MEDIA_TITLE)
    @property
    def media_artist(self):
        """Artist of current playing media (Music track only)."""
        return self._child_attr(ATTR_MEDIA_ARTIST)
    @property
    def media_album_name(self):
        """Album name of current playing media (Music track only)."""
        return self._child_attr(ATTR_MEDIA_ALBUM_NAME)
    @property
    def media_album_artist(self):
        """Album artist of current playing media (Music track only)."""
        return self._child_attr(ATTR_MEDIA_ALBUM_ARTIST)
    @property
    def media_track(self):
        """Track number of current playing media (Music track only)."""
        return self._child_attr(ATTR_MEDIA_TRACK)
    @property
    def media_series_title(self):
        """Return the title of the series of current playing media (TV)."""
        return self._child_attr(ATTR_MEDIA_SERIES_TITLE)
    @property
    def media_season(self):
        """Season of current playing media (TV Show only)."""
        return self._child_attr(ATTR_MEDIA_SEASON)
    @property
    def media_episode(self):
        """Episode of current playing media (TV Show only)."""
        return self._child_attr(ATTR_MEDIA_EPISODE)
    @property
    def media_channel(self):
        """Channel currently playing."""
        return self._child_attr(ATTR_MEDIA_CHANNEL)
    @property
    def media_playlist(self):
        """Title of Playlist currently playing."""
        return self._child_attr(ATTR_MEDIA_PLAYLIST)
    @property
    def app_id(self):
        """ID of the current running app."""
        return self._child_attr(ATTR_APP_ID)
    @property
    def app_name(self):
        """Name of the current running app."""
        return self._child_attr(ATTR_APP_NAME)
    # source / source_list / shuffle honour configured attribute overrides.
    @property
    def source(self):
        """Return the current input source of the device."""
        return self._override_or_child_attr(ATTR_INPUT_SOURCE)
    @property
    def source_list(self):
        """List of available input sources."""
        return self._override_or_child_attr(ATTR_INPUT_SOURCE_LIST)
    @property
    def shuffle(self):
        """Boolean if shuffling is enabled."""
        return self._override_or_child_attr(ATTR_MEDIA_SHUFFLE)
@property
def supported_features(self):
"""Flag media player features that are supported."""
flags = self._child_attr(ATTR_SUPPORTED_FEATURES) or 0
if SERVICE_TURN_ON in self._cmds:
flags |= SUPPORT_TURN_ON
if SERVICE_TURN_OFF in self._cmds:
flags |= SUPPORT_TURN_OFF
if any([cmd in self._cmds for cmd in [SERVICE_VOLUME_UP, SERVICE_VOLUME_DOWN]]):
flags |= SUPPORT_VOLUME_STEP
if SERVICE_VOLUME_SET in self._cmds:
flags |= SUPPORT_VOLUME_SET
if SERVICE_VOLUME_MUTE in self._cmds and ATTR_MEDIA_VOLUME_MUTED in self._attrs:
flags |= SUPPORT_VOLUME_MUTE
if SERVICE_SELECT_SOURCE in self._cmds:
flags |= SUPPORT_SELECT_SOURCE
if SERVICE_CLEAR_PLAYLIST in self._cmds:
flags |= SUPPORT_CLEAR_PLAYLIST
if SERVICE_SHUFFLE_SET in self._cmds and ATTR_MEDIA_SHUFFLE in self._attrs:
flags |= SUPPORT_SHUFFLE_SET
return flags
    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        # Expose which child currently drives this player's state, if any.
        active_child = self._child_state
        return {ATTR_ACTIVE_CHILD: active_child.entity_id} if active_child else {}
    @property
    def media_position(self):
        """Position of current playing media in seconds."""
        return self._child_attr(ATTR_MEDIA_POSITION)
    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid."""
        return self._child_attr(ATTR_MEDIA_POSITION_UPDATED_AT)
    # The command handlers below all forward to _async_call_service;
    # those passing allow_override=True can be replaced by a configured
    # command, the rest always target the active child player.
    async def async_turn_on(self):
        """Turn the media player on."""
        await self._async_call_service(SERVICE_TURN_ON, allow_override=True)
    async def async_turn_off(self):
        """Turn the media player off."""
        await self._async_call_service(SERVICE_TURN_OFF, allow_override=True)
    async def async_mute_volume(self, mute):
        """Mute the volume."""
        data = {ATTR_MEDIA_VOLUME_MUTED: mute}
        await self._async_call_service(SERVICE_VOLUME_MUTE, data, allow_override=True)
    async def async_set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        data = {ATTR_MEDIA_VOLUME_LEVEL: volume}
        await self._async_call_service(SERVICE_VOLUME_SET, data, allow_override=True)
    async def async_media_play(self):
        """Send play command."""
        await self._async_call_service(SERVICE_MEDIA_PLAY)
    async def async_media_pause(self):
        """Send pause command."""
        await self._async_call_service(SERVICE_MEDIA_PAUSE)
    async def async_media_stop(self):
        """Send stop command."""
        await self._async_call_service(SERVICE_MEDIA_STOP)
    async def async_media_previous_track(self):
        """Send previous track command."""
        await self._async_call_service(SERVICE_MEDIA_PREVIOUS_TRACK)
    async def async_media_next_track(self):
        """Send next track command."""
        await self._async_call_service(SERVICE_MEDIA_NEXT_TRACK)
    async def async_media_seek(self, position):
        """Send seek command."""
        data = {ATTR_MEDIA_SEEK_POSITION: position}
        await self._async_call_service(SERVICE_MEDIA_SEEK, data)
    async def async_play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media."""
        data = {ATTR_MEDIA_CONTENT_TYPE: media_type, ATTR_MEDIA_CONTENT_ID: media_id}
        await self._async_call_service(SERVICE_PLAY_MEDIA, data)
    async def async_volume_up(self):
        """Turn volume up for media player."""
        await self._async_call_service(SERVICE_VOLUME_UP, allow_override=True)
    async def async_volume_down(self):
        """Turn volume down for media player."""
        await self._async_call_service(SERVICE_VOLUME_DOWN, allow_override=True)
    async def async_media_play_pause(self):
        """Play or pause the media player."""
        await self._async_call_service(SERVICE_MEDIA_PLAY_PAUSE)
    async def async_select_source(self, source):
        """Set the input source."""
        data = {ATTR_INPUT_SOURCE: source}
        await self._async_call_service(SERVICE_SELECT_SOURCE, data, allow_override=True)
    async def async_clear_playlist(self):
        """Clear players playlist."""
        await self._async_call_service(SERVICE_CLEAR_PLAYLIST)
    async def async_set_shuffle(self, shuffle):
        """Enable/disable shuffling."""
        data = {ATTR_MEDIA_SHUFFLE: shuffle}
        await self._async_call_service(SERVICE_SHUFFLE_SET, data, allow_override=True)
async def async_update(self):
"""Update state in HA."""
for child_name in self._children:
child_state = self.hass.states.get(child_name)
if child_state and child_state.state not in OFF_STATES:
self._child_state = child_state
return
self._child_state = None
| |
#!/usr/bin/env python
"""
Compliance Checker
"""
from functools import wraps
import pprint
import warnings
from netCDF4 import Dataset
from owslib.swe.observation.sos100 import SensorObservationService_1_0_0
from owslib.swe.sensor.sml import SensorML
from owslib.namespaces import Namespaces
from compliance_checker import __version__, MemoizedDataset
from compliance_checker.util import kvp_convert
from collections import defaultdict
from lxml import etree
import sys
import re
import csv
from io import StringIO
import validators
import itertools
# Python 3.5+ should work, also have a fallback
try:
from typing import Pattern
re_pattern_type = Pattern
except ImportError:
re_pattern_type = type(re.compile(''))
def get_namespaces():
    """Return the XML prefix->namespace map used by the SOS/SensorML checks.

    The "ows" prefix is pinned to the OWS 1.1.0 namespace.
    """
    namespaces = Namespaces()
    mapping = namespaces.get_namespaces(["ogc", "sml", "gml", "sos", "swe", "xlink"])
    mapping["ows"] = namespaces.get_namespace("ows110")
    return mapping
def csv_splitter(input_string):
    """
    csv_splitter(input_string)

    Splits a string in CSV format and returns a flattened list.
    Multi-line input yields one CSV row per line; all rows are
    flattened into a single list of fields.

    Parameters:
    -----------
    input_string: str
        The string to be processed

    Returns:
    --------
    list of str
        A flattened list from the CSV processing contents
    """
    rows = csv.reader(StringIO(input_string))
    return [field for row in rows for field in row]
class ValidationObject(object):
    """Base class for attribute validators.

    Subclasses set ``expected_type`` (an optional type gate applied before
    validation) and ``validator_fail_msg`` (a format string taking the
    attribute name), and implement ``validator_func``.
    """
    validator_fail_msg = ''
    expected_type = None
    def __init__(self, split_func=None):
        # split_func preprocesses the raw value into the list of values to
        # validate individually; the default validates the value as-is.
        if split_func is None:
            self.split_func = lambda x: [x]
        else:
            self.split_func = split_func
    def validator_func(self, input_value):
        """
        validator_func(self, input_value)
        Function that should validate the result of a given input value.
        Must return a truthy value when *input_value* is valid.
        """
        raise NotImplementedError
    def validate(self, input_name, input_value):
        """Validate *input_value* for attribute *input_name*.

        :returns: (passed, message) tuple; message is None on success.
        """
        if self.expected_type is not None:
            type_result = self.validate_type(input_name, input_value)
            if not type_result[0]:
                return type_result
        # Removed an unused `validator_stat` local that was assigned but
        # never read in the original implementation.
        for processed_value in self.split_func(input_value):
            if not self.validator_func(processed_value):
                return False, self.validator_fail_msg.format(input_name)
        # if all pass, then we're good.
        return True, None
    def validate_type(self, input_name, input_value):
        """Check that *input_value* is an instance of ``expected_type``."""
        if not isinstance(input_value, self.expected_type):
            expected_type_fmt = "Attribute {} should be instance of type {}"
            return (False,
                    [expected_type_fmt.format(input_name,
                                              self.expected_type.__name__)])
        else:
            return True, None
class EmailValidator(ValidationObject):
    """Validate that a string attribute is a syntactically valid email address."""
    validator_fail_msg = "{} must be a valid email address"
    expected_type = str
    def validator_func(self, input_value):
        # Delegates to the third-party `validators` package; validate() only
        # checks truthiness of this result.
        return validators.email(input_value)
class RegexValidator(ValidationObject):
    """Validate a string attribute against ``validator_regex``.

    The default pattern rejects only the empty string; subclasses may
    override the pattern and failure message together.
    """
    expected_type = str
    validator_regex = r'^.+$'
    validator_fail_msg = "{} must not be an empty string"
    def validator_func(self, input_value):
        match = re.search(self.validator_regex, input_value)
        return match is not None
class UrlValidator(ValidationObject):
    """Validate that a string attribute is a well-formed URL."""
    validator_fail_msg = "{} must be a valid URL"
    expected_type = str
    def validator_func(self, input_value):
        # validators.url comes from the third-party `validators` package;
        # coerce its result to a plain bool.
        return bool(validators.url(input_value))
# Simple class for Generic File type (default to this if file not recognised)
class GenericFile(object):
    """
    Simple class for any file. Has same path lookup as netCDF4.Dataset.
    """
    def __init__(self, fpath):
        # Store the supplied path verbatim; no validation is performed.
        self.fpath = fpath
    def filepath(self):
        """Return the path this object was constructed with."""
        return self.fpath
class BaseCheck(object):
    """Common base for checker classes: severity levels and a TestCtx cache."""
    # Severity levels used throughout the framework.
    HIGH = 3
    MEDIUM = 2
    LOW = 1
    _cc_checker_version = __version__
    # Human-readable report headers keyed by severity level.
    _cc_display_headers = {
        3: 'High Priority',
        2: 'Medium Priority',
        1: 'Low Priority'
    }
    # Dataset types this checker can operate on; subclasses override.
    supported_ds = []
    def setup(self, ds):
        """
        Common setup method for a Checker.
        Automatically run when running a CheckSuite. Define this method in your Checker class.
        """
        pass
    def __init__(self, options=None):
        # Nested cache keyed as [check name][variable][severity] -> TestCtx.
        self._defined_results = defaultdict(lambda: defaultdict(dict))
        if options is None:
            self.options = set()
        else:
            self.options = options
    def get_test_ctx(self, severity, name, variable=None):
        """
        Creates an existing TestCtx object in _defined_results dict if it does
        not exist for the current checker instance, or an returns the existing
        TestCtx for modification. Takes a severity level and name and uses the
        two element tuple formed by the arguments as a key into the dict.

        :param int severity: A BaseCheck severity level
        :param str name: The name of the check
        :rtype compliance_checker.base.TestCtx:
        :returns: A new or or existing `TestCtx` instance taken from this
                  instance's _defined_results dict
        """
        # Is it necessary to key out by severity? Is severity level unique
        # per check? If so, it could be eliminated from key hierarchy
        if severity not in self._defined_results[name][variable]:
            self._defined_results[name][variable][severity] = \
                TestCtx(severity, name, variable=variable)
        return self._defined_results[name][variable][severity]
class BaseNCCheck(object):
    """
    Base Class for NetCDF Dataset supporting Check Suites.
    """
    supported_ds = {Dataset, MemoizedDataset}
    @classmethod
    def std_check_in(cls, dataset, name, allowed_vals):
        """
        Return 0 if the attribute is absent, 1 if present with an
        unexpected value, and 2 if present with an allowed value.
        """
        if name not in dataset.ncattrs():
            return 0
        return 2 if dataset.getncattr(name) in allowed_vals else 1
    @classmethod
    def std_check(cls, dataset, name):
        """Return True when *dataset* carries an attribute named *name*."""
        return name in dataset.ncattrs()
class BaseSOSGCCheck(object):
    """
    Base class for SOS-GetCapabilities supporting Check Suites.
    """
    # Responses parsed by owslib's SOS 1.0.0 client.
    supported_ds = [SensorObservationService_1_0_0]
class BaseSOSDSCheck(object):
    """
    Base class for SOS-DescribeSensor supporting Check Suites.
    """
    # Responses parsed by owslib's SensorML reader.
    supported_ds = [SensorML]
class Result(object):
    """
    Holds the result of a check method.

    Stores such information as the check's value (True, False, a 2-tuple of
    (pass, total) or None for a skip), weight of the check, any granular
    messages, or a hierarchy of results. If given value is not a tuple, it
    is cast as a boolean using the bool() function.

    Stores the checker instance and the check method that produced this result.
    """
    def __init__(self,
                 weight=BaseCheck.MEDIUM,
                 value=None,
                 name=None,
                 msgs=None,
                 children=None,
                 checker=None,
                 check_method=None,
                 variable_name=None):
        self.weight = weight
        # None means "skip"; tuples are (passed, total) scores; anything
        # else is coerced to a plain pass/fail boolean.
        if value is None:
            self.value = None
        elif isinstance(value, tuple):
            assert len(value) == 2, 'Result value must be 2-tuple or boolean!'
            self.value = value
        else:
            self.value = bool(value)
        self.name = name
        self.msgs = msgs if msgs else []
        self.children = children if children else []
        self.checker = checker
        self.check_method = check_method
        self.variable_name = variable_name
    def __repr__(self):
        pieces = ['{} (*{}): {}'.format(self.name, self.weight, self.value)]
        if self.msgs:
            if len(self.msgs) == 1:
                pieces.append(' ({})'.format(self.msgs[0]))
            else:
                pieces.append(' ({!s} msgs)'.format(len(self.msgs)))
        if self.children:
            pieces.append(' ({!s} children)'.format(len(self.children)))
            pieces.append('\n' + pprint.pformat(self.children))
        return ''.join(pieces)
    def serialize(self):
        '''
        Returns a serializable dictionary that represents the result object
        '''
        return {
            'name': self.name,
            'weight': self.weight,
            'value': self.value,
            'msgs': self.msgs,
            'children': [child.serialize() for child in self.children]
        }
    def __eq__(self, other):
        # Two results are equal when their serialized forms match.
        return self.serialize() == other.serialize()
class TestCtx(object):
    '''
    Simple struct object that holds score values and messages to compile into a result
    '''
    def __init__(self, category=None, description='', out_of=0, score=0,
                 messages=None, variable=None):
        # Falsy inputs fall back to defaults (LOW severity, empty containers).
        self.category = category if category else BaseCheck.LOW
        self.out_of = out_of
        self.score = score
        self.messages = messages if messages else []
        self.description = description if description else ''
        self.variable = variable
    def to_result(self):
        """Convert this context into a Result scored as (score, out_of)."""
        return Result(self.category, (self.score, self.out_of),
                      self.description, self.messages,
                      variable_name=self.variable)
    def assert_true(self, test, message):
        '''
        Increments score if test is true otherwise appends a message

        :rtype: bool
        :return: Boolean indicating whether test condition passed or not
        '''
        self.out_of += 1
        if not test:
            self.messages.append(message)
        else:
            self.score += 1
        return test
def std_check_in(base_context, name, allowed_vals):
    """
    Check that a value is contained within an iterable

    Parameters:
    -----------
    base_context: netCDF4.Dataset or netCDF4.variable
        The context in which to look for the attribute, either a
        netCDF4.Dataset or netCDF4.Variable. If a netCDF dataset,
        the attribute is searched for in the global attributes.
        If a variable, the attributes are limited to those contained
        in the corresponding variable.
    name: str
        The name of the attribute to search for.
    allowed_vals: iterable
        An iterable, usually a set, which provides the possible valid values
        for the attribute.

    Returns:
    --------
    int
        Returns 0 if attr not present, 1 if present but not in correct value,
        2 if good.
    """
    if not hasattr(base_context, name):
        return 0
    return 2 if base_context.getncattr(name) in allowed_vals else 1
def std_check(dataset, name):
    """Return True if *dataset* has an attribute named *name*.

    The previous implementation re-fetched the attribute with getattr()
    after the hasattr() test only to discard the result; hasattr already
    performs that lookup, so the extra call was redundant.
    """
    return hasattr(dataset, name)
def xpath_check(tree, xpath):
    """Return True when applying *xpath* to *tree* yields at least one node."""
    # xpath is a compiled callable (e.g. etree.XPath) returning matches.
    return len(xpath(tree)) != 0
def maybe_get_global_attr(attr_name, ds):
    """Return (True, value) when *ds* has global attribute *attr_name*,
    else (False, [error message])."""
    if attr_name not in ds.ncattrs():
        err_msg = "{} not present"
        return False, [err_msg.format(attr_name)]
    return True, ds.getncattr(attr_name)
def attr_check(kvp, ds, priority, ret_val, gname=None, var_name=None):
    """
    Handles attribute checks for simple presence of an attribute, presence of
    one of several attributes, and passing a validation function. Returns a
    status along with an error message in the event of a failure. Mutates
    ret_val parameter.

    The check spec (second element of *kvp*) selects the branch:
    None -> presence/non-empty check; iterable -> allowed-value check;
    etree.XPath -> XPath match; ValidationObject -> custom validator;
    compiled regex -> pattern match; callable -> delegated check.

    :param tuple(str, func) or str l: the attribute being checked
    :param netCDF4 dataset ds : dataset being checked
    :param int priority : priority level of check
    :param list ret_val : result to be returned
    :param str or None gname : group name assigned to a group of attribute Results
    :param str or None var_name : name of the variable which contains this attribute
    """
    msgs = []
    name, other = kvp
    # Variable-level checks look inside ds.variables; otherwise the global
    # attributes of the dataset itself are inspected.
    if var_name is not None:
        display_name = "attribute {} in variable {}".format(name, var_name)
        base_context = ds.variables[var_name]
    else:
        display_name = name
        base_context = ds
    if other is None:
        res = std_check(ds, name)
        if not res:
            msgs = ["{} not present".format(display_name)]
        else:
            try:
                # see if this attribute is a string, try stripping
                # whitespace, and return an error if empty
                att_strip = base_context.getncattr(name).strip()
                if not att_strip:
                    res = False
                    msgs = ["{} is empty or completely whitespace".format(
                            display_name)]
            # if not a string/has no strip method we should be OK
            except AttributeError:
                pass
        # gname arg allows the global attrs to be grouped together
        ret_val.append(Result(
            priority,
            value=res,
            name=gname if gname else name,
            msgs=msgs,
            variable_name=var_name
        ))
    elif hasattr(other, '__iter__'):
        # redundant, we could easily do this with a hasattr
        # check instead
        res = std_check_in(base_context, name, other)
        if res == 0:
            msgs.append("{} not present".format(display_name))
        elif res == 1:
            msgs.append("{} present, but not in expected value list ({})"
                        .format(display_name, sorted(other)))
        ret_val.append(
            Result(
                priority,
                (res, 2),
                gname if gname else name, # groups Globals if supplied
                msgs,
                variable_name=var_name
            )
        )
    # if we have an XPath expression, call it on the document
    elif type(other) is etree.XPath:
        # TODO: store tree instead of creating it each time?
        # no execution path for variable
        res = xpath_check(ds._root, other)
        if not res:
            msgs = ["XPath for {} not found".format(display_name)]
        ret_val.append(
            Result(
                priority,
                res,
                gname if gname else name,
                msgs,
                variable_name=var_name
            )
        )
    # check if this is a subclass of ValidationObject
    elif isinstance(other, ValidationObject):
        attr_result = maybe_get_global_attr(name, ds)
        if not attr_result[0]:
            res_tup = attr_result
        else:
            check_val = attr_result[1]
            res_tup = other.validate(name, check_val)
        # NOTE(review): res_tup[1] may itself be a list (from
        # validate_type / maybe_get_global_attr), in which case msgs
        # becomes a nested list -- confirm downstream consumers cope.
        msgs = [] if res_tup[1] is None else [res_tup[1]]
        ret_val.append(
            Result(
                priority,
                res_tup[0],
                name,
                msgs
            )
        )
    elif isinstance(other, re_pattern_type):
        attr_result = maybe_get_global_attr(name, ds)
        if not attr_result[0]:
            # NOTE(review): unlike every other branch, this returns early
            # WITHOUT appending a Result to ret_val, so a missing attribute
            # checked against a regex is silently dropped from the report.
            # Looks unintentional -- confirm before changing.
            return attr_result
        else:
            check_val = attr_result[1]
        if not isinstance(check_val, str):
            res = False
            msgs = ["{} must be a string".format(name)]
        elif not other.search(check_val):
            res = False
            msgs = ["{} must match regular expression {}".format(name, other)]
        else:
            res = True
            msgs = []
        ret_val.append(Result(
            priority,
            value=res,
            name=gname if gname else name,
            msgs=msgs
        ))
    # if the attribute is a function, call it
    # right now only supports single attribute
    # important note: current magic approach uses all functions
    # starting with "check". Avoid naming check functions
    # starting with check if you want to pass them in with
    # a tuple to avoid them being checked more than once
    elif hasattr(other, '__call__'):
        # check that the attribute is actually present.
        # This reduces boilerplate in functions by not needing
        # to check whether the attribute is present every time
        # and instead focuses on the core functionality of the
        # test
        res = other(base_context) # call the method on the dataset
        if not res:
            msgs = ["{} not present".format(display_name)]
            ret_val.append(
                Result(
                    priority,
                    res,
                    gname if gname else name,
                    msgs,
                    variable_name=var_name
                )
            )
        else:
            # A truthy result is expected to be a ratable_result partial;
            # weight it with the check priority.
            ret_val.append(res(priority))
    # unsupported second type in second
    else:
        raise TypeError("Second arg in tuple has unsupported type: {}"
                        .format(type(other)))
    return ret_val
def check_has(priority=BaseCheck.HIGH, gname=None):
    """Decorator to wrap a function to check if a dataset has given attributes.
    :param function func: function to wrap"""
    def _inner(func):
        def _dec(s, ds):
            # kvp_convert normalises the wrapped function's return value
            # into an ordered mapping of attribute name -> check spec.
            attr_process = kvp_convert(func(s, ds))
            results = []
            # could potentially run tests in parallel if we eliminated side
            # effects on the results list
            for kvp in attr_process.items():
                # attr_check appends its Results onto the list it is given
                attr_check(kvp, ds, priority, results, gname)
            return results
        return wraps(func)(_dec)
    return _inner
def fix_return_value(v, method_name, method=None, checker=None):
    """
    Transforms scalar return values into Result.
    """
    # strip the conventional "check_" prefix from the method name
    method_name = (method_name or method.__func__.__name__).replace("check_", "")
    # None (and any other non-Result value) is wrapped in a fresh Result
    if not isinstance(v, Result):
        v = Result(value=v, name=method_name)
    v.name = v.name or method_name
    v.checker = checker
    v.check_method = method
    return v
def ratable_result(value, name, msgs, variable_name=None):
    """Returns a partial function with a Result that has not been weighted."""
    def _weighted(w):
        return Result(w, value, name, msgs, variable_name=variable_name)
    return _weighted
def score_group(group_name=None):
    '''
    Warning this is deprecated as of Compliance Checker v3.2!
    Please do not using scoring groups and update your plugins
    if necessary
    '''
    warnings.warn('Score_group is deprecated as of Compliance Checker v3.2.')
    def _inner(func):
        def _dec(s, ds):
            ret_val = func(s, ds)
            """
            if group_name != None and not isinstance(ret_val[0], tuple):
                return tuple([(group_name, ret_val[0])] + list(ret_val[1:]))
            """
            # multiple returns
            if not isinstance(ret_val, list):
                ret_val = [ret_val]
            def dogroup(r):
                # Prepend group_name onto the Result's name, flattening any
                # existing tuple/list grouping into a list first.
                cur_grouping = r.name
                if isinstance(cur_grouping, tuple):
                    cur_grouping = list(cur_grouping)
                elif not isinstance(cur_grouping, list):
                    cur_grouping = [cur_grouping]
                cur_grouping.insert(0, group_name)
                return Result(r.weight, r.value, tuple(cur_grouping), r.msgs)
            # Normalise scalar returns into Result objects before grouping.
            ret_val = [fix_return_value(x, func.__name__, func, s) for x in
                       ret_val]
            ret_val = list(map(dogroup, ret_val))
            return ret_val
        return wraps(func)(_dec)
    return _inner
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide suport modules for testing Bokeh itself.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import io
import os
from os.path import join, exists, dirname, basename, normpath, relpath, splitext, isfile, isdir
import yaml
from subprocess import Popen, PIPE
from base64 import b64decode
# External imports
import requests
# Bokeh imports
from bokeh._testing.util.git import __version__
from bokeh._testing.util.s3 import S3_URL, upload_file_to_s3
from bokeh._testing.util.travis import JOB_ID
from bokeh._testing.util.images import image_diff
from bokeh.util.terminal import trace, green
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'add_examples',
'collect_examples',
'Example',
'Flags',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Flags(object):
    """Bit flags describing how an example is executed and verified."""
    js = 1 << 0        # standalone bokehjs example
    file = 1 << 1      # python script producing an output file
    server = 1 << 2    # bokeh server app
    notebook = 1 << 3  # jupyter notebook
    slow = 1 << 4      # example needs a lot of time to run (> 30 s) (e.g. choropleth.py)
    skip = 1 << 5      # don't run example at all (e.g. notebooks are completely broken)
    xfail = 1 << 6     # test is expected to fail, which doesn't fail the test suite
    no_js = 1 << 7     # skip bokehjs and thus image diff (e.g. google maps key issue)
    no_diff = 1 << 8   # skip only image diff (e.g. inherent randomness as in jitter)
class Example(object):
    """One collected example plus its baseline and screenshot bookkeeping.

    :param path: path of the example file
    :param flags: bitwise-or of ``Flags`` values describing the example
    :param examples_dir: root directory the example was collected from
    """
    def __init__(self, path, flags, examples_dir):
        self.path = normpath(path)
        self.flags = flags
        self.examples_dir = examples_dir
        self._diff_ref = None       # git ref whose images we diff against
        self._upload = False        # when True, *_path_or_url yield S3 URLs
        self.pixels = 0             # differing pixel count of last image diff
        self._has_ref = None        # cached result of fetch_ref()
        self._has_baseline = None   # cached result of the git baseline lookup
        self._baseline_ok = True
    def __str__(self):
        flags = [
            "js" if self.is_js else "",
            "file" if self.is_file else "",
            "server" if self.is_server else "",
            "notebook" if self.is_notebook else "",
            "slow" if self.is_slow else "",
            "skip" if self.is_skip else "",
            # BUG FIX: previously read the non-existent attribute self.xfail
            # (the property is named is_xfail), raising AttributeError.
            "xfail" if self.is_xfail else "",
            "no_js" if self.no_js else "",
            "no_diff" if self.no_diff else "",
        ]
        return "Example(%r, %s)" % (self.relpath, "|".join(f for f in flags if f))
    __repr__ = __str__
    @property
    def name(self):
        """Base name of the example, without directory or extension."""
        return basename(self.path_no_ext)
    @property
    def base_dir(self):
        """Directory containing the example."""
        return dirname(self.path)
    @property
    def imgs_dir(self):
        """Directory where generated screenshots for this example live."""
        return join(dirname(self.path), ".tests")
    @property
    def relpath(self):
        """Example path relative to the examples root directory."""
        return relpath(self.path, self.examples_dir)
    @property
    def path_no_ext(self):
        """Example path with the file extension stripped."""
        return splitext(self.path)[0]
    @property
    def relpath_no_ext(self):
        """Relative example path with the file extension stripped."""
        return splitext(self.relpath)[0]
    # --- flag accessors: each returns the (truthy/falsy) masked bit -------
    @property
    def is_js(self):
        return self.flags & Flags.js
    @property
    def is_file(self):
        return self.flags & Flags.file
    @property
    def is_server(self):
        return self.flags & Flags.server
    @property
    def is_notebook(self):
        return self.flags & Flags.notebook
    @property
    def is_slow(self):
        return self.flags & Flags.slow
    @property
    def is_skip(self):
        return self.flags & Flags.skip
    @property
    def is_xfail(self):
        return self.flags & Flags.xfail
    @property
    def no_js(self):
        return self.flags & Flags.no_js
    @property
    def no_diff(self):
        return self.flags & Flags.no_diff
    # --- baseline handling -------------------------------------------------
    @property
    def baseline_ok(self):
        """True when a baseline exists and the last diff against it was clean."""
        return self.has_baseline and self._baseline_ok
    @property
    def baseline_path(self):
        return join("tests", "baselines", relpath(self.path_no_ext, ""))
    @property
    def has_baseline(self):
        """Whether a baseline file is tracked in the git index (cached)."""
        if self._has_baseline is None:
            # "git show :<path>" succeeds only if the path is in the index.
            cmd = ["git", "show", ":%s" % self.baseline_path]
            proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
            proc.communicate()
            self._has_baseline = proc.returncode == 0
        return self._has_baseline
    def store_baseline(self, baseline):
        """Write *baseline* text to the baseline path, creating directories."""
        path = self.baseline_path
        if not exists(dirname(path)):
            os.makedirs(dirname(path))
        with io.open(path, "w", newline="\n") as f:
            f.write(baseline)
    def diff_baseline(self):
        """Return a colored diff against the stored baseline, or None if clean.

        Marks the baseline as not OK when a difference is found.  The diff is
        passed through git's diff-highlight helper when that tool succeeds.
        """
        cmd = ["git", "diff", "--color", "--exit-code", "%s" % self.baseline_path]
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        (diff, _) = proc.communicate()
        if proc.returncode == 0:
            return None
        cmd = ["perl", "/usr/share/doc/git/contrib/diff-highlight/diff-highlight"]
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        (hl_diff, _) = proc.communicate(diff)
        if proc.returncode == 0:
            diff = hl_diff
        self._baseline_ok = False
        return diff.decode("utf-8").strip()
    # --- image paths / URLs -------------------------------------------------
    @property
    def img_path_or_url(self):
        return self.img_path if not self._upload else self.img_url
    @property
    def ref_path_or_url(self):
        return self.ref_path if not self._upload else self.ref_url
    @property
    def diff_path_or_url(self):
        return self.diff_path if not self._upload else self.diff_url
    @property
    def img_path(self):
        return join(self.imgs_dir, "%s-%s-%s.png" % (self.name, __version__, JOB_ID))
    @property
    def ref_path(self):
        return join(self.imgs_dir, "%s-%s-%s.png" % (self.name, self._diff_ref, JOB_ID))
    @property
    def diff_path(self):
        return join(self.imgs_dir, "%s-%s-%s-diff-%s.png" % (self.name, __version__, self._diff_ref, JOB_ID))
    @property
    def img_url(self):
        return join(S3_URL, self.img_url_path)
    @property
    def ref_url(self):
        return join(S3_URL, self.ref_url_path)
    @property
    def diff_url(self):
        return join(S3_URL, self.diff_url_path)
    @property
    def img_url_path(self):
        return join("travis", "image_refs", __version__, self.relpath_no_ext) + '.png'
    @property
    def diff_url_path(self):
        # NOTE(review): _diff_ref is concatenated without a separator,
        # unlike diff_path -- looks suspicious but kept as-is; confirm
        # against the S3 layout before changing.
        return join("travis", "image_refs", __version__, self.relpath_no_ext) + self._diff_ref + '-diff.png'
    @property
    def ref_url_path(self):
        return join("travis", "image_refs", self._diff_ref, self.relpath_no_ext) + '.png'
    # --- reference image handling -------------------------------------------
    @property
    def has_ref(self):
        return self._has_ref
    def fetch_ref(self):
        """Download the reference image once, caching the success flag."""
        if self._has_ref is None:
            response = requests.get(self.ref_url)
            self._has_ref = response.ok
            if response.ok:
                _store_binary(self.ref_path, response.content)
        return self._has_ref
    def store_img(self, img_data):
        """Store base64-encoded *img_data* as this example's screenshot."""
        _store_binary(self.img_path, b64decode(img_data))
    def upload_imgs(self):
        """Upload the screenshot and diff image to S3, if they exist."""
        if isfile(self.img_path):
            trace("%s Uploading image to S3 to %s" % (green(">>>"), self.img_path))
            upload_file_to_s3(self.img_path, self.img_url_path, "image/png")
        if isfile(self.diff_path):
            trace("%s Uploading image to S3 to %s" % (green(">>>"), self.diff_path))
            upload_file_to_s3(self.diff_path, self.diff_url_path, "image/png")
    @property
    def images_differ(self):
        return self.pixels != 0
    def image_diff(self):
        """Run the image diff and return the number of differing pixels."""
        self.pixels = image_diff(self.diff_path, self.img_path, self.ref_path)
        return self.pixels
def add_examples(list_of_examples, path, examples_dir, example_type=None, slow=None, skip=None, xfail=None, no_js=None, no_diff=None):
    """Scan *path* under *examples_dir* and append Example objects in place.

    A trailing "*" in *path* recurses into every subdirectory instead of
    scanning files.  Entry kind (file / notebook / js / server) is inferred
    from the name unless *example_type* overrides it; the slow/skip/xfail/
    no_js/no_diff collections mark matching example names with extra flags.
    """
    # "dir/*" form: recurse into each subdirectory of dir, then stop
    if path.endswith("*"):
        star_path = join(examples_dir, path[:-1])
        for name in sorted(os.listdir(star_path)):
            if isdir(join(star_path, name)):
                add_examples(list_of_examples, join(path[:-1], name), examples_dir, example_type, slow, skip, xfail, no_js, no_diff)
        return
    example_path = join(examples_dir, path)
    for name in sorted(os.listdir(example_path)):
        flags = 0
        # keep the raw name: the status lists below match on it even when
        # `name` is rewritten to point at a file inside the directory
        orig_name = name
        if name.startswith(('_', '.')):
            continue
        elif name.endswith(".py"):
            flags |= example_type if example_type else Flags.file
        elif name.endswith(".ipynb"):
            flags |= Flags.notebook
        elif isdir(join(example_path, name)):
            if exists(join(example_path, name, name + ".html")):
                name = join(name, name + ".html")
                flags |= example_type if example_type else Flags.js
            elif exists(join(example_path, name, name + ".py")):
                name = join(name, name + ".py")
                flags |= example_type if example_type else Flags.file
            elif exists(join(example_path, name, "main.py")):
                # name is unchanged and passed as the example name
                flags |= example_type if example_type else Flags.server
            else:
                continue
        else:
            continue
        # status collections come from the YAML config; note 'slow' has no
        # 'all' shorthand, unlike the other four
        if slow is not None and orig_name in slow:
            flags |= Flags.slow
        if skip is not None and (skip == 'all' or orig_name in skip):
            flags |= Flags.skip
        if xfail is not None and (xfail == 'all' or orig_name in xfail):
            flags |= Flags.xfail
        if no_js is not None and (no_js == 'all' or orig_name in no_js):
            flags |= Flags.no_js
        if no_diff is not None and (no_diff == 'all' or orig_name in no_diff):
            flags |= Flags.no_diff
        list_of_examples.append(Example(join(example_path, name), flags, examples_dir))
def collect_examples(config_path):
    """Parse the examples YAML config and return the list of Example objects.

    Each config entry supplies a path plus optional type and status
    (slow/skip/xfail/no_js/no_diff) information for add_examples().
    """
    examples_dir = dirname(config_path)
    all_examples = []
    with open(config_path, "r") as f:
        config = yaml.safe_load(f.read())
    for entry in config:
        type_name = entry.get("type")
        example_type = getattr(Flags, type_name) if type_name is not None else None
        add_examples(all_examples, entry["path"], examples_dir,
                     example_type=example_type,
                     slow=entry.get("slow"),
                     skip=entry.get("skip"),
                     xfail=entry.get("xfail"),
                     no_js=entry.get("no_js"),
                     no_diff=entry.get("no_diff"))
    return all_examples
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _store_binary(path, data):
directory = dirname(path)
if not exists(directory):
os.makedirs(directory)
with open(path, "wb") as f:
f.write(data)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
from bitstring import BitStream, BitArray
def getEqualInnerNeighbours(locationalcode):
    """Return the equal-sized inner (same-parent) neighbours of a node.

    The final digit of the materialised path encodes the cell position as a
    3-bit number (zyx order, masks 001 = x, 010 = y, 100 = z).  Toggling one
    bit per dimension yields the sibling cell in that dimension.  The former
    BitArray round-trip is replaced by a plain integer XOR, which computes
    exactly the same value.

    :param locationalcode: materialised-path string, e.g. '151422'
    :returns: [x-neighbour, y-neighbour] locational codes
    """
    leafDecimal = int(locationalcode[-1])
    prefix = locationalcode[:-1]
    # XOR flips the bit of the corresponding dimension at the deepest level
    xMP = "%s%d" % (prefix, leafDecimal ^ 0b001)
    yMP = "%s%d" % (prefix, leafDecimal ^ 0b010)
    # z dimension intentionally disabled (2-D use): leafDecimal ^ 0b100
    return [xMP, yMP]
def getK(locationalcode, dimension):
    """Find the level offset k at which the neighbour in *dimension* splits off.

    Walks the materialised path from the deepest digit upwards.  NOTE: this
    follows the x-case of the reference paper (z in our encoding -- the paper
    has the binary numbers flipped in the matrices of J, B and E); the z
    dimension is currently disabled.

    :param locationalcode: materialised-path string
    :param dimension: 'x' or 'y' (case-insensitive)
    :returns: k (int >= 1); False when no neighbour exists in that
              direction; an error string for an unknown dimension
    """
    dimension = dimension.lower()
    # bit position inside the 3-bit (zyx) digit encoding
    if dimension == 'x':
        bit_pos = 2
    elif dimension == 'y':
        bit_pos = 1
    # elif dimension == 'z':
    #     bit_pos = 0
    else:
        return "Please input a dimension!"
    n = len(locationalcode)
    prevNode = None
    Xn = None
    for i, node in enumerate(reversed(locationalcode)):
        bit = "{0:03b}".format(int(node))[bit_pos]
        if i == 0:
            # deepest digit defines the reference bit Xn
            Xn = bit
        elif bit != Xn:
            # this level's bit differs; accept it if the previous (deeper)
            # level matched the reference bit, i.e. the X(n-k)+1 condition
            if int("{0:03b}".format(int(prevNode))[bit_pos]) == int(Xn):
                return i  # here i == k
        prevNode = node
        if i + 1 == n:
            # ran out of digits: no neighbour in this direction
            return False
def getEqualOuterNeighbours(locationalcode):
    """Return the equal-sized outer (different-parent) neighbours in x and y.

    For each dimension, digits at levels i <= K (K from getK) have their
    dimension bit flipped, digits above level K are copied unchanged.  The
    code is built deepest-digit-first and reversed at the end.

    :returns: [(x-code, Kx), (y-code, Ky)]; the code is '' and K is False
              when no neighbour exists in that direction
    """
    Kx = getK(locationalcode, 'x')
    Ky = getK(locationalcode, 'y')
    # Kz = getK(locationalcode, 'z')
    # take the complement of the materialised path for every dimension
    xMP = ""
    yMP = ""
    # zMP = ""
    for i, node in enumerate(reversed(locationalcode)):
        if Kx == False:
            # no x neighbour exists at all
            pass
        elif i > Kx:
            # above the branch level: keep the digit as-is
            xMP += str(node)
        else:
            # at or below the branch level: flip the x bit (mask 001)
            nodex = BitArray(bin="{0:03b}".format(int(node)))
            xDigit = nodex ^ BitArray(bin='001')
            xMP += str(int(xDigit.bin, 2))
        if Ky == False:
            pass
        elif i > Ky:
            yMP += str(node)
        else:
            # y bit is mask 010
            nodey = BitArray(bin="{0:03b}".format(int(node)))
            yDigit = nodey ^ BitArray(bin='010')
            yMP += str(int(yDigit.bin, 2))
        # if Kz == False:
        #     pass
        # elif i > Kz:
        #     zMP += str(node)
        # else:
        #     nodez = BitArray(bin="{0:03b}".format(int(node)))
        #     zDigit = nodez ^ BitArray(bin='100')
        #     zMP += str(int(zDigit.bin, 2))
    neighbours = []
    # digits were appended in reverse order, so flip them back
    neighbours.append((xMP[::-1], Kx))
    neighbours.append((yMP[::-1], Ky))
    # neighbours.append((zMP[::-1], Kz))
    return neighbours
def getLargerNeighbours(equalNeighbours):
    """Derive all larger (coarser-level) neighbours from the equal-sized ones.

    For an equal-sized neighbour (code, k), every ancestor obtained by
    stripping between 1 and k trailing digits is also a neighbour at a
    coarser level.  A k of False contributes nothing.

    :param equalNeighbours: iterable of (code, K) pairs as produced by
        getEqualOuterNeighbours()
    :returns: (set of larger-neighbour codes,
               [first equal code, second equal code])
    """
    largerNeighbours = set()
    for code, k in equalNeighbours:
        for level in range(len(code)):
            if level < k:
                largerNeighbours.add(code[:-(level + 1)])
    return largerNeighbours, [equalNeighbours[0][0], equalNeighbours[1][0]]
def createMPs(curDict, prevNeighbours):
    """Cross-product concatenation of materialised paths.

    Appends every child digit in *curDict* to every path in
    *prevNeighbours* and returns the resulting set of new paths.
    """
    return {prev + digit for prev in prevNeighbours for digit in curDict}
def getSmallerNeighbours(EqualInnerNeighbours, EqualOuterNeighbours, maxLevels, currentNode):
    """Enumerate all finer-level (smaller-sized) x/y neighbours of a node.

    Starting from the equal-sized inner/outer neighbour codes, the child
    digits that stay adjacent to the shared face are appended level by level
    down to *maxLevels*, and every intermediate code is collected.

    :param EqualInnerNeighbours: [x, y] equal-sized inner neighbour codes
    :param EqualOuterNeighbours: [x, y] equal-sized outer neighbour codes
        ('' when no neighbour exists in that direction)
    :param maxLevels: maximum tree depth to descend to
    :param currentNode: locational code of the node itself
    :returns: set of locational codes of all smaller neighbours
    """
    neighbours = set()
    # Last digit of the node as 3 bits (zyx): index 2 is the x bit, 1 is y
    Xn = int(BitArray(bin="{0:03b}".format(int(currentNode[-1])))[2])
    Yn = int(BitArray(bin="{0:03b}".format(int(currentNode[-1])))[1])
    # Zn = int(BitArray(bin="{0:03b}".format(int(currentNode[-1])))[0])
    # Complement of each bit.  NOTE(review): this relies on bitstring
    # auto-converting the '0b...' string from bin() as the left operand of
    # ^ (BitArray.__rxor__) -- confirm against the bitstring version in use.
    compXn = int((bin(Xn) ^ BitArray(bin='1')).bin)
    compYn = int((bin(Yn) ^ BitArray(bin='1')).bin)
    # compZn = int((bin(Zn) ^ BitArray(bin='1')).bin)
    # Per-level child digits that stay adjacent to each face
    Dict = {
        'innerx' : [],
        'innery' : [],
        # 'innerz' : [],
        'outerx' : [],
        'outery' : [],
        # 'outerz' : []
    }
    for i in range(4):
        # d1/d2 enumerate the two free bits; the remaining bit is fixed so
        # the child stays on the relevant face
        d1 = int(BitArray(bin="{0:02b}".format(i))[0])
        d2 = int(BitArray(bin="{0:02b}".format(i))[1])
        Dict['innerx'].append(str(int(BitArray(bin="{0}{1}{2}".format(d1, d2, Xn)).bin, 2)))
        Dict['innery'].append(str(int(BitArray(bin="{0}{1}{2}".format(d1, Yn, d2)).bin, 2)))
        # Dict['innerz'].append(str(int(BitArray(bin="{0}{1}{2}".format(Zn, d1, d2)).bin, 2)))
        Dict['outerx'].append(str(int(BitArray(bin="{0}{1}{2}".format(d1, d2, compXn)).bin, 2)))
        Dict['outery'].append(str(int(BitArray(bin="{0}{1}{2}".format(d1, compYn, d2)).bin, 2)))
        # Dict['outerz'].append(str(int(BitArray(bin="{0}{1}{2}".format(compZn, d1, d2)).bin, 2)))
    prevInnerX, prevInnerY, prevOuterX, prevOuterY = ([EqualInnerNeighbours[0]]), ([EqualInnerNeighbours[1]]), ([EqualOuterNeighbours[0]]), ([EqualOuterNeighbours[1]])
    neighboursXi = set()
    neighboursYi = set()
    # neighboursZi = set()
    neighboursXo = set()
    neighboursYo = set()
    # neighboursZo = set()
    # Descend one level per iteration, expanding every partial code by the
    # face-adjacent child digits computed above
    for i in range(maxLevels-len(currentNode)):
        newInnerX = createMPs(Dict['innerx'], prevInnerX)
        for each in newInnerX:
            neighboursXi.add(each)
        prevInnerX = newInnerX
        newInnerY = createMPs(Dict['innery'], prevInnerY)
        for each in newInnerY:
            neighboursYi.add(each)
        prevInnerY = newInnerY
        # an outer code of '' means there is no neighbour in that direction
        if prevOuterX != ['']:
            newOuterX = createMPs(Dict['outerx'], prevOuterX)
            for each in newOuterX:
                neighboursXo.add(each)
            prevOuterX = newOuterX
        if prevOuterY != ['']:
            newOuterY = createMPs(Dict['outery'], prevOuterY)
            for each in newOuterY:
                neighboursYo.add(each)
            prevOuterY = newOuterY
        # if prevOuterZ != ['']:
        #     newOuterZ = createMPs(Dict['outerz'], prevOuterZ)
        #     for each in newOuterZ:
        #         neighboursZo.add(each)
        #     prevOuterZ = newOuterZ
    neighbours = neighboursYo | neighboursXo | neighboursYi | neighboursXi
    return neighbours
def giveMeAllXYNeighbours(currentNode, maxLevels):
    """Collect every x/y neighbour (equal, larger and smaller) of a node.

    :param currentNode: locational code of the node
    :param maxLevels: maximum depth of the tree
    :returns: set of neighbour locational codes
    """
    equal_outer = getEqualOuterNeighbours(currentNode)
    equal_inner = getEqualInnerNeighbours(currentNode)
    # getLargerNeighbours also reduces the outer tuples to plain codes
    larger, equal_outer = getLargerNeighbours(equal_outer)
    smaller = getSmallerNeighbours(equal_inner, equal_outer, maxLevels,
                                   currentNode)
    allNeighbours = set(equal_outer[:2]) | set(equal_inner[:2])
    allNeighbours = allNeighbours.union(larger)
    allNeighbours = allNeighbours.union(smaller)
    return allNeighbours
if (__name__ == '__main__'):
    # Demo run: every x/y neighbour of node '151422' in an 8-level tree
    neighbours = giveMeAllXYNeighbours('151422', 8)
#-----------------------------------------------------------------------------
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import uuid
import mock
from oslo_serialization import jsonutils
import pkg_resources
import requests
from glance.api.v3 import artifacts
from glance.api.v3 import router
from glance.common.artifacts import definitions
from glance.common.artifacts import loader
from glance.common import wsgi
from glance.tests import functional
# Random tenant id shared by every request issued from this test module
TENANT1 = str(uuid.uuid4())
class Artifact(definitions.ArtifactType):
    """Test artifact type exercising every supported property kind."""
    __type_name__ = "WithProps"
    # simple scalar properties
    prop1 = definitions.String()
    prop2 = definitions.Integer()
    # homogeneous list of integers
    prop_list = definitions.Array(item_type=definitions.Integer())
    # fixed-length "tuple" array: (Integer, Boolean)
    tuple_prop = definitions.Array(item_type=[definitions.Integer(),
                                              definitions.Boolean()])
    # dict with a fixed key schema
    dict_prop = definitions.Dict(properties={
        "foo": definitions.String(),
        "bar_list": definitions.Array(definitions.Integer())})
    # dict with arbitrary keys and string values
    dict_prop_strval = definitions.Dict(properties=definitions.String())
    # references to other artifacts
    depends_on = definitions.ArtifactReference()
    depends_on_list = definitions.ArtifactReferenceList()
class ArtifactNoProps(definitions.ArtifactType):
    """Minimal artifact type with no custom properties."""
    __type_name__ = "NoProp"
class ArtifactNoProps1(definitions.ArtifactType):
    """Second version (0.5) of the NoProp type, for version-filtering tests."""
    __type_name__ = "NoProp"
    __type_version__ = "0.5"
class ArtifactWithBlob(definitions.ArtifactType):
    """Artifact type with binary blob properties, for upload tests."""
    __type_name__ = "WithBlob"
    blob1 = definitions.BinaryObject()
    blob_list = definitions.BinaryObjectList()
def _create_resource():
    """Build the v3 artifacts WSGI resource with this module's test plugins.

    stevedore's entry-point discovery is patched so that only the artifact
    types defined above are registered with the plugin loader.
    """
    mock_this = 'stevedore.extension.ExtensionManager._find_entry_points'
    path = 'glance.tests.functional.artifacts.test_artifacts'
    entry_point_specs = [
        'WithProps=%s:Artifact' % path,
        'NoProp=%s:ArtifactNoProps' % path,
        'NoProp=%s:ArtifactNoProps1' % path,
        'WithBlob=%s:ArtifactWithBlob' % path,
    ]
    with mock.patch(mock_this) as fep:
        fep.return_value = [pkg_resources.EntryPoint.parse(spec)
                            for spec in entry_point_specs]
        plugins = loader.ArtifactsPluginLoader('glance.artifacts.types')
    deserializer = artifacts.RequestDeserializer(plugins=plugins)
    serializer = artifacts.ResponseSerializer()
    controller = artifacts.ArtifactsController(plugins=plugins)
    return wsgi.Resource(controller, deserializer, serializer)
class TestRouter(router.API):
    """v3 API router wired to the test artifact plugins."""
    def _get_artifacts_resource(self):
        return _create_resource()
class TestArtifacts(functional.FunctionalTest):
def setUp(self):
    """Start the functional-test API servers before each test."""
    super(TestArtifacts, self).setUp()
    # a copy of the instance attributes is forwarded to start_servers()
    self.start_servers(**self.__dict__.copy())
def tearDown(self):
    """Stop the servers and reset the database between tests."""
    self.stop_servers()
    self._reset_database(self.api_server.sql_connection)
    super(TestArtifacts, self).tearDown()
def _url(self, path):
    """Build a full v3 artifacts URL for *path* on the local test server."""
    base = 'http://127.0.0.1:%d/v3/artifacts' % self.api_port
    return base + path
def _headers(self, custom_headers=None):
    """Default identity headers for requests, overlaid with *custom_headers*."""
    headers = {
        'X-Identity-Status': 'Confirmed',
        'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
        'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
        'X-Tenant-Id': TENANT1,
        'X-Roles': 'member',
    }
    if custom_headers:
        headers.update(custom_headers)
    return headers
def start_servers(self, **kwargs):
    """Start the API servers with a paste pipeline whose /v3 app is this
    module's TestRouter, behind the unauthenticated-context filter."""
    new_paste_conf_base = """[pipeline:glance-api]
pipeline = versionnegotiation gzip unauthenticated-context rootapp
[pipeline:glance-api-caching]
pipeline = versionnegotiation gzip unauthenticated-context cache rootapp
[pipeline:glance-api-cachemanagement]
pipeline =
    versionnegotiation
    gzip
    unauthenticated-context
    cache
    cache_manage
    rootapp
[pipeline:glance-api-fakeauth]
pipeline = versionnegotiation gzip fakeauth context rootapp
[pipeline:glance-api-noauth]
pipeline = versionnegotiation gzip context rootapp
[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app
/v3: apiv3app
[app:apiversions]
paste.app_factory = glance.api.versions:create_resource
[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory
[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory
[app:apiv3app]
paste.app_factory =
    glance.tests.functional.artifacts.test_artifacts:TestRouter.factory
[filter:versionnegotiation]
paste.filter_factory =
    glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
[filter:gzip]
paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory
[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
[filter:cache_manage]
paste.filter_factory =
    glance.api.middleware.cache_manage:CacheManageFilter.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory =
    glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:fakeauth]
paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory
"""
    self.cleanup()
    # override the default paste config before the base class boots servers
    self.api_server.paste_conf_base = new_paste_conf_base
    super(TestArtifacts, self).start_servers(**kwargs)
def _create_artifact(self, type_name, type_version='1.0', data=None,
                     status=201):
    """POST a draft artifact of *type_name*/*type_version*; return the body."""
    artifact_data = data or {'name': 'artifact-1',
                             'version': '12'}
    url = '/%s/v%s/drafts' % (type_name, type_version)
    return self._check_artifact_post(url, artifact_data, status=status)
def _check_artifact_method(self, method, url, data=None, status=200,
                           headers=None):
    """Issue *method* on *url*, assert *status* and return the decoded body.

    JSON bodies are serialized on the way out and deserialized on the way
    back; error responses (>= 400) are returned as raw text.
    """
    if not headers:
        headers = self._headers()
    headers.setdefault("Content-Type", "application/json")
    if 'application/json' in headers['Content-Type']:
        data = jsonutils.dumps(data)
    http_call = getattr(requests, method)
    response = http_call(self._url(url), headers=headers, data=data)
    self.assertEqual(status, response.status_code)
    if status >= 400:
        return response.text
    if "application/json" in response.headers["content-type"]:
        return jsonutils.loads(response.text)
    return response.text
def _check_artifact_post(self, url, data, status=201, headers=None):
    """POST *data* to *url*, expecting *status*; return the decoded body.

    The default headers dict is created per call: the previous mutable
    default argument ({'Content-Type': ...}) was shared between calls and
    mutated downstream via headers.setdefault().
    """
    if headers is None:
        headers = {'Content-Type': 'application/json'}
    return self._check_artifact_method("post", url, data, status=status,
                                       headers=headers)
def _check_artifact_get(self, url, status=200):
    """GET *url*, expecting *status*; return the decoded body."""
    return self._check_artifact_method("get", url, status=status)
def _check_artifact_delete(self, url, status=204):
    """DELETE *url*, expecting *status*; return the raw response text."""
    resp = requests.delete(self._url(url), headers=self._headers())
    self.assertEqual(status, resp.status_code)
    return resp.text
def _check_artifact_patch(self, url, data, status=200):
    """PATCH *data* to *url*, expecting *status*; return the decoded body."""
    # status is passed positionally (4th parameter of the helper), unlike
    # the sibling helpers which use status=status; the effect is identical
    return self._check_artifact_method("patch", url, data, status)
def _check_artifact_put(self, url, data, status=200):
    """PUT *data* to *url*, expecting *status*; return the decoded body."""
    return self._check_artifact_method("put", url, data, status=status)
def test_list_any_artifacts(self):
    """Returns information about all draft artifacts with given endpoint"""
    self._create_artifact('noprop')
    # local name chosen so it doesn't shadow the imported artifacts module
    listed = self._check_artifact_get('/noprop/drafts')
    self.assertEqual(1, len(listed))
def test_list_last_version(self):
    """Listing drafts without a version equals listing the latest version.

    /artifacts/endpoint == /artifacts/endpoint/all-versions
    """
    self._create_artifact('noprop')
    artifacts = self._check_artifact_get('/noprop/drafts')
    self.assertEqual(1, len(artifacts))
    # the same result can be achieved if asked for artifact with
    # type_version=last version
    artifacts_precise = self._check_artifact_get('/noprop/v1.0/drafts')
    self.assertEqual(artifacts, artifacts_precise)
def test_list_artifacts_by_state(self):
    """Returns last version of artifacts with given state"""
    self._create_artifact('noprop')
    creating_state = self._check_artifact_get('/noprop/drafts')
    self.assertEqual(1, len(creating_state))
    # no active [/type_name/active == /type_name]
    # a freshly created draft is not active, so the active listing is empty
    active_state = self._check_artifact_get('/noprop')
    self.assertEqual(0, len(active_state))
def test_list_artifacts_with_version(self):
    """Supplying precise artifact version does not break anything"""
    self._create_artifact('noprop')
    list_creating = self._check_artifact_get('/noprop/v1.0/drafts')
    self.assertEqual(1, len(list_creating))
    # NOTE(review): the u'' prefix in the expected message assumes the
    # server reprs the bad version as a py2 unicode string -- confirm on py3
    bad_version = self._check_artifact_get('/noprop/v1.0bad',
                                           status=400)
    self.assertIn("Invalid version string: u'1.0bad'", bad_version)
def test_get_artifact_by_id_any_version(self):
    """An artifact can be fetched by id without specifying a type version."""
    data = self._create_artifact('noprop')
    artifact_id = data['id']
    artifacts = self._check_artifact_get(
        '/noprop/%s' % artifact_id)
    self.assertEqual(artifact_id, artifacts['id'])
def test_list_artifact_no_such_version(self):
    """Version filtering should be applied for existing plugins.

    An attempt to retrieve an artifact out of existing plugin but with
    a wrong version should result in
    400 BadRequest 'No such plugin has been loaded'
    """
    msg = self._check_artifact_get('/noprop/v0.0.9', 400)
    self.assertIn("No plugin for 'noprop v 0.0.9' has been loaded",
                  msg)
def test_get_artifact_by_id(self):
    """Fetching by id works both with and without an explicit version."""
    data = self._create_artifact('noprop')
    artifact_id = data['id']
    artifacts = self._check_artifact_get(
        '/noprop/%s' % artifact_id)
    self.assertEqual(artifact_id, artifacts['id'])
    # the same result can be achieved if asked for artifact with
    # type_version=last version
    artifacts_precise = self._check_artifact_get(
        '/noprop/v1.0/%s' % artifact_id)
    self.assertEqual(artifacts, artifacts_precise)
def test_get_artifact_basic_show_level(self):
    """show_level=basic omits dependency (artifact reference) properties."""
    no_prop_art = self._create_artifact('noprop')
    art = self._create_artifact(
        'withprops',
        data={"name": "name", "version": "42",
              "depends_on": no_prop_art['id']})
    self.assertEqual(no_prop_art['id'], art['depends_on']['id'])
    self.assertEqual(no_prop_art['name'], art['depends_on']['name'])
    artifact_id = art['id']
    artifact = self._check_artifact_get(
        '/withprops/%s?show_level=basic' % artifact_id)
    self.assertEqual(artifact_id, artifact['id'])
    # the dependency set at creation time is hidden at this show level
    self.assertIsNone(artifact['depends_on'])
def test_get_artifact_none_show_level(self):
    """show_level=none omits the deployer-defined properties entirely."""
    # create an artifact with two deployer-defined properties set
    artifact_data = {'name': 'artifact-1',
                     'version': '12',
                     'tags': ['gagaga', 'sesese'],
                     'prop1': 'Arthur Dent',
                     'prop2': 42}
    art = self._check_artifact_post('/withprops/v1.0/drafts',
                                    artifact_data)
    expected_artifact = {
        'state': 'creating',
        'name': 'artifact-1',
        'version': '12.0.0',
        'tags': ['gagaga', 'sesese'],
        'visibility': 'private',
        'type_name': 'WithProps',
        'type_version': '1.0',
        'prop1': 'Arthur Dent',
        'prop2': 42
    }
    for key, value in expected_artifact.items():
        self.assertEqual(art[key], value, key)
    artifact_id = art['id']
    artifact = self._check_artifact_get(
        '/withprops/%s?show_level=none' % artifact_id)
    self.assertEqual(artifact_id, artifact['id'])
    # both custom properties were set above but are hidden at this level
    self.assertIsNone(artifact['prop1'])
    self.assertIsNone(artifact['prop2'])
def test_get_artifact_invalid_show_level(self):
    """An unknown show_level value results in 400."""
    no_prop_art = self._create_artifact('noprop')
    art = self._create_artifact(
        'withprops',
        data={"name": "name", "version": "42",
              "depends_on": no_prop_art['id']})
    self.assertEqual(no_prop_art['id'], art['depends_on']['id'])
    self.assertEqual(no_prop_art['name'], art['depends_on']['name'])
    artifact_id = art['id']
    # 'yoba' is not a valid show_level value
    self._check_artifact_get(
        '/noprop/%s?show_level=yoba' % artifact_id, status=400)
def test_get_artifact_no_such_id(self):
    """Fetching a random, unknown artifact id returns 404."""
    missing_id = str(uuid.uuid4())
    msg = self._check_artifact_get('/noprop/%s' % missing_id, status=404)
    self.assertIn('No artifact found with ID', msg)
def test_get_artifact_present_id_wrong_type(self):
    """An existing id requested under another type or version yields 404."""
    artifact_data = {'name': 'artifact-1',
                     'version': '12',
                     'prop1': '12',
                     'prop2': 12}
    art1 = self._create_artifact('withprops', data=artifact_data)
    art2 = self._create_artifact('noprop')
    # ok id and type_name but bad type_version should result in 404
    self._check_artifact_get('/noprop/v0.5/%s' % str(art2['id']),
                             status=404)
    # try to access art2 by supplying art1.type and art2.id
    self._check_artifact_get('/withprops/%s' % str(art2['id']),
                             status=404)
    self._check_artifact_get('/noprop/%s' % str(art1['id']), status=404)
def test_delete_artifact(self):
    """A deleted artifact is no longer retrievable (404)."""
    artifact_data = {'name': 'artifact-1',
                     'version': '12',
                     'prop1': '12',
                     'prop2': 12}
    art1 = self._create_artifact('withprops', data=artifact_data)
    self._check_artifact_delete('/withprops/v1.0/%s' % art1['id'])
    art1_deleted = self._check_artifact_get('/withprops/%s' % art1['id'],
                                            status=404)
    self.assertIn('No artifact found with ID', art1_deleted)
def test_delete_artifact_no_such_id(self):
    """Deleting an unknown artifact id returns 404."""
    missing_id = str(uuid.uuid4())
    self._check_artifact_delete('/noprop/v1/%s' % missing_id, status=404)
@unittest.skip("Test is unstable")
def test_delete_artifact_with_dependency(self):
    """Deletion is refused (400) while dependency properties are still set."""
    # make sure that artifact can't be deleted if it has some dependencies
    # still not deleted
    art = self._create_artifact('withprops')
    no_prop_art = self._create_artifact('noprop')
    art_updated = self._check_artifact_patch(
        '/withprops/v1/%s' % art['id'],
        data=[{'value': no_prop_art['id'],
               'op': 'replace',
               'path': '/depends_on'},
              {'value': [no_prop_art['id']],
               'op': 'add',
               'path': '/depends_on_list'}])
    self.assertEqual(no_prop_art['id'], art_updated['depends_on']['id'])
    self.assertEqual(1, len(art_updated['depends_on_list']))
    # try to delete an artifact prior to its dependency
    res = self._check_artifact_delete('/withprops/v1/%s' % art['id'],
                                      status=400)
    self.assertIn(
        "Dependency property 'depends_on' has to be deleted first", res)
    # delete a dependency
    art_updated = self._check_artifact_patch(
        '/withprops/v1/%s' % art['id'],
        data=[{'op': 'remove', 'path': '/depends_on'}])
    # try to delete prior to deleting artifact_list dependencies
    res = self._check_artifact_delete('/withprops/v1/%s' % art['id'],
                                      status=400)
    self.assertIn(
        "Dependency property 'depends_on_list' has to be deleted first",
        res)
    # delete the dependency list as well
    art_updated = self._check_artifact_patch(
        '/withprops/v1/%s' % art['id'],
        data=[{'op': 'remove', 'path': '/depends_on_list'}])
    # now the artifact itself can be deleted
    self._check_artifact_delete('/withprops/v1/%s' % art['id'])
def test_delete_artifact_with_blob(self):
    """An artifact that has uploaded blob data can still be deleted."""
    # Upload some data to an artifact
    art = self._create_artifact('withblob')
    headers = self._headers({'Content-Type': 'application/octet-stream'})
    self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'],
                              headers=headers,
                              data='ZZZZZ', status=200)
    self._check_artifact_delete('/withblob/v1/%s' % art['id'])
def test_update_array_property_by_replace_op(self):
    """json-patch 'replace' works on a whole array, an index and '/-'."""
    art = self._create_artifact('withprops', data={'name': 'some art',
                                                   'version': '4.2'})
    self.assertEqual('some art', art['name'])
    data = [{'op': 'replace', 'value': [1, 2, 3], 'path': '/prop_list'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s' %
                                             art['id'],
                                             data=data)
    self.assertEqual([1, 2, 3], art_updated['prop_list'])
    # now try to change an element of the list (index 1)
    data_change_first = [{'op': 'replace', 'value': 42,
                          'path': '/prop_list/1'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s' %
                                             art['id'],
                                             data=data_change_first)
    self.assertEqual([1, 42, 3], art_updated['prop_list'])
    # replace last element ('/-' addresses the end of the array)
    data_change_last = [{'op': 'replace', 'value': 24,
                         'path': '/prop_list/-'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s' %
                                             art['id'],
                                             data=data_change_last)
    self.assertEqual([1, 42, 24], art_updated['prop_list'])
def test_update_dict_property_by_replace_op(self):
    """json-patch 'replace' works on nested dict keys and nested list items."""
    art = self._create_artifact(
        'withprops',
        data={'name': 'some art',
              'version': '4.2',
              'dict_prop': {'foo': "Fenchurch", 'bar_list': [42, 42]}})
    self.assertEqual({'foo': "Fenchurch", 'bar_list': [42, 42]},
                     art['dict_prop'])
    data = [{'op': 'replace', 'value': 24,
             'path': '/dict_prop/bar_list/0'},
            {'op': 'replace', 'value': 'cello lesson',
             'path': '/dict_prop/foo'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data)
    self.assertEqual({'foo': 'cello lesson', 'bar_list': [24, 42]},
                     art_updated['dict_prop'])
def test_update_empty_dict_property_by_replace_op(self):
    """Replacing a key inside an unset dict property is a 400 error."""
    art = self._create_artifact('withprops')
    self.assertIsNone(art['dict_prop'])
    data = [{'op': 'replace', 'value': "don't panic",
             'path': '/dict_prop/foo'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data, status=400)
    self.assertIn("The provided path 'dict_prop/foo' is invalid",
                  art_updated)
def test_update_empty_dict_property_by_remove_op(self):
    """Removing a key from an unset dict property is a 400 error."""
    art = self._create_artifact('withprops')
    self.assertIsNone(art['dict_prop'])
    data = [{'op': 'remove', 'path': '/dict_prop/bar_list'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data, status=400)
    self.assertIn("The provided path 'dict_prop/bar_list' is invalid",
                  art_updated)
def test_update_dict_property_by_remove_op(self):
    """json-patch 'remove' works on dict keys, list items and whole dicts."""
    art = self._create_artifact(
        'withprops',
        data={'name': 'some art', 'version': '4.2',
              'dict_prop': {'foo': "Fenchurch", 'bar_list': [42, 42]}})
    self.assertEqual({'foo': 'Fenchurch', 'bar_list': [42, 42]},
                     art['dict_prop'])
    data = [{'op': 'remove', 'path': '/dict_prop/foo'},
            {'op': 'remove', 'path': '/dict_prop/bar_list/1'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data)
    self.assertEqual({'bar_list': [42]}, art_updated['dict_prop'])
    # now delete the whole dict
    data = [{'op': 'remove', 'path': '/dict_prop'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data)
    self.assertIsNone(art_updated['dict_prop'])
@unittest.skip("Skipping due to a know bug")
def test_update_dict_property_change_values(self):
    """Replacing a whole string-valued dict swaps its key set entirely."""
    art = self._create_artifact(
        'withprops', data={'name': 'some art', 'version': '4.2',
                           'dict_prop_strval':
                               {'foo': 'Fenchurch', 'bar': 'no value'}})
    self.assertEqual({'foo': 'Fenchurch', 'bar': 'no value'},
                     art['dict_prop_strval'])
    new_data = [{'op': 'replace', 'path': '/dict_prop_strval',
                 'value': {'new-foo': 'Arthur Dent'}}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=new_data)
    self.assertEqual({'new-foo': 'Arthur Dent'},
                     art_updated['dict_prop_strval'])
def test_update_array_property_by_remove_op(self):
    """json-patch 'remove' works on an index, '/-' and the whole array."""
    art = self._create_artifact(
        'withprops', data={'name': 'some art',
                           'version': '4.2',
                           'prop_list': [1, 2, 3]})
    self.assertEqual([1, 2, 3], art['prop_list'])
    data = [{'op': 'remove', 'path': '/prop_list/0'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data)
    self.assertEqual([2, 3], art_updated['prop_list'])
    # remove last element
    data = [{'op': 'remove', 'path': '/prop_list/-'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data)
    self.assertEqual([2], art_updated['prop_list'])
    # now delete the whole array
    data = [{'op': 'remove', 'path': '/prop_list'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data)
    self.assertIsNone(art_updated['prop_list'])
def test_update_array_property_by_add_op(self):
    """json-patch 'add' on arrays: whole, by index, '/-', and bad index."""
    art = self._create_artifact(
        'withprops', data={'name': 'some art',
                           'version': '4.2'})
    self.assertIsNone(art['prop_list'])
    data = [{'op': 'add', 'path': '/prop_list', 'value': [2, 12, 0, 6]}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'], data=data)
    self.assertEqual([2, 12, 0, 6], art_updated['prop_list'])
    data = [{'op': 'add', 'path': '/prop_list/2', 'value': 85}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'], data=data)
    self.assertEqual([2, 12, 85, 0, 6], art_updated['prop_list'])
    # add where path='/array/-' means append to the end
    data = [{'op': 'add', 'path': '/prop_list/-', 'value': 7}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'], data=data)
    self.assertEqual([2, 12, 85, 0, 6, 7], art_updated['prop_list'])
    # an attempt to add an element at a nonexistent position should
    # result in 400
    self.assertEqual(6, len(art_updated['prop_list']))
    bad_index_data = [{'op': 'add', 'path': '/prop_list/11',
                       'value': 42}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=bad_index_data,
                                             status=400)
    self.assertIn("The provided path 'prop_list/11' is invalid",
                  art_updated)
def test_update_dict_property_by_add_op(self):
    """json-patch 'add' on a key of an unset dict property creates the dict."""
    art = self._create_artifact("withprops")
    self.assertIsNone(art['dict_prop'])
    data = [{'op': 'add', 'path': '/dict_prop/foo', 'value': "some value"}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data)
    self.assertEqual({"foo": "some value"}, art_updated['dict_prop'])
def test_update_empty_array_property_by_add_op(self):
    """Test jsonpatch add.

    According to RFC 6902:
    * if the array is empty, '/array/0' is a valid path
    """
    create_data = {'name': 'new artifact',
                   'version': '4.2'}
    art = self._create_artifact('withprops', data=create_data)
    # prop_list starts unset, yet index 0 must be addressable
    self.assertIsNone(art['prop_list'])
    data = [{'op': 'add', 'path': '/prop_list/0', 'value': 3}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data)
    self.assertEqual([3], art_updated['prop_list'])
def test_update_tuple_property_by_index(self):
    """Both positions of a fixed (tuple-typed) array can be replaced."""
    art = self._create_artifact(
        'withprops', data={'name': 'some art',
                           'version': '4.2',
                           'tuple_prop': [1, False]})
    self.assertEqual([1, False], art['tuple_prop'])
    data = [{'op': 'replace', 'value': True,
             'path': '/tuple_prop/1'},
            {'op': 'replace', 'value': 2,
             'path': '/tuple_prop/0'}]
    art_updated = self._check_artifact_patch('/withprops/v1/%s'
                                             % art['id'],
                                             data=data)
    self.assertEqual([2, True], art_updated['tuple_prop'])
def test_update_artifact(self):
    """A generic property (version) can be replaced via json-patch."""
    art = self._create_artifact('noprop')
    self.assertEqual('artifact-1', art['name'])
    patch = [{'op': 'replace', 'value': '0.0.9', 'path': '/version'}]
    updated = self._check_artifact_patch('/noprop/v1/%s' % art['id'],
                                         data=patch)
    self.assertEqual('0.0.9', updated['version'])
def test_update_artifact_properties(self):
art = self._create_artifact('withprops')
for prop in ['prop1', 'prop2']:
self.assertIsNone(art[prop])
data = [{'op': 'replace', 'value': 'some value',
'path': '/prop1'}]
art_updated = self._check_artifact_patch(
'/withprops/v1/%s' % art['id'], data=data)
self.assertEqual('some value', art_updated['prop1'])
def test_update_artifact_remove_property(self):
artifact_data = {'name': 'artifact-1',
'version': '12',
'tags': ['gagaga', 'sesese'],
'prop1': 'Arthur Dent',
'prop2': 42}
art = self._create_artifact('withprops', data=artifact_data)
data = [{'op': 'remove', 'path': '/prop1'}]
art_updated = self._check_artifact_patch('/withprops/v1/%s'
% art['id'],
data=data)
self.assertIsNone(art_updated['prop1'])
self.assertEqual(42, art_updated['prop2'])
def test_update_wrong_property_type(self):
art = self._create_artifact('withprops')
for prop in ['prop2', 'prop2']:
self.assertIsNone(art[prop])
data = [{'op': 'replace', 'value': 123, 'path': '/prop1'}]
art_updated = self._check_artifact_patch(
'/withprops/v1/%s' % art['id'], data=data, status=400)
self.assertIn("Property 'prop1' may not have value '123'", art_updated)
def test_update_multiple_properties(self):
with_prop_art = self._create_artifact('withprops')
data = [{'op': 'replace',
'path': '/prop1',
'value': 'some value'},
{'op': 'replace',
'path': '/prop2',
'value': 42}]
updated = self._check_artifact_patch(
'/withprops/v1/%s' % with_prop_art['id'], data=data)
self.assertEqual('some value', updated['prop1'])
self.assertEqual(42, updated['prop2'])
def test_create_artifact_with_dependency(self):
no_prop_art = self._create_artifact('noprop')
art = self._create_artifact(
'withprops',
data={"name": "name", "version": "42",
"depends_on": no_prop_art['id']})
self.assertEqual(no_prop_art['id'], art['depends_on']['id'])
self.assertEqual(no_prop_art['name'], art['depends_on']['name'])
def test_create_artifact_dependency_list(self):
no_prop_art1 = self._create_artifact('noprop')
no_prop_art2 = self._create_artifact('noprop')
art = self._create_artifact(
'withprops',
data={"name": "name", "version": "42",
"depends_on_list": [no_prop_art1['id'], no_prop_art2['id']]})
self.assertEqual(2, len(art['depends_on_list']))
self.assertEqual([no_prop_art1['id'], no_prop_art2['id']],
map(lambda x: x['id'], art['depends_on_list']))
def test_create_dependency_list_same_id(self):
no_prop_art = self._create_artifact('noprop')
res = self._create_artifact(
'withprops',
data={"name": "name", "version": "42",
"depends_on_list": [no_prop_art['id'],
no_prop_art['id']]}, status=400)
self.assertIn("Items have to be unique", res)
def test_create_artifact_bad_dependency_format(self):
"""Invalid dependencies creation.
Dependencies should be passed:
* as a list of ids if param is an ArtifactReferenceList
* as an id if param is an ArtifactReference
"""
no_prop_art = self._create_artifact('noprop')
art = self._check_artifact_post(
'/withprops/v1/drafts',
{"name": "name", "version": "42",
"depends_on": [no_prop_art['id']]}, status=400)
self.assertIn('Not a valid value type', art)
art = self._check_artifact_post(
'/withprops/v1.0/drafts',
{"name": "name", "version": "42",
"depends_on_list": no_prop_art['id']}, status=400)
self.assertIn('object is not iterable', art)
    def test_update_dependency(self):
        """A 'replace' patch on /depends_on can be applied repeatedly."""
        no_prop_art = self._create_artifact('noprop')
        no_prop_art1 = self._create_artifact('noprop')
        with_prop_art = self._create_artifact('withprops')
        # point the dependency at the first noprop artifact
        data = [{'op': 'replace',
                 'path': '/depends_on',
                 'value': no_prop_art['id']}]
        updated = self._check_artifact_patch(
            '/withprops/v1/%s' % with_prop_art['id'], data=data)
        self.assertEqual(no_prop_art['id'], updated['depends_on']['id'])
        self.assertEqual(no_prop_art['name'], updated['depends_on']['name'])
        data = [{'op': 'replace',
                 'path': '/depends_on',
                 'value': no_prop_art1['id']}]
        # update again and make sure it changes
        updated = self._check_artifact_patch(
            '/withprops/v1/%s' % with_prop_art['id'], data=data)
        self.assertEqual(no_prop_art1['id'], updated['depends_on']['id'])
        self.assertEqual(no_prop_art1['name'], updated['depends_on']['name'])
def test_update_dependency_circular_reference(self):
with_prop_art = self._create_artifact('withprops')
data = [{'op': 'replace',
'path': '/depends_on',
'value': [with_prop_art['id']]}]
not_updated = self._check_artifact_patch(
'/withprops/v1/%s' % with_prop_art['id'], data=data, status=400)
self.assertIn('Artifact with a circular dependency can not be created',
not_updated)
    def test_publish_artifact(self):
        """Publishing is blocked until every dependency is active."""
        art = self._create_artifact('withprops')
        # now create dependency
        no_prop_art = self._create_artifact('noprop')
        art_updated = self._check_artifact_patch(
            '/withprops/v1/%s' % art['id'],
            data=[{'value': no_prop_art['id'],
                   'op': 'replace',
                   'path': '/depends_on'}])
        self.assertTrue(art_updated['depends_on'] != [])
        # artifact can't be published if any dependency is in non-active state
        res = self._check_artifact_post(
            '/withprops/v1/%s/publish' % art['id'], {}, status=400)
        self.assertIn("Not all dependencies are in 'active' state", res)
        # after you publish the dependency -> artifact can be published
        dep_published = self._check_artifact_post(
            '/noprop/v1/%s/publish' % no_prop_art['id'], {}, status=200)
        self.assertEqual('active', dep_published['state'])
        art_published = self._check_artifact_post(
            '/withprops/v1.0/%s/publish' % art['id'], {}, status=200)
        self.assertEqual('active', art_published['state'])
    def test_no_mutable_change_in_published_state(self):
        """Dependencies are mutable while drafting, immutable once published."""
        art = self._create_artifact('withprops')
        no_prop_art = self._create_artifact('noprop')
        no_prop_other = self._create_artifact('noprop')
        # while still in draft state, set the dependency once ...
        art_updated = self._check_artifact_patch(
            '/withprops/v1/%s' % art['id'],
            data=[{'value': no_prop_art['id'],
                   'op': 'replace',
                   'path': '/depends_on'}])
        self.assertEqual(no_prop_art['id'], art_updated['depends_on']['id'])
        # now change dependency to some other artifact
        art_updated = self._check_artifact_patch(
            '/withprops/v1/%s' % art['id'],
            data=[{'value': no_prop_other['id'],
                   'op': 'replace',
                   'path': '/depends_on'}])
        self.assertEqual(no_prop_other['id'], art_updated['depends_on']['id'])
        # publish dependency
        dep_published = self._check_artifact_post(
            '/noprop/v1/%s/publish' % no_prop_other['id'], {}, status=200)
        self.assertEqual('active', dep_published['state'])
        # publish artifact
        art_published = self._check_artifact_post(
            '/withprops/v1.0/%s/publish' % art['id'], {}, status=200)
        self.assertEqual('active', art_published['state'])
        # try to change dependency, should fail as already published
        res = self._check_artifact_patch(
            '/withprops/v1/%s' % art_published['id'],
            data=[{'op': 'remove', 'path': '/depends_on'}], status=400)
        self.assertIn('Attempt to set value of immutable property', res)
def test_create_artifact_empty_body(self):
self._check_artifact_post('/noprop/v1.0/drafts', {}, 400)
def test_create_artifact_insufficient_arguments(self):
self._check_artifact_post('/noprop/v1.0/drafts',
{'name': 'some name, no version'},
status=400)
def test_create_artifact_no_such_version(self):
"""Creation impossible without specifying a correct version.
An attempt to create an artifact out of existing plugin but with
a wrong version should result in
400 BadRequest 'No such plugin has been loaded'
"""
# make sure there is no such artifact noprop
self._check_artifact_get('/noprop/v0.0.9', 400)
artifact_data = {'name': 'artifact-1',
'version': '12'}
msg = self._check_artifact_post('/noprop/v0.0.9/drafts',
artifact_data,
status=400)
self.assertIn("No plugin for 'noprop v 0.0.9' has been loaded",
msg)
def test_create_artifact_no_type_version_specified(self):
"""Creation impossible without specifying a version.
It should not be possible to create an artifact out of existing plugin
without specifying any version
"""
artifact_data = {'name': 'artifact-1',
'version': '12'}
self._check_artifact_post('/noprop/drafts', artifact_data, 404)
def test_create_artifact_no_properties(self):
"""Create an artifact with minimum parameters"""
artifact_data = {'name': 'artifact-1',
'version': '12'}
artifact = self._check_artifact_post('/withprops/v1.0/drafts',
artifact_data)
# verify that all fields have the values expected
expected_artifact = {
'state': 'creating',
'name': 'artifact-1',
'version': '12.0.0',
'tags': [],
'visibility': 'private',
'type_name': 'WithProps',
'type_version': '1.0',
'prop1': None,
'prop2': None
}
for key, value in expected_artifact.items():
self.assertEqual(artifact[key], value, key)
def test_create_artifact_with_properties(self):
"""Create an artifact (with two deployer-defined properties)"""
artifact_data = {'name': 'artifact-1',
'version': '12',
'tags': ['gagaga', 'sesese'],
'prop1': 'Arthur Dent',
'prop2': 42}
artifact = self._check_artifact_post('/withprops/v1.0/drafts',
artifact_data)
expected_artifact = {
'state': 'creating',
'name': 'artifact-1',
'version': '12.0.0',
'tags': ['gagaga', 'sesese'],
'visibility': 'private',
'type_name': 'WithProps',
'type_version': '1.0',
'prop1': 'Arthur Dent',
'prop2': 42
}
for key, value in expected_artifact.items():
self.assertEqual(artifact[key], value, key)
    def test_create_artifact_not_all_properties(self):
        """Create artifact with minimal properties.

        Checks that it is possible to create an artifact by passing all
        required properties but omitting some not required
        """
        artifact_data = {'name': 'artifact-1',
                         'version': '12',
                         'visibility': 'private',
                         'tags': ['gagaga', 'sesese'],
                         'prop1': 'i am ok'}
        artifact = self._check_artifact_post('/withprops/v1.0/drafts',
                                             artifact_data)
        # the omitted prop2 must default to None
        expected_artifact = {
            'state': 'creating',
            'name': 'artifact-1',
            'version': '12.0.0',
            'tags': ['gagaga', 'sesese'],
            'visibility': 'private',
            'type_name': 'WithProps',
            'type_version': '1.0',
            'prop1': 'i am ok',
            'prop2': None}
        for key, value in expected_artifact.items():
            self.assertEqual(artifact[key], value, key)
        # now check creation with no properties specified
        for prop in ['prop1', 'prop2']:
            artifact_data.pop(prop, '')
        artifact = self._check_artifact_post('/withprops/v1.0/drafts',
                                             artifact_data)
        for prop in ['prop1', 'prop2']:
            self.assertIsNone(artifact[prop])
def test_create_artifact_invalid_properties(self):
"""Any attempt to pass invalid properties should result in 400"""
artifact_data = {'name': 'artifact-1',
'version': '12',
'prop1': 1}
res = self._check_artifact_post('/withprops/v1.0/drafts',
artifact_data,
status=400)
self.assertIn("Property 'prop1' may not have value '1'", res)
artifact_data.pop('prop1')
artifact_data['nosuchprop'] = "Random"
res = self._check_artifact_post('/withprops/v1.0/drafts',
artifact_data,
status=400)
self.assertIn("Artifact has no property nosuchprop", res)
def test_upload_file(self):
# Upload some data to an artifact
art = self._create_artifact('withblob')
headers = self._headers({'Content-Type': 'application/octet-stream'})
self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'],
headers=headers,
data='ZZZZZ', status=200)
def test_upload_list_files(self):
art = self._create_artifact('withblob')
headers = self._headers({'Content-Type': 'application/octet-stream'})
self._check_artifact_post('/withblob/v1/%s/blob_list' % art['id'],
headers=headers,
data='ZZZZZ', status=200)
self._check_artifact_post('/withblob/v1/%s/blob_list' % art['id'],
headers=headers,
data='YYYYY', status=200)
    def test_download_file(self):
        """Uploaded blob data can be fetched back via the download link."""
        art = self._create_artifact('withblob')
        artifact_id = art['id']
        # upload a small payload into the blob1 property
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'],
                                  headers=headers,
                                  data='ZZZZZ', status=200)
        # the artifact representation must now expose a download link
        art = self._check_artifact_get('/withblob/%s' % artifact_id)
        self.assertEqual(artifact_id, art['id'])
        self.assertIn('download_link', art['blob1'])
        # downloading must return exactly the uploaded bytes
        data = self._check_artifact_get(
            '/withblob/%s/blob1/download' % art['id'])
        self.assertEqual('ZZZZZ', data)
def test_limit(self):
artifact_data = {'name': 'artifact-1',
'version': '12'}
self._check_artifact_post('/withprops/v1/drafts',
artifact_data)
artifact_data = {'name': 'artifact-1',
'version': '13'}
self._check_artifact_post('/withprops/v1/drafts',
artifact_data)
result = self._check_artifact_get('/withprops/v1/drafts')
self.assertEqual(2, len(result))
result = self._check_artifact_get('/withprops/v1/drafts?limit=1')
self.assertEqual(1, len(result))
def _check_sorting_order(self, expected, actual):
for e, a in zip(expected, actual):
self.assertEqual(e['name'], a['name'])
self.assertEqual(e['version'], a['version'])
self.assertEqual(e['prop1'], a['prop1'])
    def test_sort(self):
        """The 'sort' query parameter orders drafts by field and direction."""
        # five artifacts with ascending names; versions/prop1 vary so that
        # multi-key sorting below is distinguishable from single-key sorting
        artifact_data = {'name': 'artifact-1',
                         'version': '12',
                         'prop1': 'lala'}
        art1 = self._check_artifact_post('/withprops/v1.0/drafts',
                                         artifact_data)
        artifact_data = {'name': 'artifact-2',
                         'version': '13',
                         'prop1': 'lala'}
        art2 = self._check_artifact_post('/withprops/v1.0/drafts',
                                         artifact_data)
        artifact_data = {'name': 'artifact-3',
                         'version': '13',
                         'prop1': 'tutu'}
        art3 = self._check_artifact_post('/withprops/v1.0/drafts',
                                         artifact_data)
        artifact_data = {'name': 'artifact-4',
                         'version': '13',
                         'prop1': 'hyhy'}
        art4 = self._check_artifact_post('/withprops/v1.0/drafts',
                                         artifact_data)
        artifact_data = {'name': 'artifact-5',
                         'version': '13',
                         'prop1': 'bebe'}
        art5 = self._check_artifact_post('/withprops/v1.0/drafts',
                                         artifact_data)
        result = self._check_artifact_get(
            '/withprops/v1.0/drafts?sort=name')
        self.assertEqual(5, len(result))
        # default direction is 'desc'
        expected = [art5, art4, art3, art2, art1]
        self._check_sorting_order(expected, result)
        # explicit ascending direction
        result = self._check_artifact_get(
            '/withprops/v1.0/drafts?sort=name:asc')
        self.assertEqual(5, len(result))
        expected = [art1, art2, art3, art4, art5]
        self._check_sorting_order(expected, result)
        # multi-key sort: version ascending, then prop1 (default desc)
        result = self._check_artifact_get(
            '/withprops/v1.0/drafts?sort=version:asc,prop1')
        self.assertEqual(5, len(result))
        expected = [art1, art3, art2, art4, art5]
        self._check_sorting_order(expected, result)
    def test_update_property(self):
        """Property subpaths support PUT (set/replace) and POST (append)."""
        data = {'name': 'an artifact',
                'version': '42'}
        art = self._create_artifact('withprops', data=data)
        # update single integer property via PUT
        upd = self._check_artifact_put('/withprops/v1.0/%s/prop2' % art['id'],
                                       data={'data': 15})
        self.assertEqual(15, upd['prop2'])
        # create list property via PUT
        upd = self._check_artifact_put(
            '/withprops/v1.0/%s/tuple_prop' % art['id'],
            data={'data': [42, True]})
        self.assertEqual([42, True], upd['tuple_prop'])
        # change list property via PUT
        upd = self._check_artifact_put(
            '/withprops/v1.0/%s/tuple_prop/0' % art['id'], data={'data': 24})
        self.assertEqual([24, True], upd['tuple_prop'])
        # append to list property via POST
        upd = self._check_artifact_post(
            '/withprops/v1.0/%s/prop_list' % art['id'], data={'data': [11]},
            status=200)
        self.assertEqual([11], upd['prop_list'])
        # append to list property via POST to '/-' (JSON-pointer append)
        upd = self._check_artifact_post(
            '/withprops/v1.0/%s/prop_list/-' % art['id'],
            status=200, data={'data': 10})
        self.assertEqual([11, 10], upd['prop_list'])
    def test_bad_update_property(self):
        """Invalid property updates via PUT must all yield 400."""
        data = {'name': 'an artifact',
                'version': '42'}
        art = self._create_artifact('withprops', data=data)
        # try to update non-existent property
        upd = self._check_artifact_put(
            '/withprops/v1.0/%s/nosuchprop' % art['id'],
            data={'data': 'wont be set'}, status=400)
        self.assertIn('Artifact has no property nosuchprop', upd)
        # try to pass wrong property value
        upd = self._check_artifact_put(
            '/withprops/v1.0/%s/tuple_prop' % art['id'],
            data={'data': ['should be an int', False]}, status=400)
        self.assertIn("Property 'tuple_prop[0]' may not have value", upd)
        # try to pass bad body (not a valid json)
        upd = self._check_artifact_put(
            '/withprops/v1.0/%s/tuple_prop' % art['id'], data="not a json",
            status=400)
        self.assertIn("Invalid json body", upd)
        # try to pass json body invalid under schema (no 'data' key)
        upd = self._check_artifact_put(
            '/withprops/v1.0/%s/tuple_prop' % art['id'],
            data={"bad": "schema"}, status=400)
        self.assertIn("Invalid json body", upd)
    def test_update_different_depths_levels(self):
        """POST appends work at any depth inside a dict property."""
        data = {'name': 'an artifact',
                'version': '42'}
        art = self._create_artifact('withprops', data=data)
        # top level: create the dict itself
        upd = self._check_artifact_post(
            '/withprops/v1.0/%s/dict_prop' % art['id'],
            data={'data': {'foo': 'some value'}}, status=200)
        self.assertEqual({'foo': 'some value'}, upd['dict_prop'])
        # one level down: add a list-valued key to the dict
        upd = self._check_artifact_post(
            '/withprops/v1.0/%s/dict_prop/bar_list' % art['id'],
            data={'data': [5]}, status=200)
        self.assertEqual({'foo': 'some value', 'bar_list': [5]},
                         upd['dict_prop'])
        # two levels down: append into the nested list
        upd = self._check_artifact_post(
            '/withprops/v1.0/%s/dict_prop/bar_list/0' % art['id'],
            data={'data': 15}, status=200)
        self.assertEqual({'foo': 'some value', 'bar_list': [5, 15]},
                         upd['dict_prop'])
        # try to update dict_prop by a non-existent path -> 400
        upd = self._check_artifact_post(
            '/withprops/v1.0/%s/dict_prop/bar_list/nosuchkey' % art['id'],
            data={'data': 15}, status=400)
def test_list_artifact_types(self):
actual = {
u'artifact_types': [
{u'displayed_name': u'NoProp',
u'type_name': u'NoProp',
u'versions':
[{u'id': u'v0.5',
u'link': u'http://127.0.0.1:%d/v3/artifacts/noprop/v0.5'
% self.api_port},
{u'id': u'v1.0',
u'link': u'http://127.0.0.1:%d/v3/artifacts/noprop/v1.0'
% self.api_port}]},
{u'displayed_name': u'WithBlob',
u'type_name': u'WithBlob',
u'versions':
[{u'id': u'v1.0',
u'link':
u'http://127.0.0.1:%d/v3/artifacts/withblob/v1.0'
% self.api_port}]},
{u'displayed_name': u'WithProps',
u'type_name': u'WithProps',
u'versions':
[{u'id': u'v1.0',
u'link':
u'http://127.0.0.1:%d/v3/artifacts/withprops/v1.0'
% self.api_port}]}]}
response = self._check_artifact_get("", status=200)
response[u'artifact_types'].sort(key=lambda x: x[u'type_name'])
for artifact_type in response[u'artifact_types']:
artifact_type[u'versions'].sort(key=lambda x: x[u'id'])
self.assertEqual(actual, response)
| |
"""
FEZ graphics types
"""
from __future__ import print_function
from xnb_parse.file_formats.xml_utils import ET
from xnb_parse.xna_types.xna_graphics import (Texture2D, FORMAT_COLOR, get_surface_format, VERSION_31, VERSION_40,
FORMAT4_COLOR)
class ArtObject(object):
    """FEZ art object with its cubemap path, size, geometry and metadata."""

    def __init__(self, name, cubemap_path, size, geometry, actor_type, no_silhouette, laser_outlets):
        self.name = name
        self.cubemap_path = cubemap_path
        self.size = size
        self.geometry = geometry
        self.actor_type = actor_type
        self.no_silhouette = no_silhouette
        self.laser_outlets = laser_outlets

    def __str__(self):
        vertex_count = len(self.geometry.vertices)
        return "ArtObject '{}' t:'{}' s:{} g:{}".format(self.name, self.cubemap_path, self.size, vertex_count)

    def xml(self, parent=None):
        """Serialize to an 'ArtObject' element under *parent* (or as a new root)."""
        if parent is None:
            node = ET.Element('ArtObject')
        else:
            node = ET.SubElement(parent, 'ArtObject')
        node.set('name', self.name)
        node.set('cubemapPath', self.cubemap_path)
        node.set('noSilhouette', str(self.no_silhouette))
        self.size.xml(ET.SubElement(node, 'Size'))
        if self.actor_type is not None:
            node.set('actorType', str(self.actor_type))
        if self.geometry is not None:
            self.geometry.xml(node)
        if self.laser_outlets is not None:
            self.laser_outlets.xml(node, 'LaserOutlets')
        return node
class ArtObjectPC(object):
    """PC variant of ArtObject: carries the cubemap texture itself, not a path."""

    def __init__(self, name, cubemap, size, geometry, actor_type, no_silhouette):
        self.name = name
        self.cubemap = cubemap
        self.size = size
        self.geometry = geometry
        self.actor_type = actor_type
        self.no_silhouette = no_silhouette

    def __str__(self):
        vertex_count = len(self.geometry.vertices)
        return "ArtObjectPC '{}' s:{} g:{}".format(self.name, self.size, vertex_count)

    def xml(self, parent=None):
        """Serialize to an 'ArtObject' element under *parent* (or as a new root)."""
        if parent is None:
            node = ET.Element('ArtObject')
        else:
            node = ET.SubElement(parent, 'ArtObject')
        node.set('name', self.name)
        node.set('noSilhouette', str(self.no_silhouette))
        self.size.xml(ET.SubElement(node, 'Size'))
        if self.actor_type is not None:
            node.set('actorType', str(self.actor_type))
        if self.geometry is not None:
            self.geometry.xml(node)
        return node

    def export(self, filename):
        """Delegate export to the contained cubemap texture."""
        self.cubemap.export(filename)
class ShaderInstancedIndexedPrimitives(object):
    """Indexed geometry: a primitive type plus vertex and index buffers."""

    __slots__ = ('primitive_type', 'vertices', 'indices')

    def __init__(self, primitive_type, vertices, indices):
        self.primitive_type = primitive_type
        self.vertices = vertices
        self.indices = indices

    def __str__(self):
        return "ShaderInstancedIndexedPrimitives t:{} v:{} i:{}".format(
            self.primitive_type, len(self.vertices), len(self.indices))

    def xml(self, parent):
        """Serialize as a 'ShaderInstancedIndexedPrimitives' child of *parent*."""
        node = ET.SubElement(parent, 'ShaderInstancedIndexedPrimitives')
        if self.primitive_type is not None:
            node.set('type', str(self.primitive_type))
        if self.vertices is not None:
            self.vertices.xml(node, 'Vertices')
        if self.indices is not None:
            self.indices.xml(node, 'Indices', 'Index')
        return node
class VertexPositionNormalTextureInstance(object):
    """A single vertex: position, normal and texture coordinate."""

    __slots__ = ('position', 'normal', 'texture_coord')

    def __init__(self, position, normal, texture_coord):
        self.position = position
        self.normal = normal
        self.texture_coord = texture_coord

    def __str__(self):
        return "VertexPositionNormalTextureInstance p:{} n:{} c:{}".format(
            self.position, self.normal, self.texture_coord)

    def xml(self, parent):
        """Serialize as a 'VertexPositionNormalTextureInstance' child of *parent*."""
        node = ET.SubElement(parent, 'VertexPositionNormalTextureInstance')
        self.position.xml(ET.SubElement(node, 'Position'))
        # the normal is written as element text, not as a nested element
        ET.SubElement(node, 'Normal').text = str(self.normal)
        self.texture_coord.xml(ET.SubElement(node, 'TextureCoord'))
        return node
class NpcMetadata(object):
    """NPC behaviour metadata: walk speed, avoidance flag and sound setup."""

    def __init__(self, walk_speed, avoids_gomez, sound_path, sound_actions):
        self.walk_speed = walk_speed
        self.avoids_gomez = avoids_gomez
        self.sound_path = sound_path
        self.sound_actions = sound_actions

    def __str__(self):
        return "NpcMetadata s:{} a:{}".format(self.sound_path, len(self.sound_actions))

    def xml(self, parent=None):
        """Serialize to an 'NpcMetadata' element under *parent* (or as a new root)."""
        if parent is None:
            node = ET.Element('NpcMetadata')
        else:
            node = ET.SubElement(parent, 'NpcMetadata')
        node.set('avoidsGomez', str(self.avoids_gomez))
        node.set('walkSpeed', str(self.walk_speed))
        if self.sound_path is not None:
            node.set('soundPath', self.sound_path)
        if self.sound_actions is not None:
            self.sound_actions.xml(node, 'SoundActions')
        return node
class AnimatedTexture(object):
    """XNB 3.1 animated texture: dimensions plus a list of raw frames."""

    surface_format = get_surface_format(VERSION_31, FORMAT_COLOR)

    def __init__(self, width, height, actual_width, actual_height, frames):
        self.width = width
        self.height = height
        self.actual_width = actual_width
        self.actual_height = actual_height
        self.frames = frames

    def __str__(self):
        return "AnimatedTexture d:{}x{} a:{}x{} f:{}".format(
            self.width, self.height, self.actual_width, self.actual_height, len(self.frames))

    def xml(self, parent=None):
        """Serialize to an 'AnimatedTexture' element under *parent* (or as a new root)."""
        if parent is None:
            node = ET.Element('AnimatedTexture')
        else:
            node = ET.SubElement(parent, 'AnimatedTexture')
        node.set('width', str(self.width))
        node.set('height', str(self.height))
        node.set('actualWidth', str(self.actual_width))
        node.set('actualHeight', str(self.actual_height))
        if self.frames is not None:
            self.frames.xml(node, 'Frames')
        return node

    def export(self, filename):
        """Export all frames as one stacked texture when frames are present."""
        if self.frames is None:
            return
        self.export_single(filename)

    def export_each(self, filename):
        """Export every frame as its own texture under '<filename>_ani\\<index>'."""
        for index, frame in enumerate(self.frames):
            texture = Texture2D(self.surface_format, self.width, self.height, [frame.data])
            texture.export("{}_ani\\{}".format(filename, index))

    def export_single(self, filename):
        """Export all frames stacked vertically into a single '.ani' texture."""
        combined = bytearray()
        for frame in self.frames:
            combined.extend(frame.data)
        stacked = Texture2D(self.surface_format, self.width, self.height * len(self.frames), [combined])
        stacked.export(filename + '.ani')
class AnimatedTexturePC(object):
    """XNB 4.0 animated texture: one texture data blob plus frame rectangles."""

    surface_format = get_surface_format(VERSION_40, FORMAT4_COLOR)

    def __init__(self, width, height, actual_width, actual_height, data, frames):
        self.width = width
        self.height = height
        self.actual_width = actual_width
        self.actual_height = actual_height
        self.data = data
        self.frames = frames

    def __str__(self):
        return "AnimatedTexturePC d:{}x{} a:{}x{} f:{}".format(
            self.width, self.height, self.actual_width, self.actual_height, len(self.frames))

    def xml(self, parent=None):
        """Serialize to an 'AnimatedTexturePC' element under *parent* (or as a new root)."""
        if parent is None:
            node = ET.Element('AnimatedTexturePC')
        else:
            node = ET.SubElement(parent, 'AnimatedTexturePC')
        node.set('width', str(self.width))
        node.set('height', str(self.height))
        node.set('actualWidth', str(self.actual_width))
        node.set('actualHeight', str(self.actual_height))
        if self.frames is not None:
            self.frames.xml(node, 'Frames')
        return node

    def export(self, filename):
        """Write the texture data out as '<filename>.ani' when present."""
        if self.data is None:
            return
        texture = Texture2D(self.surface_format, self.width, self.height, [self.data])
        texture.export(filename + '.ani')
class Frame(object):
    """A single animation frame: a duration plus raw texture data."""

    def __init__(self, duration, data):
        self.duration = duration
        self.data = data

    def __str__(self):
        return "Frame d:{} s:{}".format(self.duration, len(self.data))

    def xml(self, parent):
        """Serialize as a 'Frame' child of *parent* (frame data is not emitted)."""
        node = ET.SubElement(parent, 'Frame')
        if self.duration is not None:
            node.set('duration', str(self.duration))
        return node
class FramePC(object):
    """PC animation frame: a duration plus a source rectangle in the atlas."""

    def __init__(self, duration, rectangle):
        self.duration = duration
        self.rectangle = rectangle

    def __str__(self):
        return "FramePC d:{} r:{}".format(self.duration, self.rectangle)

    def xml(self, parent):
        """Serialize as a 'FramePC' child of *parent*."""
        node = ET.SubElement(parent, 'FramePC')
        if self.duration is not None:
            node.set('duration', str(self.duration))
        if self.rectangle is not None:
            self.rectangle.xml(node)
        return node
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common.execution_config import ExecutionConfig
from pyflink.common.restart_strategy import RestartStrategies
from pyflink.datastream.checkpoint_config import CheckpointConfig
from pyflink.datastream.checkpointing_mode import CheckpointingMode
from pyflink.datastream.state_backend import _from_j_state_backend
from pyflink.datastream.time_characteristic import TimeCharacteristic
from pyflink.java_gateway import get_gateway
from pyflink.util.utils import load_java_class
__all__ = ['StreamExecutionEnvironment']
class StreamExecutionEnvironment(object):
"""
The StreamExecutionEnvironment is the context in which a streaming program is executed. A
*LocalStreamEnvironment* will cause execution in the attached JVM, a
*RemoteStreamEnvironment* will cause execution on a remote setup.
The environment provides methods to control the job execution (such as setting the parallelism
or the fault tolerance/checkpointing parameters) and to interact with the outside world (data
access).
"""
    def __init__(self, j_stream_execution_environment):
        """
        :param j_stream_execution_environment: The Py4J proxy of the Java
                                               StreamExecutionEnvironment that
                                               this object delegates to.
        """
        self._j_stream_execution_environment = j_stream_execution_environment
def get_config(self):
"""
Gets the config object.
:return: The :class:`~pyflink.common.ExecutionConfig` object.
"""
return ExecutionConfig(self._j_stream_execution_environment.getConfig())
def set_parallelism(self, parallelism):
"""
Sets the parallelism for operations executed through this environment.
Setting a parallelism of x here will cause all operators (such as map,
batchReduce) to run with x parallel instances. This method overrides the
default parallelism for this environment. The
*LocalStreamEnvironment* uses by default a value equal to the
number of hardware contexts (CPU cores / threads). When executing the
program via the command line client from a JAR file, the default degree
of parallelism is the one configured for that setup.
:param parallelism: The parallelism.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setParallelism(parallelism)
return self
def set_max_parallelism(self, max_parallelism):
"""
Sets the maximum degree of parallelism defined for the program. The upper limit (inclusive)
is 32767.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:param max_parallelism: Maximum degree of parallelism to be used for the program,
with 0 < maxParallelism <= 2^15 - 1.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setMaxParallelism(max_parallelism)
return self
def get_parallelism(self):
"""
Gets the parallelism with which operation are executed by default.
Operations can individually override this value to use a specific
parallelism.
:return: The parallelism used by operations, unless they override that value.
"""
return self._j_stream_execution_environment.getParallelism()
def get_max_parallelism(self):
"""
Gets the maximum degree of parallelism defined for the program.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:return: Maximum degree of parallelism.
"""
return self._j_stream_execution_environment.getMaxParallelism()
def set_buffer_timeout(self, timeout_millis):
"""
Sets the maximum time frequency (milliseconds) for the flushing of the
output buffers. By default the output buffers flush frequently to provide
low latency and to aid smooth developer experience. Setting the parameter
can result in three logical modes:
- A positive integer triggers flushing periodically by that integer
- 0 triggers flushing after every record thus minimizing latency
- -1 triggers flushing only when the output buffer is full thus maximizing throughput
:param timeout_millis: The maximum time between two output flushes.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setBufferTimeout(timeout_millis)
return self
def get_buffer_timeout(self):
"""
Gets the maximum time frequency (milliseconds) for the flushing of the
output buffers. For clarification on the extremal values see
:func:`set_buffer_timeout`.
:return: The timeout of the buffer.
"""
return self._j_stream_execution_environment.getBufferTimeout()
def disable_operator_chaining(self):
"""
Disables operator chaining for streaming operators. Operator chaining
allows non-shuffle operations to be co-located in the same thread fully
avoiding serialization and de-serialization.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.disableOperatorChaining()
return self
def is_chaining_enabled(self):
"""
Returns whether operator chaining is enabled.
:return: True if chaining is enabled, false otherwise.
"""
return self._j_stream_execution_environment.isChainingEnabled()
def get_checkpoint_config(self):
"""
Gets the checkpoint config, which defines values like checkpoint interval, delay between
checkpoints, etc.
:return: The :class:`~pyflink.datastream.CheckpointConfig`.
"""
j_checkpoint_config = self._j_stream_execution_environment.getCheckpointConfig()
return CheckpointConfig(j_checkpoint_config)
def enable_checkpointing(self, interval, mode=None):
"""
Enables checkpointing for the streaming job. The distributed state of the streaming
dataflow will be periodically snapshotted. In case of a failure, the streaming
dataflow will be restarted from the latest completed checkpoint.
The job draws checkpoints periodically, in the given interval. The system uses the
given :class:`~pyflink.datastream.CheckpointingMode` for the checkpointing ("exactly once"
vs "at least once"). The state will be stored in the configured state backend.
.. note::
Checkpointing iterative streaming dataflows in not properly supported at
the moment. For that reason, iterative jobs will not be started if used
with enabled checkpointing.
Example:
::
>>> env.enable_checkpointing(300000, CheckpointingMode.AT_LEAST_ONCE)
:param interval: Time interval between state checkpoints in milliseconds.
:param mode: The checkpointing mode, selecting between "exactly once" and "at least once"
guaranteed.
:return: This object.
"""
if mode is None:
self._j_stream_execution_environment = \
self._j_stream_execution_environment.enableCheckpointing(interval)
else:
j_checkpointing_mode = CheckpointingMode._to_j_checkpointing_mode(mode)
self._j_stream_execution_environment.enableCheckpointing(
interval,
j_checkpointing_mode)
return self
    def get_checkpoint_interval(self):
        """
        Returns the checkpointing interval or -1 if checkpointing is disabled.

        Shorthand for get_checkpoint_config().get_checkpoint_interval().

        :return: The checkpointing interval or -1.
        """
        return self._j_stream_execution_environment.getCheckpointInterval()
    def get_checkpointing_mode(self):
        """
        Returns the checkpointing mode (exactly-once vs. at-least-once).

        Shorthand for get_checkpoint_config().get_checkpointing_mode().

        :return: The :class:`~pyflink.datastream.CheckpointingMode`.
        """
        j_checkpointing_mode = self._j_stream_execution_environment.getCheckpointingMode()
        return CheckpointingMode._from_j_checkpointing_mode(j_checkpointing_mode)
    def get_state_backend(self):
        """
        Gets the state backend that defines how to store and checkpoint state.

        .. seealso:: :func:`set_state_backend`

        :return: The :class:`StateBackend`, converted from the underlying Java state backend.
        """
        j_state_backend = self._j_stream_execution_environment.getStateBackend()
        return _from_j_state_backend(j_state_backend)
    def set_state_backend(self, state_backend):
        """
        Sets the state backend that describes how to store and checkpoint operator state. It
        defines both which data structures hold state during execution (for example hash tables,
        RocksDB, or other data stores) as well as where checkpointed data will be persisted.

        The :class:`~pyflink.datastream.MemoryStateBackend` for example maintains the state in heap
        memory, as objects. It is lightweight without extra dependencies, but can checkpoint only
        small states (some counters).

        In contrast, the :class:`~pyflink.datastream.FsStateBackend` stores checkpoints of the state
        (also maintained as heap objects) in files. When using a replicated file system (like HDFS,
        S3, MapR FS, Alluxio, etc) this will guarantee that state is not lost upon failures of
        individual nodes and that streaming program can be executed highly available and strongly
        consistent (assuming that Flink is run in high-availability mode).

        The built-in state backends include:
        :class:`~pyflink.datastream.MemoryStateBackend`,
        :class:`~pyflink.datastream.FsStateBackend`
        and :class:`~pyflink.datastream.RocksDBStateBackend`.

        .. seealso:: :func:`get_state_backend`

        Example:
        ::

            >>> env.set_state_backend(RocksDBStateBackend("file://var/checkpoints/"))

        :param state_backend: The :class:`StateBackend` to use.
        :return: This object.
        """
        self._j_stream_execution_environment = \
            self._j_stream_execution_environment.setStateBackend(state_backend._j_state_backend)
        return self
    def set_restart_strategy(self, restart_strategy_configuration):
        """
        Sets the restart strategy configuration. The configuration specifies which restart strategy
        will be used for the execution graph in case of a restart.

        Example:
        ::

            >>> env.set_restart_strategy(RestartStrategies.no_restart())

        :param restart_strategy_configuration: Restart strategy configuration to be set.
        :return: None.
        """
        self._j_stream_execution_environment.setRestartStrategy(
            restart_strategy_configuration._j_restart_strategy_configuration)
    def get_restart_strategy(self):
        """
        Returns the specified restart strategy configuration.

        :return: The restart strategy configuration to be used, converted from the
                 underlying Java configuration object.
        """
        return RestartStrategies._from_j_restart_strategy(
            self._j_stream_execution_environment.getRestartStrategy())
    def add_default_kryo_serializer(self, type_class_name, serializer_class_name):
        """
        Adds a new Kryo default serializer to the Runtime.

        Example:
        ::

            >>> env.add_default_kryo_serializer("com.aaa.bbb.TypeClass", "com.aaa.bbb.Serializer")

        :param type_class_name: The full-qualified java class name of the types serialized with the
                                given serializer.
        :param serializer_class_name: The full-qualified java class name of the serializer to use.
        """
        # Both names are resolved to Java Class objects through the gateway.
        type_clz = load_java_class(type_class_name)
        j_serializer_clz = load_java_class(serializer_class_name)
        self._j_stream_execution_environment.addDefaultKryoSerializer(type_clz, j_serializer_clz)
    def register_type_with_kryo_serializer(self, type_class_name, serializer_class_name):
        """
        Registers the given Serializer via its class as a serializer for the given type at the
        KryoSerializer.

        Example:
        ::

            >>> env.register_type_with_kryo_serializer("com.aaa.bbb.TypeClass",
            ...                                        "com.aaa.bbb.Serializer")

        :param type_class_name: The full-qualified java class name of the types serialized with
                                the given serializer.
        :param serializer_class_name: The full-qualified java class name of the serializer to use.
        """
        # Both names are resolved to Java Class objects through the gateway.
        type_clz = load_java_class(type_class_name)
        j_serializer_clz = load_java_class(serializer_class_name)
        self._j_stream_execution_environment.registerTypeWithKryoSerializer(
            type_clz, j_serializer_clz)
    def register_type(self, type_class_name):
        """
        Registers the given type with the serialization stack. If the type is eventually
        serialized as a POJO, then the type is registered with the POJO serializer. If the
        type ends up being serialized with Kryo, then it will be registered at Kryo to make
        sure that only tags are written.

        Example:
        ::

            >>> env.register_type("com.aaa.bbb.TypeClass")

        :param type_class_name: The full-qualified java class name of the type to register.
        """
        type_clz = load_java_class(type_class_name)
        self._j_stream_execution_environment.registerType(type_clz)
    def set_stream_time_characteristic(self, characteristic):
        """
        Sets the time characteristic for all streams created from this environment, e.g.,
        processing time, event time, or ingestion time.

        If you set the characteristic to IngestionTime or EventTime this will set a default
        watermark update interval of 200 ms. If this is not applicable for your application
        you should change it using
        :func:`pyflink.common.ExecutionConfig.set_auto_watermark_interval`.

        Example:
        ::

            >>> env.set_stream_time_characteristic(TimeCharacteristic.EventTime)

        :param characteristic: The time characteristic, which could be
                               :data:`TimeCharacteristic.ProcessingTime`,
                               :data:`TimeCharacteristic.IngestionTime`,
                               :data:`TimeCharacteristic.EventTime`.
        """
        j_characteristic = TimeCharacteristic._to_j_time_characteristic(characteristic)
        self._j_stream_execution_environment.setStreamTimeCharacteristic(j_characteristic)
    def get_stream_time_characteristic(self):
        """
        Gets the time characteristic.

        .. seealso:: :func:`set_stream_time_characteristic`

        :return: The :class:`TimeCharacteristic`.
        """
        j_characteristic = self._j_stream_execution_environment.getStreamTimeCharacteristic()
        return TimeCharacteristic._from_j_time_characteristic(j_characteristic)
    def get_default_local_parallelism(self):
        """
        Gets the default parallelism that will be used for the local execution environment.

        :return: The default local parallelism.
        """
        return self._j_stream_execution_environment.getDefaultLocalParallelism()
    def set_default_local_parallelism(self, parallelism):
        """
        Sets the default parallelism that will be used for the local execution environment.

        :param parallelism: The parallelism to use as the default local parallelism.
        """
        self._j_stream_execution_environment.setDefaultLocalParallelism(parallelism)
    def execute(self, job_name=None):
        """
        Triggers the program execution. The environment will execute all parts of
        the program that have resulted in a "sink" operation. Sink operations are
        for example printing results or forwarding them to a message queue.

        The program execution will be logged and displayed with the provided name.

        :param job_name: Desired name of the job, optional.
        """
        # The Java API overloads execute(); only pass the name when given.
        if job_name is None:
            self._j_stream_execution_environment.execute()
        else:
            self._j_stream_execution_environment.execute(job_name)
    def get_execution_plan(self):
        """
        Creates the plan with which the system will execute the program, and returns it as
        a String using a JSON representation of the execution data flow graph.

        Note that this needs to be called, before the plan is executed.

        If the compiler could not be instantiated, or the master could not
        be contacted to retrieve information relevant to the execution planning,
        an exception will be thrown.

        :return: The execution plan of the program, as a JSON String.
        """
        return self._j_stream_execution_environment.getExecutionPlan()
@staticmethod
def get_execution_environment():
"""
Creates an execution environment that represents the context in which the
program is currently executed. If the program is invoked standalone, this
method returns a local execution environment.
:return: The execution environment of the context in which the program is executed.
"""
gateway = get_gateway()
j_stream_exection_environment = gateway.jvm.org.apache.flink.streaming.api.environment\
.StreamExecutionEnvironment.getExecutionEnvironment()
return StreamExecutionEnvironment(j_stream_exection_environment)
| |
"""
fs.wrapfs.limitsizefs
=====================
An FS wrapper class for limiting the size of the underlying FS.
This module provides the class LimitSizeFS, an FS wrapper that can limit the
total size of files stored in the wrapped FS.
"""
from __future__ import with_statement
from fs.errors import *
from fs.path import *
from fs.base import FS, threading, synchronize
from fs.wrapfs import WrapFS
from fs.filelike import FileWrapper
class LimitSizeFS(WrapFS):
    """FS wrapper class to limit total size of files stored.

    The wrapper keeps a running total (``cur_size``) of the bytes stored in
    the wrapped filesystem and raises ``StorageSpaceError`` for any write
    that would push the total beyond ``max_size``.
    """

    def __init__(self, fs, max_size):
        """Wrap `fs`, limiting its total content size to `max_size` bytes.

        A negative `max_size` is interpreted relative to the wrapped FS's
        reported "total_space" meta value (i.e. "leave this much free");
        ValueError is raised if that meta value is unavailable.
        """
        super(LimitSizeFS,self).__init__(fs)
        if max_size < 0:
            try:
                max_size = fs.getmeta("total_space") + max_size
            except NoMetaError:
                # Fixed undefined name: pyfilesystem's missing-meta error
                # class is NoMetaError (was `NotMetaError`).
                msg = "FS doesn't report total_size; "\
                      "can't use negative max_size"
                raise ValueError(msg)
        self.max_size = max_size
        self._size_lock = threading.Lock()
        #  path -> (recorded size, number of open handles)
        self._file_sizes = PathMap()
        self.cur_size = self._get_cur_size()

    def __getstate__(self):
        #  The lock and the cached size data are not picklable; they are
        #  rebuilt from the wrapped FS in __setstate__.
        state = super(LimitSizeFS,self).__getstate__()
        del state["cur_size"]
        del state["_size_lock"]
        del state["_file_sizes"]
        return state

    def __setstate__(self, state):
        super(LimitSizeFS,self).__setstate__(state)
        self._size_lock = threading.Lock()
        self._file_sizes = PathMap()
        self.cur_size = self._get_cur_size()

    def _get_cur_size(self,path="/"):
        """Total size of all files below `path` in the wrapped FS."""
        return sum(self.getsize(f) for f in self.walkfiles(path))

    def getsyspath(self, path, allow_none=False):
        # If people could grab syspaths, they could route around our
        # size protection; no dice!
        if not allow_none:
            raise NoSysPathError(path)
        return None

    def open(self, path, mode="r"):
        """Open `path`, wrapping the file so writes are size-checked."""
        path = relpath(normpath(path))
        with self._size_lock:
            try:
                size = self.getsize(path)
            except ResourceNotFoundError:
                size = 0
            f = super(LimitSizeFS,self).open(path,mode)
            if "w" not in mode:
                self._set_file_size(path,None,1)
            else:
                #  Opening for write truncates; the old bytes are released.
                self.cur_size -= size
                size = 0
                self._set_file_size(path,0,1)
            return LimitSizeFile(f,mode,size,self,path)

    def _set_file_size(self,path,size,incrcount=None):
        """Update the recorded (size, open-handle count) entry for `path`.

        A `size` of None keeps the currently-known size.  `incrcount`
        adjusts the open-handle count (+1 on open, -1 on close); when the
        count reaches zero the bookkeeping entry is discarded.
        """
        try:
            (cursize,count) = self._file_sizes[path]
        except KeyError:
            count = 0
            try:
                cursize = self.getsize(path)
            except ResourceNotFoundError:
                cursize = 0
        if size is None:
            size = cursize
        #  Apply the requested count adjustment.  The previous code
        #  incremented by one unconditionally (`if count is not None`),
        #  so entries were never released when files were closed.
        if incrcount is not None:
            count += incrcount
        if count == 0:
            del self._file_sizes[path]
        else:
            self._file_sizes[path] = (size,count)

    def setcontents(self, path, data, chunk_size=64*1024):
        """Write `data` (bytes or a file-like object) via our checked open()."""
        f = None
        try:
            f = self.open(path, 'wb')
            if hasattr(data, 'read'):
                chunk = data.read(chunk_size)
                while chunk:
                    f.write(chunk)
                    chunk = data.read(chunk_size)
            else:
                f.write(data)
        finally:
            if f is not None:
                f.close()

    def _file_closed(self, path):
        #  Drop one open-handle reference for `path`.
        self._set_file_size(path,None,-1)

    def _ensure_file_size(self, path, size, shrink=False):
        """Reserve space so that `path` may grow to `size` bytes.

        Raises StorageSpaceError if the growth would exceed max_size.
        With `shrink`, space is also released when `size` is smaller.
        Returns the size now on record for the file.
        """
        with self._size_lock:
            try:
                (cur_size,_) = self._file_sizes[path]
            except KeyError:
                try:
                    cur_size = self.getsize(path)
                except ResourceNotFoundError:
                    cur_size = 0
                self._set_file_size(path,cur_size,1)
            diff = size - cur_size
            if diff > 0:
                if self.cur_size + diff > self.max_size:
                    raise StorageSpaceError("write")
                self.cur_size += diff
                self._set_file_size(path,size)
                return size
            elif diff < 0 and shrink:
                self.cur_size += diff
                self._set_file_size(path,size)
                return size
            else:
                return cur_size

    #  We force use of several base FS methods,
    #  since they will fall back to writing out each file
    #  and thus will route through our size checking logic.
    def copy(self, src, dst, **kwds):
        FS.copy(self,src,dst,**kwds)

    def copydir(self, src, dst, **kwds):
        FS.copydir(self,src,dst,**kwds)

    def move(self, src, dst, **kwds):
        #  Prefer an atomic rename when the wrapped FS supports it and the
        #  destination is free (or may be overwritten).
        if self.getmeta("atomic.rename",False):
            if kwds.get("overwrite",False) or not self.exists(dst):
                try:
                    self.rename(src,dst)
                    return
                except FSError:
                    pass
        FS.move(self, src, dst, **kwds)

    def movedir(self, src, dst, **kwds):
        overwrite = kwds.get("overwrite",False)
        if self.getmeta("atomic.rename",False):
            if overwrite or not self.exists(dst):
                try:
                    self.rename(src,dst)
                    return
                except FSError:
                    pass
        FS.movedir(self,src,dst,**kwds)

    def rename(self, src, dst):
        if self.getmeta("atomic.rename",False):
            #  A rename that clobbers `dst` frees whatever `dst` held.
            try:
                dst_size = self._get_cur_size(dst)
            except ResourceNotFoundError:
                dst_size = 0
            super(LimitSizeFS,self).rename(src,dst)
            with self._size_lock:
                self.cur_size -= dst_size
                self._file_sizes.pop(src,None)
        else:
            if self.isdir(src):
                self.movedir(src,dst)
            else:
                self.move(src,dst)

    def remove(self, path):
        with self._size_lock:
            try:
                (size,_) = self._file_sizes[path]
            except KeyError:
                size = self.getsize(path)
            super(LimitSizeFS,self).remove(path)
            self.cur_size -= size
            self._file_sizes.pop(path,None)

    def removedir(self, path, recursive=False, force=False):
        # Walk and remove directories by hand, so that we
        # keep the size accounting precisely up to date.
        for nm in self.listdir(path):
            if not force:
                raise DirectoryNotEmptyError(path)
            cpath = pathjoin(path,nm)
            try:
                if self.isdir(cpath):
                    self.removedir(cpath,force=True)
                else:
                    self.remove(cpath)
            except ResourceNotFoundError:
                pass
        super(LimitSizeFS,self).removedir(path,recursive=recursive)

    def getinfo(self, path):
        info = super(LimitSizeFS,self).getinfo(path)
        #  Report at least the size we have on record for open files.
        try:
            info["size"] = max(self._file_sizes[path][0],info["size"])
        except KeyError:
            pass
        return info

    def getsize(self, path):
        size = super(LimitSizeFS,self).getsize(path)
        try:
            size = max(self._file_sizes[path][0],size)
        except KeyError:
            pass
        return size
class LimitSizeFile(FileWrapper):
    """File-like wrapper used by LimitSizeFS to police write sizes."""

    def __init__(self, file, mode, size, fs, path):
        super(LimitSizeFile, self).__init__(file, mode)
        self.size = size
        self.fs = fs
        self.path = path
        self._lock = fs._lock

    @synchronize
    def _write(self, data, flushing=False):
        # Reserve space for the write before delegating to the wrapped file.
        position = self.wrapped_file.tell()
        reserved = self.fs._ensure_file_size(self.path, position + len(data))
        result = super(LimitSizeFile, self)._write(data, flushing)
        self.size = reserved
        return result

    @synchronize
    def _truncate(self, size):
        # Shrink (or grow) the recorded size along with the file itself.
        adjusted = self.fs._ensure_file_size(self.path, size, shrink=True)
        result = super(LimitSizeFile, self)._truncate(size)
        self.size = adjusted
        return result

    @synchronize
    def close(self):
        # Close the wrapped file, then let the owning FS release its handle.
        super(LimitSizeFile, self).close()
        self.fs._file_closed(self.path)
| |
"""
ha_test.test_core
~~~~~~~~~~~~~~~~~
Provides tests to verify that Home Assistant core works.
"""
# pylint: disable=protected-access,too-many-public-methods
# pylint: disable=too-few-public-methods
import os
import unittest
import time
import threading
from datetime import datetime
import homeassistant as ha
class TestHomeAssistant(unittest.TestCase):
    """
    Tests the Home Assistant core classes.
    Currently only includes tests to test cases that do not
    get tested in the API integration tests.
    """

    def setUp(self): # pylint: disable=invalid-name
        """ things to be run when tests are started. """
        self.hass = ha.HomeAssistant()
        self.hass.states.set("light.Bowl", "on")
        self.hass.states.set("switch.AC", "off")

    def tearDown(self): # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.hass.stop()

    def test_get_config_path(self):
        """ Test get_config_path method. """
        # config_dir defaults to <cwd>/config
        self.assertEqual(os.path.join(os.getcwd(), "config"),
                         self.hass.config_dir)
        self.assertEqual(os.path.join(os.getcwd(), "config", "test.conf"),
                         self.hass.get_config_path("test.conf"))

    def test_block_till_stoped(self):
        """ Test if we can block till stop service is called. """
        blocking_thread = threading.Thread(target=self.hass.block_till_stopped)
        # Not started yet, so it cannot be alive.
        self.assertFalse(blocking_thread.is_alive())
        blocking_thread.start()
        # Python will now give attention to the other thread
        time.sleep(1)
        self.assertTrue(blocking_thread.is_alive())
        self.hass.services.call(ha.DOMAIN, ha.SERVICE_HOMEASSISTANT_STOP)
        # Ensure the worker pool has processed the stop call before polling.
        self.hass._pool.block_till_done()
        # hass.block_till_stopped checks every second if it should quit
        # we have to wait worst case 1 second
        wait_loops = 0
        while blocking_thread.is_alive() and wait_loops < 10:
            wait_loops += 1
            time.sleep(0.1)
        self.assertFalse(blocking_thread.is_alive())

    def test_track_point_in_time(self):
        """ Test track point in time. """
        before_birthday = datetime(1985, 7, 9, 12, 0, 0)
        birthday_paulus = datetime(1986, 7, 9, 12, 0, 0)
        after_birthday = datetime(1987, 7, 9, 12, 0, 0)
        runs = []
        self.hass.track_point_in_time(
            lambda x: runs.append(1), birthday_paulus)
        # A time-changed event before the target point must not fire.
        self._send_time_changed(before_birthday)
        self.hass._pool.block_till_done()
        self.assertEqual(0, len(runs))
        self._send_time_changed(birthday_paulus)
        self.hass._pool.block_till_done()
        self.assertEqual(1, len(runs))
        # A point in time tracker will only fire once, this should do nothing
        self._send_time_changed(birthday_paulus)
        self.hass._pool.block_till_done()
        self.assertEqual(1, len(runs))
        # A new tracker also fires when time has already passed the point.
        self.hass.track_point_in_time(
            lambda x: runs.append(1), birthday_paulus)
        self._send_time_changed(after_birthday)
        self.hass._pool.block_till_done()
        self.assertEqual(2, len(runs))

    def test_track_time_change(self):
        """ Test tracking time change. """
        wildcard_runs = []
        specific_runs = []
        # Wildcard listener fires on every time change; the specific one
        # only when seconds is 0 or 30.
        self.hass.track_time_change(lambda x: wildcard_runs.append(1))
        self.hass.track_time_change(
            lambda x: specific_runs.append(1), second=[0, 30])
        self._send_time_changed(datetime(2014, 5, 24, 12, 0, 0))
        self.hass._pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(1, len(wildcard_runs))
        self._send_time_changed(datetime(2014, 5, 24, 12, 0, 15))
        self.hass._pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(2, len(wildcard_runs))
        self._send_time_changed(datetime(2014, 5, 24, 12, 0, 30))
        self.hass._pool.block_till_done()
        self.assertEqual(2, len(specific_runs))
        self.assertEqual(3, len(wildcard_runs))

    def _send_time_changed(self, now):
        """ Send a time changed event. """
        self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: now})
class TestEvent(unittest.TestCase):
    """ Test Event class. """

    def test_repr(self):
        """ Test that repr method works. #MoreCoverage """
        local_event = ha.Event("TestEvent")
        self.assertEqual("<Event TestEvent[L]>", str(local_event))

        remote_event = ha.Event("TestEvent",
                                {"beer": "nice"},
                                ha.EventOrigin.remote)
        self.assertEqual("<Event TestEvent[R]: beer=nice>", str(remote_event))
class TestEventBus(unittest.TestCase):
    """ Test EventBus methods. """

    def setUp(self): # pylint: disable=invalid-name
        """ things to be run when tests are started. """
        self.bus = ha.EventBus()
        self.bus.listen('test_event', lambda x: len)

    def tearDown(self): # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.bus._pool.stop()

    def test_add_remove_listener(self):
        """ Test remove_listener method. """
        initial_count = len(self.bus.listeners)
        registered = lambda x: len
        self.bus.listen('test', registered)
        self.assertEqual(initial_count + 1, len(self.bus.listeners))
        # Removing a listener that was never registered is a no-op.
        self.bus.remove_listener('test', lambda x: len)
        # Removing the registered listener restores the original count.
        self.bus.remove_listener('test', registered)
        self.assertEqual(initial_count, len(self.bus.listeners))
        # Removing again, now that the category is gone, must be harmless.
        self.bus.remove_listener('test', registered)

    def test_listen_once_event(self):
        """ Test listen_once_event method. """
        calls = []
        self.bus.listen_once('test_event', lambda x: calls.append(1))
        self.bus.fire('test_event')
        self.bus._pool.block_till_done()
        self.assertEqual(1, len(calls))
        # Firing again must not re-invoke the one-shot listener.
        self.bus.fire('test_event')
        self.bus._pool.block_till_done()
        self.assertEqual(1, len(calls))
class TestState(unittest.TestCase):
    """ Test State class. """

    def test_init(self):
        """ Test state.init """
        self.assertRaises(
            ha.InvalidEntityFormatError, ha.State,
            'invalid_entity_format', 'test_state')

    def test_repr(self):
        """ Test state.repr """
        fixed_time = datetime(1984, 12, 8, 12, 0, 0)

        plain = ha.State("happy.happy", "on", last_changed=fixed_time)
        self.assertEqual("<state on @ 12:00:00 08-12-1984>", str(plain))

        with_attrs = ha.State("happy.happy", "on", {"brightness": 144},
                              fixed_time)
        self.assertEqual("<state on:brightness=144 @ 12:00:00 08-12-1984>",
                         str(with_attrs))
class TestStateMachine(unittest.TestCase):
    """ Test StateMachine methods. """

    def setUp(self): # pylint: disable=invalid-name
        """ things to be run when tests are started. """
        self.bus = ha.EventBus()
        self.states = ha.StateMachine(self.bus)
        self.states.set("light.Bowl", "on")
        self.states.set("switch.AC", "off")

    def tearDown(self): # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.bus._pool.stop()

    def test_is_state(self):
        """ Test is_state method. """
        self.assertTrue(self.states.is_state('light.Bowl', 'on'))
        self.assertFalse(self.states.is_state('light.Bowl', 'off'))
        self.assertFalse(self.states.is_state('light.Non_existing', 'on'))

    def test_entity_ids(self):
        """ Test get_entity_ids method. """
        ent_ids = self.states.entity_ids()
        self.assertEqual(2, len(ent_ids))
        self.assertTrue('light.Bowl' in ent_ids)
        self.assertTrue('switch.AC' in ent_ids)
        # Filtering by domain only returns matching entities.
        ent_ids = self.states.entity_ids('light')
        self.assertEqual(1, len(ent_ids))
        self.assertTrue('light.Bowl' in ent_ids)

    def test_remove(self):
        """ Test remove method. """
        self.assertTrue('light.Bowl' in self.states.entity_ids())
        self.assertTrue(self.states.remove('light.Bowl'))
        self.assertFalse('light.Bowl' in self.states.entity_ids())
        # If it does not exist, we should get False
        self.assertFalse(self.states.remove('light.Bowl'))

    def test_track_change(self):
        """ Test states.track_change. """
        # 2 lists to track how often our callbacks got called
        specific_runs = []
        wildcard_runs = []
        self.states.track_change(
            'light.Bowl', lambda a, b, c: specific_runs.append(1), 'on', 'off')
        self.states.track_change(
            'light.Bowl', lambda a, b, c: wildcard_runs.append(1),
            ha.MATCH_ALL, ha.MATCH_ALL)
        # Set same state should not trigger a state change/listener
        self.states.set('light.Bowl', 'on')
        self.bus._pool.block_till_done()
        self.assertEqual(0, len(specific_runs))
        self.assertEqual(0, len(wildcard_runs))
        # State change on -> off matches both listeners.
        self.states.set('light.Bowl', 'off')
        self.bus._pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(1, len(wildcard_runs))
        # State change off -> off (attribute change only): wildcard only.
        self.states.set('light.Bowl', 'off', {"some_attr": 1})
        self.bus._pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(2, len(wildcard_runs))
        # State change off -> on: wildcard only (specific wants on -> off).
        self.states.set('light.Bowl', 'on')
        self.bus._pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(3, len(wildcard_runs))
class TestServiceCall(unittest.TestCase):
    """ Test ServiceCall class. """

    def test_repr(self):
        """ Test repr method. """
        plain_call = ha.ServiceCall('homeassistant', 'start')
        self.assertEqual("<ServiceCall homeassistant.start>", str(plain_call))

        call_with_data = ha.ServiceCall('homeassistant', 'start',
                                        {"fast": "yes"})
        self.assertEqual("<ServiceCall homeassistant.start: fast=yes>",
                         str(call_with_data))
class TestServiceRegistry(unittest.TestCase):
    """ Test ServiceRegistry methods. """

    def setUp(self): # pylint: disable=invalid-name
        """ things to be run when tests are started. """
        self.pool = ha.create_worker_pool()
        self.bus = ha.EventBus(self.pool)
        self.services = ha.ServiceRegistry(self.bus, self.pool)
        noop_service = lambda x: len
        self.services.register("test_domain", "test_service", noop_service)

    def tearDown(self): # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.pool.stop()

    def test_has_service(self):
        """ Test has_service method. """
        self.assertTrue(
            self.services.has_service("test_domain", "test_service"))
| |
# --------------------------------------------------------------------------------------
# Author: cgarcia@umw.edu
# About: This file contains utility functions and classes used specifically in
# running scenarios and generating result reports
# --------------------------------------------------------------------------------------
import util as ut
from scipy import stats
import math
from knn import *
#-------------------------- STATISTICAL FUNCTIONS ------------------------
# Performs 2-sample proportion test of form:
# H0: p1 = p2, H1: p1 != p2
# Sample 1 and sample 2 are lists of 0's and 1's
# Returns a p-value
def proportion_test(sample_1, sample_2):
    """Two-sample, two-sided proportion z-test (H0: p1 == p2, H1: p1 != p2).

    Each sample is a list of 0's and 1's; returns the two-sided p-value.
    """
    n1 = float(len(sample_1))
    n2 = float(len(sample_2))
    p1 = float(sum(sample_1)) / n1
    p2 = float(sum(sample_2)) / n2
    z = (p1 - p2) / math.sqrt(((p1 * (1.0 - p1)) / n1) + ((p2 * (1.0 - p2)) / n2))
    # Two-sided p-value: P(|Z| >= |z|).  The previous `cdf(1.0 - z)` was not
    # a valid p-value (it returned ~0.84 for identical proportions).
    return 2.0 * stats.norm().sf(abs(z))
# Arithmetic mean of a non-empty collection of values, always as a float.
def mean(vals):
    count = len(vals)
    return float(sum(vals)) / float(count)
#-------------------------- UTILITY CLASSES ------------------------------
# This is a basic logger which echoes every message to the command line and
# retains 'report'-level lines so they can later be written to a file.
class BasicLogger(object):
    def __init__(self):
        # Report-level lines retained for write(), in arrival order.
        self.lines = []

    # Print `line`; additionally retain it when level is 'report'.
    def log(self, line, level='standard'):
        if 'report' == level.lower():
            self.lines.append(str(line))
        print(line)

    # Dump all retained report lines to the given output file.
    def write(self, output_file):
        ut.write_file("\n".join(self.lines), output_file)
# This is a simple class to record and accumulate artifacts
# generated in a scenario
class ScenarioRecorder(object):
    """Key -> value store for scenario artifacts.

    Keys may hold a single value (via set) or an accumulating list of
    values (via add); missing keys read back as the string 'NA'.
    """

    def __init__(self):
        self.records = {}

    # Add a new value to the specified key's value list
    def add(self, key, val):
        # `in` replaces the Python-2-only dict.has_key().
        if key not in self.records:
            self.records[key] = []
        self.records[key].append(val)

    # Set a key's value
    def set(self, key, val):
        self.records[key] = val

    # Get whatever is corresponding to the key, or 'NA' when absent.
    def get(self, key):
        if key in self.records:
            return self.records[key]
        return 'NA'

    # If the key holds a list of lists, join them all together
    # into one master list before returning.
    def get_flatten(self, key):
        try:
            parts = self.records[key]
            flat = parts[0]
            for part in parts[1:]:
                flat = flat + part
            return flat
        except Exception:
            # Fall back to the plain value.  (The original fallback called a
            # bare `get(key)`, an undefined global, raising NameError.)
            return self.get(key)

    # Get the keys for this recorder. If a prefix is specified,
    # get only the keys which start with the prefix.
    def keys(self, prefix = None):
        if prefix is not None:
            return [k for k in self.records.keys() if k.startswith(prefix)]
        return list(self.records.keys())
#-------------------------- UTILITY FUNCTIONS ----------------------------
# A solver is a function f: user -> msg
# Each element in solvers is a (solver, solver name) pair
def execute_trial(train_data, test_users, data_gen, solvers, recorder,
                  trial_name = None, measures_per_user = 1,
                  logger = None):
    """Run every solver over `test_users`, record accuracy/timing artifacts in
    `recorder`, and return a list of (solver_name, correct_frac, elapsed,
    responses) tuples, one per solver."""
    results = []
    if trial_name == None:
        trial_name = ''
    else:
        trial_name = ': ' + trial_name
    # Fall back to a no-op log function when no logger was supplied.  (A stray
    # unconditional `logger_f = logger.log` previously defeated this guard and
    # crashed whenever logger was None.)
    logger_f = logger.log if logger != None else lambda x, y: None
    logger_f('Executing comparison trial' + str(trial_name), 'standard')
    for (f, solver_name) in solvers:
        logger_f(" Starting solver: " + solver_name, 'standard')
        start_time = ut.curr_time()
        msgs = [f(u) for u in test_users]
        elapsed = ut.curr_time() - start_time
        resps = []
        for i in range(measures_per_user):
            resps += data_gen.gen_responses(test_users, msgs)
        # NOTE(review): len(resps) already includes the measures_per_user
        # factor, so this denominator may double-count it -- confirm the
        # intended scaling before changing.
        correct_frac = float(sum(resps)) / float(measures_per_user * len(resps))
        results.append((solver_name, correct_frac, elapsed, resps))
        add = lambda att, val: recorder.add(solver_name + '.' + str(att), val)
        add('correct_frac', correct_frac)
        add('responses', resps)
        recorder.add('elapsed_time', elapsed)
        logger_f(" Results (correct%, elapsed time): " + str((correct_frac, elapsed)), 'standard')
    return results
# A trial_initializer_f is a function which takes a recorder and logger as input and returns a tuple:
#    (train_data, test_users, data_generator, [(solver_f, name)])
# An analyzer_f is a procedure which takes these args (in order):
#   1) a recorder
#   2) a logger,
#   3) a list of solver names with the following convention:
#      Control solvers start with control_ and treatment solvers start with solver_
def run_trials(trial_initializer_f, analyzer_f, num_trials, recorder, logger):
    """Run `num_trials` trials, then hand the accumulated results to the analyzer."""
    recorder.set('num_trials', num_trials)
    main_start_time = ut.curr_time()
    # Initialize so a zero-trial run passes an empty solver list to the
    # analyzer instead of raising NameError.
    solvers = []
    for t in range(1, num_trials + 1):
        trial_start = ut.curr_time()
        logger.log('Starting new trial, initializing...', 'standard')
        train_data, test_users, data_generator, solvers = trial_initializer_f(recorder, logger)
        logger.log(' Time initializing: ' + str(ut.curr_time() - trial_start) + ' sec.', 'standard')
        execute_trial(train_data, test_users, data_generator, solvers, recorder,
                      trial_name = 'Trial ' + str(t), logger = logger)
    main_elapsed = ut.curr_time() - main_start_time
    recorder.set('main.elapsed_time', main_elapsed)
    # Solver names come from the last trial's solver list.  (Replaced the
    # Python-2-only tuple-unpacking lambda `lambda (x, y): y`, a SyntaxError
    # under Python 3.)
    analyzer_f(recorder, logger, [name for (_f, name) in solvers])
# For a list of test users and test messages, return the n best-performing.
# Used for a control case to compare other algorithms to.
# **NOTE: param msgs can be either 1) an integer, or 2) a list of pre-made messages
# If it is an integer, the specified number of random messages will be generated.
def n_best_messages(users, data_gen, msgs, n):
    # Materialize random candidate messages when a count was given.
    if type(msgs) == type(0):
        msgs = data_gen.gen_random_inters(msgs)
    # Rows of (user, msg, response) over the full user x message cross product.
    rows = list(zip(*data_gen.gen_crossprod_rows(users, msgs)))
    # Total positive responses attributed to message m.
    mcount = lambda m: sum(x[2] for x in rows if x[1] == m)
    # (Removed a dead `pos_count` lambda that referenced an undefined name.)
    results = [(msg, mcount(msg)) for msg in msgs]
    return [msg for (msg, _) in ut.top_n(results, n, lambda y: y[1])]
# Build (solver, name) pairs for each of the 3 standard controls
# which can go into execute_trial.
# **NOTE: param msgs can be either 1) an integer, or 2) a list of pre-made messages
# If it is an integer, the specified number of random messages will be generated.
def build_std_control_solvers(calibration_users, data_gen, msgs = 100, top_n = 15):
    b = data_gen
    # When msgs is an int, materialize that many candidate messages first.
    # NOTE(review): the same integer is passed as both the candidate pool size
    # and the number to keep, so this effectively generates `msgs` random
    # messages ranked by performance -- confirm this is intended.
    if(type(msgs)) == type(0):
        msgs = n_best_messages(calibration_users, b, msgs, msgs)
    best_msgs = n_best_messages(calibration_users, b, msgs, top_n)
    # NOTE(review): `rd` is not imported in this module directly; presumably it
    # arrives via `from knn import *` (a random-module alias) -- verify.
    # Control 1: select a random message each time
    ctrl_1 = lambda u: rd.sample(msgs, 1)[0]
    # Control 2: Always give the best performing out of the 100
    ctrl_2 = lambda u: best_msgs[0]
    # Control 3: randomly select one of the top 15 messages for each user
    ctrl_3 = lambda u: rd.sample(best_msgs, 1)[0]
    solvers = [(ctrl_1, 'control_1'),
               (ctrl_2, 'control_2'),
               (ctrl_3, 'control_3')]
    return solvers
# Builds all KNN solvers in (solver, name) pairs, which can go
# into execute_trial.
def build_all_knn_optims(train_data, calibration_users, data_gen, recorder,
                         min_k = 1, max_k = 15):
    """Build four treatment KNN solvers (mode and max-positive-proportion
    answer selection, each unweighted and exponentially weighted), tuning k
    for each on the calibration users and recording the chosen k values."""
    b = data_gen
    op = KNNOptimizer()
    op.set_data_rows(train_data)
    op.set_similarity_f(match_count)
    # Answer-selection strategies: (un)weighted mode and (un)weighted
    # max-positive-proportion selectors.
    asf_1 = build_weighted_mode_selector(lambda x: 1)
    asf_2 = build_weighted_mode_selector(lambda x: 10**x)
    asf_3 = build_weighted_max_pos_proportion_selector(lambda x: 1)
    asf_4 = build_weighted_max_pos_proportion_selector(lambda x: 10**x)
    response_f = lambda u, m: b.gen_response(u, m)
    k1 = op.find_best_k(calibration_users, min_k, max_k, asf_1, response_f)
    k2 = op.find_best_k(calibration_users, min_k, max_k, asf_2, response_f)
    k3 = op.find_best_k(calibration_users, min_k, max_k, asf_3, response_f)
    k4 = op.find_best_k(calibration_users, min_k, max_k, asf_4, response_f)
    recorder.add('solver_1.k', k1)
    recorder.add('solver_2.k', k2)
    recorder.add('solver_3.k', k3)
    recorder.add('solver_4.k', k4)
    # Previously only k1/k2 were printed even though four solvers are built.
    print('k1, k2, k3, k4: ' + str((k1, k2, k3, k4)))
    f_1 = lambda u: op.optimize(u, k1, asf_1)
    f_2 = lambda u: op.optimize(u, k2, asf_2)
    f_3 = lambda u: op.optimize(u, k3, asf_3)
    f_4 = lambda u: op.optimize(u, k4, asf_4)
    solvers = [(f_1, 'solver_1'),
               (f_2, 'solver_2'),
               (f_3, 'solver_3'),
               (f_4, 'solver_4')
              ]
    return solvers
# Builds standard (mode-based) KNN solvers in (solver, name) pairs, which can go
# into execute_trial.
def build_std_knn_optims(train_data, calibration_users, data_gen, recorder,
                         min_k = 1, max_k = 15):
    """Build the two mode-based KNN solvers (unweighted and exponentially
    weighted), tuning k for each on the calibration users and recording the
    chosen k values in the recorder."""
    optimizer = KNNOptimizer()
    optimizer.set_data_rows(train_data)
    optimizer.set_similarity_f(match_count)
    # Answer-selection strategies: unweighted mode, then exponentially
    # distance-weighted mode.
    selectors = [build_weighted_mode_selector(lambda x: 1),
                 build_weighted_mode_selector(lambda x: 10**x)]
    respond = lambda u, m: data_gen.gen_response(u, m)
    best_ks = [optimizer.find_best_k(calibration_users, min_k, max_k, sel, respond)
               for sel in selectors]
    for idx, k in enumerate(best_ks, 1):
        recorder.add('solver_%d.k' % idx, k)
    print('k1, k2: ' + str((best_ks[0], best_ks[1])))
    solvers = []
    for idx, (sel, k) in enumerate(zip(selectors, best_ks), 1):
        # Bind sel/k as defaults so each lambda keeps its own values.
        solvers.append((lambda u, k=k, sel=sel: optimizer.optimize(u, k, sel),
                        'solver_%d' % idx))
    return solvers
def standard_analyzer_f(recdr, logr, solver_names):
    """Log the end-of-experiment report.

    Emits per-solver k values, success percentages, pairwise proportion
    tests, and treatment/control success ratios to logr's 'report'
    channel.

    recdr: results recorder (looked up via get / get_flatten dotted keys).
    logr: logger receiving each report line.
    solver_names: names of all runs; 'control*' entries are controls and
    'solver*' entries are treatments.
    """
    log = lambda *x: logr.log(' '.join(map(lambda y: str(y), x)), 'report')
    # Build a dotted recorder key: 'prefix.att', or just 'prefix'.
    key = lambda x, y = None: (str(x) + '.' + str(y)) if y is not None else str(x)
    get = lambda prefix, att = None: recdr.get(key(prefix, att))
    fget = lambda prefix, att = None: recdr.get_flatten(key(prefix, att))
    pt = lambda s1, s2: proportion_test(fget(s1), fget(s2))
    # Bug fix: materialize with list() -- under Python 3 filter() returns a
    # one-shot iterator, which breaks the repeated iteration and the
    # tmts[i] indexing below. (Also dropped the unused 'all' variable that
    # shadowed the builtin.)
    ctrls = list(filter(lambda x: x.startswith('control'), solver_names))
    tmts = list(filter(lambda x: x.startswith('solver'), solver_names))
    log('-------------------- RESULTS ------------------------')
    log('Number of trials: ', get('num_trials'))
    for s in tmts:
        log(s + ' avg. k: ', mean(get(s, 'k')))
    for s in ctrls:
        log(s + ' avg. success %: ', mean(get(s, 'correct_frac')),
            ', (min, max) success %: ', (min(get(s, 'correct_frac')), max(get(s, 'correct_frac'))))
    for s in tmts:
        log(s + ' avg. success %: ', mean(get(s, 'correct_frac')),
            ', (min, max) success %: ', (min(get(s, 'correct_frac')), max(get(s, 'correct_frac'))))
    for c in ctrls:
        for s in tmts:
            log(s + ' vs. ' + c + ' (p-val): ', pt(s + '.responses', c + '.responses'))
    for i in range(len(tmts) - 1):
        # Bug fix: j starts at i + 1 -- the old range(1, len(tmts)) compared
        # solvers against themselves and re-logged earlier pairs. Both
        # directions of each pair are still logged, as before.
        for j in range(i + 1, len(tmts)):
            log(tmts[i] + ' vs. ' + tmts[j] + ' (p-val): ',
                pt(tmts[i] + '.responses', tmts[j] + '.responses'))
            log(tmts[j] + ' vs. ' + tmts[i] + ' (p-val): ',
                pt(tmts[j] + '.responses', tmts[i] + '.responses'))
    for s in tmts:
        for c in ctrls:
            # Bug fix: the label says 'Avg' but the ratio was computed from
            # max(); use the means so the number matches its description.
            log('Avg ' + s + '/ ' + c + ' ratio: ',
                mean(get(s, 'correct_frac')) / mean(get(c, 'correct_frac')))
    log('-------------------- TOTAL ELAPSED TIME: ', get('main', 'elapsed_time'), ' sec.')
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import glob
import os
import xml.dom.minidom as DOM
from textwrap import dedent
import coverage
from mock import patch
from pants.backend.python.tasks.pytest_run import PytestRun
from pants.base.exceptions import TestFailedTaskError
from pants.util.contextutil import pushd
from pants.util.timeout import TimeoutReached
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class PythonTestBuilderTestBase(PythonTaskTestBase):
  """Shared harness for driving the PytestRun task in tests."""
  @classmethod
  def task_type(cls):
    return PytestRun
  def run_tests(self, targets, **options):
    """Configure and execute the PytestRun task over `targets`."""
    # Defaults first; caller-supplied options win.
    merged = dict(colors=False, level='info')  # Set level to 'debug' when diagnosing a test failure.
    merged.update(options)
    self.set_options(**merged)
    task = self.create_task(self.context(target_roots=targets))
    with pushd(self.build_root):
      task.execute()
  def run_failing_tests(self, targets, failed_targets, **options):
    """Run `targets`, asserting failure with exactly `failed_targets` failing."""
    with self.assertRaises(TestFailedTaskError) as cm:
      self.run_tests(targets=targets, **options)
    self.assertEqual(set(failed_targets), set(cm.exception.failed_targets))
class PythonTestBuilderTestEmpty(PythonTestBuilderTestBase):
  def test_empty(self):
    """Running the task with no targets at all should succeed as a no-op."""
    self.run_tests(targets=[])
class PythonTestBuilderTest(PythonTestBuilderTestBase):
  """End-to-end tests for PytestRun: pass/fail reporting, timeouts,
  junit-xml output, the coverage options and shard selection, exercised
  against a small synthetic workspace assembled in setUp()."""
  def setUp(self):
    """Create a tiny python_library plus green/red/sleeping python_tests targets."""
    super(PythonTestBuilderTest, self).setUp()
    # Library under test. The '# line N' markers document the statement
    # line numbers that the coverage assertions below refer to.
    self.create_file(
      'lib/core.py',
      dedent("""
        def one(): # line 1
          return 1 # line 2
        # line 3
        # line 4
        def two(): # line 5
          return 2 # line 6
      """).strip())
    self.add_to_build_file(
      'lib',
      dedent("""
        python_library(
          name='core',
          sources=[
            'core.py'
          ]
        )
      """))
    # A test that passes.
    self.create_file(
      'tests/test_core_green.py',
      dedent("""
        import unittest2 as unittest
        import core
        class CoreGreenTest(unittest.TestCase):
          def test_one(self):
            self.assertEqual(1, core.one())
      """))
    # A module-level test function that fails (1 != core.two()).
    self.create_file(
      'tests/test_core_red.py',
      dedent("""
        import core
        def test_two():
          assert 1 == core.two()
      """))
    # A failing test inside a TestCase class (different failure-line format).
    self.create_file(
      'tests/test_core_red_in_class.py',
      dedent("""
        import unittest2 as unittest
        import core
        class CoreRedClassTest(unittest.TestCase):
          def test_one_in_class(self):
            self.assertEqual(1, core.two())
      """))
    # A passing test shared by the two timeout targets below.
    self.create_file(
      'tests/test_core_sleep.py',
      dedent("""
        import core
        def test_three():
          assert 1 == core.one()
      """))
    self.add_to_build_file(
      'tests',
      dedent("""
        python_tests(
          name='green',
          sources=[
            'test_core_green.py'
          ],
          dependencies=[
            'lib:core'
          ],
          coverage=[
            'core'
          ]
        )
        python_tests(
          name='red',
          sources=[
            'test_core_red.py',
          ],
          dependencies=[
            'lib:core'
          ],
          coverage=[
            'core'
          ]
        )
        python_tests(
          name='red_in_class',
          sources=[
            'test_core_red_in_class.py',
          ],
          dependencies=[
            'lib:core'
          ],
          coverage=[
            'core'
          ]
        )
        python_tests(
          name='sleep_no_timeout',
          sources=[
            'test_core_sleep.py',
          ],
          timeout = 0,
          dependencies=[
            'lib:core'
          ],
          coverage=[
            'core'
          ]
        )
        python_tests(
          name='sleep_timeout',
          sources=[
            'test_core_sleep.py',
          ],
          timeout = 1,
          dependencies=[
            'lib:core'
          ],
          coverage=[
            'core'
          ]
        )
        python_tests(
          name='all',
          sources=[
            'test_core_green.py',
            'test_core_red.py',
          ],
          dependencies=[
            'lib:core'
          ]
        )
        python_tests(
          name='all-with-coverage',
          sources=[
            'test_core_green.py',
            'test_core_red.py'
          ],
          dependencies=[
            'lib:core'
          ],
          coverage=[
            'core'
          ]
        )
      """))
    # Resolve the targets once; individual tests pick the combinations they need.
    self.green = self.target('tests:green')
    self.red = self.target('tests:red')
    self.red_in_class = self.target('tests:red_in_class')
    self.sleep_no_timeout = self.target('tests:sleep_no_timeout')
    self.sleep_timeout = self.target('tests:sleep_timeout')
    self.all = self.target('tests:all')
    self.all_with_coverage = self.target('tests:all-with-coverage')
  def test_green(self):
    """A fully passing target runs cleanly."""
    self.run_tests(targets=[self.green])
  def test_red(self):
    """A failing target is reported as the failed target."""
    self.run_failing_tests(targets=[self.red], failed_targets=[self.red])
  def test_red_test_in_class(self):
    """A failure inside a TestCase class is still attributed to its target."""
    # for test in a class, the failure line is in the following format
    # F testprojects/tests/python/pants/constants_only/test_fail.py::TestClassName::test_boom
    self.run_failing_tests(targets=[self.red_in_class], failed_targets=[self.red_in_class])
  def test_mixed(self):
    """Only the red target is reported failed when run alongside green."""
    self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red])
  def test_one_timeout(self):
    """When we have two targets, any of them doesn't have a timeout, and we have no default, then no timeout is set."""
    with patch('pants.backend.core.tasks.test_task_mixin.Timeout') as mock_timeout:
      self.run_tests(targets=[self.sleep_no_timeout, self.sleep_timeout])
      # Ensures that Timeout is instantiated with no timeout.
      args, kwargs = mock_timeout.call_args
      self.assertEqual(args, (None,))
  def test_timeout(self):
    """Check that a failed timeout returns the right results."""
    with patch('pants.backend.core.tasks.test_task_mixin.Timeout') as mock_timeout:
      mock_timeout().__exit__.side_effect = TimeoutReached(1)
      self.run_failing_tests(targets=[self.sleep_timeout],
                             failed_targets=[self.sleep_timeout])
      # Ensures that Timeout is instantiated with a 1 second timeout.
      args, kwargs = mock_timeout.call_args
      self.assertEqual(args, (1,))
  def test_junit_xml_option(self):
    """One junit xml report is written and reflects the pass/fail counts."""
    # We expect xml of the following form:
    # <testsuite errors=[Ne] failures=[Nf] skips=[Ns] tests=[Nt] ...>
    #   <testcase classname="..." name="..." .../>
    #   <testcase classname="..." name="..." ...>
    #     <failure ...>...</failure>
    #   </testcase>
    # </testsuite>
    report_basedir = os.path.join(self.build_root, 'dist', 'junit_option')
    self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
                           junit_xml_dir=report_basedir)
    files = glob.glob(os.path.join(report_basedir, '*.xml'))
    self.assertEqual(1, len(files), 'Expected 1 file, found: {}'.format(files))
    junit_xml = files[0]
    root = DOM.parse(junit_xml).documentElement
    self.assertEqual(2, len(root.childNodes))
    self.assertEqual(2, int(root.getAttribute('tests')))
    self.assertEqual(1, int(root.getAttribute('failures')))
    self.assertEqual(0, int(root.getAttribute('errors')))
    self.assertEqual(0, int(root.getAttribute('skips')))
    children_by_test_name = dict((elem.getAttribute('name'), elem) for elem in root.childNodes)
    self.assertEqual(0, len(children_by_test_name['test_one'].childNodes))
    self.assertEqual(1, len(children_by_test_name['test_two'].childNodes))
    self.assertEqual('failure', children_by_test_name['test_two'].firstChild.nodeName)
  def coverage_data_file(self):
    """Path of the .coverage data file the coverage runs produce."""
    return os.path.join(self.build_root, '.coverage')
  def load_coverage_data(self, path):
    """Return (all_statements, not_run_statements) recorded for `path`."""
    data_file = self.coverage_data_file()
    self.assertTrue(os.path.isfile(data_file))
    coverage_data = coverage.coverage(data_file=data_file)
    coverage_data.load()
    _, all_statements, not_run_statements, _ = coverage_data.analysis(path)
    return all_statements, not_run_statements
  def test_coverage_simple_option(self):
    """coverage='1' uses each target's `coverage=` attribute (or nothing)."""
    # TODO(John Sirois): Consider eliminating support for "simple" coverage or at least formalizing
    # the coverage option value that turns this on to "1" or "all" or "simple" = anything formal.
    simple_coverage_kwargs = {'coverage': '1'}
    self.assertFalse(os.path.isfile(self.coverage_data_file()))
    covered_file = os.path.join(self.build_root, 'lib', 'core.py')
    # Green only exercises one(); two() (line 6) stays uncovered.
    self.run_tests(targets=[self.green], **simple_coverage_kwargs)
    all_statements, not_run_statements = self.load_coverage_data(covered_file)
    self.assertEqual([1, 2, 5, 6], all_statements)
    self.assertEqual([6], not_run_statements)
    # Red only exercises two(); one() (line 2) stays uncovered.
    self.run_failing_tests(targets=[self.red], failed_targets=[self.red], **simple_coverage_kwargs)
    all_statements, not_run_statements = self.load_coverage_data(covered_file)
    self.assertEqual([1, 2, 5, 6], all_statements)
    self.assertEqual([2], not_run_statements)
    # Green + red together cover everything.
    self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
                           **simple_coverage_kwargs)
    all_statements, not_run_statements = self.load_coverage_data(covered_file)
    self.assertEqual([1, 2, 5, 6], all_statements)
    self.assertEqual([], not_run_statements)
    # The all target has no coverage attribute and the code under test does not follow the
    # auto-discover pattern so we should get no coverage.
    self.run_failing_tests(targets=[self.all], failed_targets=[self.all], **simple_coverage_kwargs)
    all_statements, not_run_statements = self.load_coverage_data(covered_file)
    self.assertEqual([1, 2, 5, 6], all_statements)
    self.assertEqual([1, 2, 5, 6], not_run_statements)
    self.run_failing_tests(targets=[self.all_with_coverage],
                           failed_targets=[self.all_with_coverage],
                           **simple_coverage_kwargs)
    all_statements, not_run_statements = self.load_coverage_data(covered_file)
    self.assertEqual([1, 2, 5, 6], all_statements)
    self.assertEqual([], not_run_statements)
  def test_coverage_modules_dne_option(self):
    """Nonexistent modules in 'modules:' yield zero coverage."""
    self.assertFalse(os.path.isfile(self.coverage_data_file()))
    covered_file = os.path.join(self.build_root, 'lib', 'core.py')
    # modules: should trump .coverage
    self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
                           coverage='modules:does_not_exist,nor_does_this')
    all_statements, not_run_statements = self.load_coverage_data(covered_file)
    self.assertEqual([1, 2, 5, 6], all_statements)
    self.assertEqual([1, 2, 5, 6], not_run_statements)
  def test_coverage_modules_option(self):
    """'modules:core' measures the core module even without a coverage attribute."""
    self.assertFalse(os.path.isfile(self.coverage_data_file()))
    covered_file = os.path.join(self.build_root, 'lib', 'core.py')
    self.run_failing_tests(targets=[self.all], failed_targets=[self.all], coverage='modules:core')
    all_statements, not_run_statements = self.load_coverage_data(covered_file)
    self.assertEqual([1, 2, 5, 6], all_statements)
    self.assertEqual([], not_run_statements)
  def test_coverage_paths_dne_option(self):
    """Nonexistent paths in 'paths:' yield zero coverage."""
    self.assertFalse(os.path.isfile(self.coverage_data_file()))
    covered_file = os.path.join(self.build_root, 'lib', 'core.py')
    # paths: should trump .coverage
    self.run_failing_tests(targets=[self.green, self.red], failed_targets=[self.red],
                           coverage='paths:does_not_exist/,nor_does_this/')
    all_statements, not_run_statements = self.load_coverage_data(covered_file)
    self.assertEqual([1, 2, 5, 6], all_statements)
    self.assertEqual([1, 2, 5, 6], not_run_statements)
  def test_coverage_paths_option(self):
    """'paths:core.py' measures the file even without a coverage attribute."""
    self.assertFalse(os.path.isfile(self.coverage_data_file()))
    covered_file = os.path.join(self.build_root, 'lib', 'core.py')
    self.run_failing_tests(targets=[self.all], failed_targets=[self.all], coverage='paths:core.py')
    all_statements, not_run_statements = self.load_coverage_data(covered_file)
    self.assertEqual([1, 2, 5, 6], all_statements)
    self.assertEqual([], not_run_statements)
  def test_sharding(self):
    """Each shard runs a disjoint subset; the red test lands in shard 0 here."""
    self.run_failing_tests(targets=[self.red, self.green], failed_targets=[self.red], shard='0/2')
    self.run_tests(targets=[self.red, self.green], shard='1/2')
  def test_sharding_single(self):
    """A single shard of one runs everything."""
    self.run_failing_tests(targets=[self.red], failed_targets=[self.red], shard='0/1')
  def test_sharding_invalid_shard_too_small(self):
    """A negative shard index is rejected."""
    with self.assertRaises(PytestRun.InvalidShardSpecification):
      self.run_tests(targets=[self.green], shard='-1/1')
  def test_sharding_invalid_shard_too_big(self):
    """A shard index >= the shard count is rejected."""
    with self.assertRaises(PytestRun.InvalidShardSpecification):
      self.run_tests(targets=[self.green], shard='1/1')
  def test_sharding_invalid_shard_bad_format(self):
    """Malformed shard specs ('1', '1/2/3', '1/a') are rejected."""
    with self.assertRaises(PytestRun.InvalidShardSpecification):
      self.run_tests(targets=[self.green], shard='1')
    with self.assertRaises(PytestRun.InvalidShardSpecification):
      self.run_tests(targets=[self.green], shard='1/2/3')
    with self.assertRaises(PytestRun.InvalidShardSpecification):
      self.run_tests(targets=[self.green], shard='1/a')
| |
#!/usr/bin/env python2
# encoding: utf-8
# Author: Alexandre Fonseca
# Description:
# Installs, configures and manages Hadoop on a set of nodes
# in a cluster.
# Associated guide:
# http://www.alexjf.net/blog/distributed-systems/hadoop-yarn-installation-definitive-guide
import os
from fabric.api import run, cd, env, settings, put, sudo
from fabric.decorators import runs_once, parallel
from fabric.tasks import execute
###############################################################
# START OF YOUR CONFIGURATION (CHANGE FROM HERE, IF NEEDED)   #
###############################################################
#### Generic ####
# Remote login user for every Fabric connection.
SSH_USER = "ubuntu"
# If you need to specify a special ssh key, do it here (e.g EC2 key)
#env.key_filename = "~/.ssh/giraph.pem"
#### EC2 ####
# Is this an EC2 deployment? If so, then we'll autodiscover the right nodes.
EC2 = False
# In case this is an EC2 deployment, all cluster nodes must have a tag with
# 'Cluster' as key and the following property as value.
EC2_CLUSTER_NAME = "rtgiraph"
# Read AWS access key details from env if available
AWS_ACCESSKEY_ID = os.getenv("AWS_ACCESSKEY_ID", "undefined")
AWS_ACCESSKEY_SECRET = os.getenv("AWS_ACCESSKEY_SECRET", "undefined")
# In case the instances you use have an extra storage device which is not
# automatically mounted, specify here the path to that device.
EC2_INSTANCE_STORAGEDEV = None
#EC2_INSTANCE_STORAGEDEV = "/dev/xvdb" For Ubuntu r3.xlarge instances
#### Package Information ####
HADOOP_VERSION = "1.2.1"
HADOOP_PACKAGE = "hadoop-%s" % HADOOP_VERSION
HADOOP_PACKAGE_URL = "http://apache.crihan.fr/dist/hadoop/common/stable1/%s.tar.gz" % HADOOP_PACKAGE
# Install prefix on every node; the tarball is unpacked here by install().
HADOOP_PREFIX = "/home/ubuntu/Programs/%s" % HADOOP_PACKAGE
# Hadoop 1.x keeps its configuration under <prefix>/conf.
HADOOP_CONF = os.path.join(HADOOP_PREFIX, "conf")
#### Installation information ####
# Change this to the command you would use to install packages on the
# remote hosts.
PACKAGE_MANAGER_INSTALL = "apt-get -qq install %s" # Debian/Ubuntu
#PACKAGE_MANAGER_INSTALL = "pacman -S %s" # Arch Linux
#PACKAGE_MANAGER_INSTALL = "yum install %s" # CentOS
# Change this list to the list of packages required by Hadoop
# In principle, should just be a JRE for Hadoop, Python
# for the Hadoop Configuration replacement script and wget
# to get the Hadoop package
REQUIREMENTS = ["wget", "python", "openjdk-7-jre-headless"] # Debian/Ubuntu
#REQUIREMENTS = ["wget", "python", "jre7-openjdk-headless"] # Arch Linux
#REQUIREMENTS = ["wget", "python", "java-1.7.0-openjdk-devel"] # CentOS
# Commands to execute (in order) before installing listed requirements
# (will run as root). Use to configure extra repos or update repos
REQUIREMENTS_PRE_COMMANDS = []
# If you want to install Oracle's Java instead of using the OpenJDK that
# comes preinstalled with most distributions replace the previous options
# with a variation of the following: (UBUNTU only)
#REQUIREMENTS = ["wget", "python", "oracle-java7-installer"] # Debian/Ubuntu
#REQUIREMENTS_PRE_COMMANDS = [
#"add-apt-repository ppa:webupd8team/java -y",
#"apt-get -qq update",
#"echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections",
#"echo debconf shared/accepted-oracle-license-v1-1 seen true | debconf-set-selections",
#]
#### Environment ####
# Set this to True/False depending on whether or not ENVIRONMENT_FILE
# points to an environment file that is automatically loaded in a new
# shell session
ENVIRONMENT_FILE_NOTAUTOLOADED = False
ENVIRONMENT_FILE = "/home/ubuntu/.bashrc"
#ENVIRONMENT_FILE_NOTAUTOLOADED = True
#ENVIRONMENT_FILE = "/home/ubuntu/hadoop2_env.sh"
# Should the ENVIRONMENT_VARIABLES be applies to a clean (empty) environment
# file or should they simply be merged (only additions and updates) into the
# existing environment file? In any case, the previous version of the file
# will be backed up.
ENVIRONMENT_FILE_CLEAN = False
# (variable, value) pairs written as 'export VAR=value' by setupEnvironment().
# NOTE(review): the r"\\$..." values write a literal backslash before '$'
# into the remote file -- presumably to defer shell expansion; confirm the
# intended escaping on the target shell.
ENVIRONMENT_VARIABLES = [
    ("JAVA_HOME", "/usr/lib/jvm/java-7-openjdk-amd64"), # Debian/Ubuntu 64 bits
    #("JAVA_HOME", "/usr/lib/jvm/java-7-openjdk"), # Arch Linux
    #("JAVA_HOME", "/usr/java/jdk1.7.0_51"), # CentOS
    ("HADOOP_PREFIX", HADOOP_PREFIX),
    ("HADOOP_HOME", r"\\$HADOOP_PREFIX"),
    ("HADOOP_COMMON_HOME", r"\\$HADOOP_PREFIX"),
    ("HADOOP_CONF_DIR", r"\\$HADOOP_PREFIX/conf"),
    ("HADOOP_HDFS_HOME", r"\\$HADOOP_PREFIX"),
    ("HADOOP_MAPRED_HOME", r"\\$HADOOP_PREFIX"),
    ("HADOOP_YARN_HOME", r"\\$HADOOP_PREFIX"),
    ("HADOOP_PID_DIR", "/tmp/hadoop_%s" % HADOOP_VERSION),
    ("YARN_PID_DIR", r"\\$HADOOP_PID_DIR"),
    ("PATH", r"\\$HADOOP_PREFIX/bin:\\$PATH"),
]
#### Host data (for non-EC2 deployments) ####
HOSTS_FILE="/etc/hosts"
NET_INTERFACE="eth0"
NAMENODE_HOST = "namenode.alexjf.net"
JOBTRACKER_HOST = "jobtracker.alexjf.net"
JOBTRACKER_PORT = 9021
SLAVE_HOSTS = ["slave%d.alexjf.net" % i for i in range(1, 6)]
# Or equivalently
#SLAVE_HOSTS = ["slave1.alexjf.net", "slave2.alexjf.net",
#               "slave3.alexjf.net", "slave4.alexjf.net",
#               "slave5.alexjf.net"]
#### Configuration ####
# Should the configuration options be applied to a clean (empty) configuration
# file or should they simply be merged (only additions and updates) into the
# existing environment file? In any case, the previous version of the file
# will be backed up.
CONFIGURATION_FILES_CLEAN = False
# Remote directories for Hadoop temp data and HDFS storage.
HADOOP_TEMP = "/mnt/hadoop/tmp"
HDFS_DATA_DIR = "/mnt/hdfs/datanode"
HDFS_NAME_DIR = "/mnt/hdfs/namenode"
IMPORTANT_DIRS = [HADOOP_TEMP, HDFS_DATA_DIR, HDFS_NAME_DIR]
# Need to do this in a function so that we can rewrite the values when any
# of the hosts change in runtime (e.g. EC2 node discovery).
def updateHadoopSiteValues():
    """Recompute the Hadoop *-site.xml property dictionaries.

    Lives in a function (rather than plain module constants) so the values
    can be rebuilt after the hosts change at runtime, e.g. following EC2
    node discovery.
    """
    # Fix: dropped the stale YARN_SITE_VALUES name from the global list --
    # this Hadoop 1.x fabfile (jobtracker/tasktracker) never defines or
    # assigns it anywhere.
    global CORE_SITE_VALUES, HDFS_SITE_VALUES, MAPRED_SITE_VALUES
    CORE_SITE_VALUES = {
        "fs.default.name": "hdfs://%s:9020/" % NAMENODE_HOST,
    }
    HDFS_SITE_VALUES = {
        # The directory constants are already strings; no formatting needed.
        "dfs.data.dir": HDFS_DATA_DIR,
        "dfs.name.dir": HDFS_NAME_DIR,
        "dfs.http.address": "0.0.0.0:50071",
        "dfs.datanode.address": "0.0.0.0:50011",
        "dfs.datanode.ipc.address": "0.0.0.0:50021",
        "dfs.datanode.http.address": "0.0.0.0:50076",
        "dfs.permissions": "false",
    }
    MAPRED_SITE_VALUES = {
        "mapred.map.child.java.opts": "-Xmx768m",
        "mapred.reduce.child.java.opts": "-Xmx768m",
        "mapred.job.tracker.http.address": "0.0.0.0:50031",
        "mapred.task.tracker.http.address": "0.0.0.0:50061",
    }
##############################################################
#  END OF YOUR CONFIGURATION (CHANGE UNTIL HERE, IF NEEDED)  #
##############################################################
#####################################################################
#  DON'T CHANGE ANYTHING BELOW (UNLESS YOU KNOW WHAT YOU'RE DOING)  #
#####################################################################
# Placeholders; populated by updateHadoopSiteValues() during bootstrap.
CORE_SITE_VALUES = {}
HDFS_SITE_VALUES = {}
MAPRED_SITE_VALUES = {}
def bootstrapFabric():
    """Prepare Fabric's env from the configured (or EC2-discovered) hosts."""
    if EC2:
        # Overwrites NAMENODE_HOST / JOBTRACKER_HOST / SLAVE_HOSTS globals.
        readHostsFromEC2()
    # Site dictionaries depend on the hosts chosen above.
    updateHadoopSiteValues()
    env.user = SSH_USER
    hosts = [NAMENODE_HOST, JOBTRACKER_HOST] + SLAVE_HOSTS
    seen = set()
    # Remove empty hosts and duplicates
    cleanedHosts = [host for host in hosts if host and host not in seen and not seen.add(host)]
    env.hosts = cleanedHosts
    if JOBTRACKER_HOST:
        MAPRED_SITE_VALUES["mapred.job.tracker"] = "%s:%s" % \
            (JOBTRACKER_HOST, JOBTRACKER_PORT)
# MAIN FUNCTIONS
def forceStopEveryJava():
    """Kill every JVM on the host except the jps process doing the listing."""
    run("jps | grep -vi jps | cut -d ' ' -f 1 | xargs -L1 -r kill")
@runs_once
def debugHosts():
    """Print the resolved cluster roles (runs once, not per host)."""
    print("Name node: {}".format(NAMENODE_HOST))
    print("Job Tracker: {}".format(JOBTRACKER_HOST))
    print("Slaves: {}".format(SLAVE_HOSTS))
def bootstrap():
    """Full first-time node setup: storage, packages, Hadoop, config, HDFS format."""
    with settings(warn_only=True):
        # Format and mount the extra storage device only if /mnt is not
        # already a mountpoint (mountpoint(1) fails when it is not).
        if EC2_INSTANCE_STORAGEDEV and run("mountpoint /mnt").failed:
            sudo("mkfs.ext4 %s" % EC2_INSTANCE_STORAGEDEV)
            sudo("mount %s /mnt" % EC2_INSTANCE_STORAGEDEV)
            sudo("chmod 0777 /mnt")
            sudo("rm -rf /tmp/hadoop-ubuntu")
    ensureImportantDirectoriesExist()
    installDependencies()
    install()
    setupEnvironment()
    config()
    setupHosts()
    formatHdfs()
def ensureImportantDirectoriesExist():
    """Create every directory Hadoop depends on (temp + HDFS dirs) remotely."""
    for path in IMPORTANT_DIRS:
        ensureDirectoryExists(path)
def installDependencies():
    """Run the repo-setup commands, then install each required package."""
    for preCommand in REQUIREMENTS_PRE_COMMANDS:
        sudo(preCommand)
    for package in REQUIREMENTS:
        sudo(PACKAGE_MANAGER_INSTALL % package)
def install():
    """Download (if not already cached) and unpack the Hadoop tarball."""
    installDirectory = os.path.dirname(HADOOP_PREFIX)
    run("mkdir -p %s" % installDirectory)
    with cd(installDirectory):
        with settings(warn_only=True):
            # Only fetch the tarball when it is missing on the host.
            if run("test -f %s.tar.gz" % HADOOP_PACKAGE).failed:
                run("wget -O %s.tar.gz %s" % (HADOOP_PACKAGE, HADOOP_PACKAGE_URL))
        run("tar --overwrite -xf %s.tar.gz" % HADOOP_PACKAGE)
def config():
    """Write/refresh the three Hadoop 1.x site configuration files."""
    siteFiles = (("core-site.xml", CORE_SITE_VALUES),
                 ("hdfs-site.xml", HDFS_SITE_VALUES),
                 ("mapred-site.xml", MAPRED_SITE_VALUES))
    for fileName, values in siteFiles:
        changeHadoopProperties(fileName, values)
def configRevertPrevious():
    """Undo the latest config() by restoring each site file's newest backup."""
    for fileName in ("core-site.xml", "hdfs-site.xml", "mapred-site.xml"):
        revertHadoopPropertiesChange(fileName)
def setupEnvironment():
    """Back up the remote environment file, then upsert every Hadoop variable.

    Each (VAR, value) from ENVIRONMENT_VARIABLES becomes an 'export VAR=value'
    line: replaced in place via sed when an export already exists, appended
    otherwise.
    """
    with settings(warn_only=True):
        if not run("test -f %s" % ENVIRONMENT_FILE).failed:
            # 'cp' merges into the existing file; 'mv' leaves a clean one.
            op = "cp"
            if ENVIRONMENT_FILE_CLEAN:
                op = "mv"
            currentBakNumber = getLastBackupNumber(ENVIRONMENT_FILE) + 1
            run("%(op)s %(file)s %(file)s.bak%(bakNumber)d" %
                {"op": op, "file": ENVIRONMENT_FILE, "bakNumber": currentBakNumber})
    run("touch %s" % ENVIRONMENT_FILE)
    for variable, value in ENVIRONMENT_VARIABLES:
        # Line number of an existing 'export VAR=' entry; empty when absent.
        lineNumber = run("grep -n 'export\s\+%(var)s\=' '%(file)s' | cut -d : -f 1" %
            {"var": variable, "file": ENVIRONMENT_FILE})
        try:
            lineNumber = int(lineNumber)
            run("sed -i \"" + str(lineNumber) + "s@.*@export %(var)s\=%(val)s@\" '%(file)s'" %
                {"var": variable, "val": value, "file": ENVIRONMENT_FILE})
        except ValueError:
            # grep found nothing (or multiple lines): append a fresh export.
            run("echo \"export %(var)s=%(val)s\" >> \"%(file)s\"" %
                {"var": variable, "val": value, "file": ENVIRONMENT_FILE})
def environmentRevertPrevious():
    """Restore the most recent backup of the remote environment file."""
    revertBackup(ENVIRONMENT_FILE)
def formatHdfs():
    """Format the HDFS namespace; a no-op on every host except the namenode."""
    if env.host == NAMENODE_HOST:
        operationInHadoopEnvironment(r"\\$HADOOP_PREFIX/bin/hadoop namenode -format")
@runs_once
def setupHosts():
    """Collect every node's private IP and push /etc/hosts updates.

    Runs once for the whole task invocation: gathers IPs from all hosts via
    fabric's execute(), updates each host's hosts file, and on the
    jobtracker writes a 'privateIps' file with one IP per line.
    """
    privateIps = execute(getPrivateIp)
    execute(updateHosts, privateIps)
    if env.host == JOBTRACKER_HOST:
        run("rm -f privateIps")
        run("touch privateIps")
        # Only the IPs are written here; iterate values directly instead of
        # unpacking (host, ip) pairs and ignoring the host.
        for privateIp in privateIps.values():
            run("echo '%s' >> privateIps" % privateIp)
def start():
    """Start the Hadoop daemons appropriate for each host's role."""
    operationOnHadoopDaemons("start")
def stop():
    """Stop the Hadoop daemons appropriate for each host's role."""
    operationOnHadoopDaemons("stop")
def test():
    """Smoke-test the cluster with the bundled randomwriter example job."""
    if env.host == JOBTRACKER_HOST:
        # Clear any output of a previous run before submitting the job.
        operationInHadoopEnvironment(r"\\$HADOOP_PREFIX/bin/hadoop dfs -rmr out")
        operationInHadoopEnvironment(r"\\$HADOOP_PREFIX/bin/hadoop jar \\$HADOOP_PREFIX/hadoop-examples-%s.jar randomwriter out" % HADOOP_VERSION)
# HELPER FUNCTIONS
def ensureDirectoryExists(directory):
    """Create `directory` (with parents) on the remote host if missing."""
    with settings(warn_only=True):
        probe = run("test -d %s" % directory)
        if probe.failed:
            run("mkdir -p %s" % directory)
@parallel
def getPrivateIp():
    """Return this host's private IPv4 address as a string."""
    if not EC2:
        # Parse the address off ifconfig's 'inet' line for NET_INTERFACE.
        return run("ifconfig %s | grep 'inet\s\+' | awk '{print $2}' | cut -d':' -f2" % NET_INTERFACE).strip()
    else:
        # On EC2, the instance metadata service is authoritative.
        return run("wget -qO- http://instance-data/latest/meta-data/local-ipv4")
@parallel
def updateHosts(privateIps):
    """Upsert a '<privateIp> <host>' line into /etc/hosts for each node.

    privateIps: dict mapping host name -> private IP (as returned by
    execute(getPrivateIp)). The hosts file is backed up first.
    """
    with settings(warn_only=True):
        if not run("test -f %s" % HOSTS_FILE).failed:
            currentBakNumber = getLastBackupNumber(HOSTS_FILE) + 1
            sudo("cp %(file)s %(file)s.bak%(bakNumber)d" %
                {"file": HOSTS_FILE, "bakNumber": currentBakNumber})
    sudo("touch %s" % HOSTS_FILE)
    for host, privateIp in privateIps.items():
        # Line number of an existing entry for this IP; empty when absent.
        lineNumber = run("grep -n '^%(ip)s' '%(file)s' | cut -d : -f 1" %
            {"ip": privateIp, "file": HOSTS_FILE})
        try:
            lineNumber = int(lineNumber)
            sudo("sed -i \"" + str(lineNumber) + "s@.*@%(ip)s %(host)s@\" '%(file)s'" %
                {"host": host, "ip": privateIp, "file": HOSTS_FILE})
        except ValueError:
            # No (single) existing line for this IP: append one.
            sudo("echo \"%(ip)s %(host)s\" >> \"%(file)s\"" %
                {"host": host, "ip": privateIp, "file": HOSTS_FILE})
def getLastBackupNumber(filePath):
    """Return N of the highest-numbered '<filePath>.bakN' on the remote host,
    or -1 when no backup exists."""
    dirName = os.path.dirname(filePath)
    baseName = os.path.basename(filePath)
    with cd(dirName):
        newestBackup = run("ls -1 | grep %s.bak | tail -n 1" % baseName)
        if not newestBackup:
            return -1
        # Strip the leading '<baseName>.bak' (len + 4 chars) to get the number.
        return int(newestBackup[len(baseName) + 4:])
def changeHadoopProperties(fileName, propertyDict):
    """Apply propertyDict to the named Hadoop config file on the remote host.

    Ships the local replaceHadoopProperty.py helper when the remote copy is
    missing or stale (md5 mismatch), backs up the target file, then runs the
    helper with alternating 'key value' arguments. No-op when either argument
    is empty.
    """
    if not fileName or not propertyDict:
        return
    with cd(HADOOP_CONF):
        with settings(warn_only=True):
            import hashlib
            # md5 of the LOCAL helper script, compared against the remote copy.
            replaceHadoopPropertyHash = \
                hashlib.md5(
                    open("replaceHadoopProperty.py", 'rb').read()
                ).hexdigest()
            if run("test %s = `md5sum replaceHadoopProperty.py | cut -d ' ' -f 1`"
                    % replaceHadoopPropertyHash).failed:
                put("replaceHadoopProperty.py", HADOOP_CONF + "/")
                run("chmod +x replaceHadoopProperty.py")
        with settings(warn_only=True):
            if not run("test -f %s" % fileName).failed:
                # 'cp' merges into the existing file; 'mv' starts clean.
                op = "cp"
                if CONFIGURATION_FILES_CLEAN:
                    op = "mv"
                currentBakNumber = getLastBackupNumber(fileName) + 1
                run("%(op)s %(file)s %(file)s.bak%(bakNumber)d" %
                    {"op": op, "file": fileName, "bakNumber": currentBakNumber})
        run("touch %s" % fileName)
        command = "./replaceHadoopProperty.py '%s' %s" % (fileName,
            " ".join(["%s %s" % (str(key), str(value)) for key, value in propertyDict.items()]))
        run(command)
def revertBackup(fileName):
    """Replace fileName with its most recent '.bakN' copy, if one exists."""
    with cd(os.path.dirname(fileName)):
        bakNumber = getLastBackupNumber(fileName)
        if bakNumber == -1:
            # Every backup has already been consumed; nothing to revert.
            return
        run("mv %(file)s.bak%(bakNumber)d %(file)s" %
            {"file": fileName, "bakNumber": bakNumber})
def revertHadoopPropertiesChange(fileName):
    """Restore the newest backup of a Hadoop config file in HADOOP_CONF."""
    revertBackup(os.path.join(HADOOP_CONF, fileName))
def operationInHadoopEnvironment(operation):
    """Run a shell command from HADOOP_PREFIX with the Hadoop env loaded.

    When the environment file is not auto-loaded by new shells, the command
    is wrapped with the executeInHadoopEnv.sh helper (uploaded/refreshed on
    md5 mismatch), which sources ENVIRONMENT_FILE first.
    """
    with cd(HADOOP_PREFIX):
        command = operation
        if ENVIRONMENT_FILE_NOTAUTOLOADED:
            with settings(warn_only=True):
                import hashlib
                # md5 of the LOCAL wrapper script vs. the remote copy.
                executeInHadoopEnvHash = \
                    hashlib.md5(
                        open("executeInHadoopEnv.sh", 'rb').read()
                    ).hexdigest()
                if run("test %s = `md5sum executeInHadoopEnv.sh | cut -d ' ' -f 1`"
                        % executeInHadoopEnvHash).failed:
                    put("executeInHadoopEnv.sh", HADOOP_PREFIX + "/")
                    run("chmod +x executeInHadoopEnv.sh")
            command = ("./executeInHadoopEnv.sh %s " % ENVIRONMENT_FILE) + command
        run(command)
def operationOnHadoopDaemons(operation):
    """Run hadoop-daemon.sh '<operation>' for each daemon this host should run,
    then list the surviving JVMs with jps."""
    # Start/Stop NameNode
    if (env.host == NAMENODE_HOST):
        operationInHadoopEnvironment(r"\\$HADOOP_PREFIX/bin/hadoop-daemon.sh %s namenode" % operation)
    # Start/Stop DataNode on all slave hosts
    if env.host in SLAVE_HOSTS:
        operationInHadoopEnvironment(r"\\$HADOOP_PREFIX/bin/hadoop-daemon.sh %s datanode" % operation)
    # Start/Stop JobTracker (Hadoop 1.x -- not YARN's ResourceManager)
    if (env.host == JOBTRACKER_HOST):
        operationInHadoopEnvironment(r"\\$HADOOP_PREFIX/bin/hadoop-daemon.sh %s jobtracker" % operation)
    # Start/Stop TaskTracker on all slave hosts (not YARN's NodeManager)
    if env.host in SLAVE_HOSTS:
        operationInHadoopEnvironment(r"\\$HADOOP_PREFIX/bin/hadoop-daemon.sh %s tasktracker" % operation)
    run("jps")
def readHostsFromEC2():
    """Discover cluster hosts from EC2 instance tags.

    Rebuilds NAMENODE_HOST, JOBTRACKER_HOST and SLAVE_HOSTS from the
    instances tagged Cluster=<EC2_CLUSTER_NAME>. Every instance becomes a
    slave; instances additionally tagged 'namenode'/'jobtracker' take those
    roles, with the first slave used as a fallback when a role tag is
    absent.
    """
    import boto.ec2
    global NAMENODE_HOST, JOBTRACKER_HOST, SLAVE_HOSTS
    NAMENODE_HOST = None
    JOBTRACKER_HOST = None
    SLAVE_HOSTS = []
    conn = boto.ec2.connect_to_region("eu-west-1",
        aws_access_key_id=AWS_ACCESSKEY_ID,
        aws_secret_access_key=AWS_ACCESSKEY_SECRET)
    # Bug fix: filter on the configurable EC2_CLUSTER_NAME instead of the
    # hard-coded 'rtgiraph' literal that duplicated its default value.
    instances = conn.get_only_instances(filters={'tag:Cluster': EC2_CLUSTER_NAME})
    for instance in instances:
        instanceTags = instance.tags
        instanceHost = instance.public_dns_name
        if "namenode" in instanceTags:
            NAMENODE_HOST = instanceHost
        if "jobtracker" in instanceTags:
            JOBTRACKER_HOST = instanceHost
        SLAVE_HOSTS.append(instanceHost)
    if SLAVE_HOSTS:
        if NAMENODE_HOST is None:
            NAMENODE_HOST = SLAVE_HOSTS[0]
        if JOBTRACKER_HOST is None:
            JOBTRACKER_HOST = SLAVE_HOSTS[0]
bootstrapFabric()
| |
#!/usr/bin/env python
# -*-coding:utf-8 -*
#
# Initial project created by chris@drumminhands.com
# Current fork made by Pedro Amorim (contact@pamorim.fr) and Vitor Amorim
import os
import glob
import time
from time import sleep
import traceback
import RPi.GPIO as GPIO
import picamera # http://picamera.readthedocs.org/en/release-1.4/install2.html
import atexit
import sys
import pygame
from pygame.locals import QUIT, KEYDOWN, K_ESCAPE, K_SPACE, K_p
import config # this is the config python file config.py
import cups
####################
# Variables Config #
####################
# GPIO pin assignments, all taken from config.py.
led_pin = config.led_pin
btn_pin = config.btn_pin
shutdown_btn_pin = config.shutdown_btn_pin
print_btn_pin = config.print_btn_pin
print_led_pin = config.print_led_pin
total_pics = 4 # number of pics to be taken
capture_delay = 2 # delay between pics
prep_delay = 3 # number of seconds at step 1 as users prep to have photo taken
gif_delay = 100 # How much time between frames in the animated gif
restart_delay = 3 # how long to display finished message before beginning a new session
# how much to wait in-between showing pics on-screen after taking
replay_delay = 1
replay_cycles = 1 # how many times to show each photo on-screen after taking
# full frame of v1 camera is 2592x1944. Wide screen max is 2592,1555
# if you run into resource issues, try smaller, like 1920x1152.
# or increase memory
# http://picamera.readthedocs.io/en/release-1.12/fov.html#hardware-limits
high_res_w = config.camera_high_res_w # width of high res image, if taken
high_res_h = config.camera_high_res_h # height of high res image, if taken
# Preview
if config.camera_landscape:
    preview_w = config.monitor_w
    preview_h = config.monitor_h
else:
    # NOTE(review): monitor_h * monitor_h looks like a typo -- a portrait
    # preview width would normally come from the aspect ratio, so one factor
    # was presumably meant to be something else. Confirm before changing.
    preview_w = (config.monitor_h * config.monitor_h) / config.monitor_w
    preview_h = config.monitor_h
#######################
#  Photobooth image   #
#######################
# Image ratio 4/3
image_h = 525
image_w = 700
margin = 50
# Printed image ratio 3/2
output_h = 1200
output_w = 1800
#############
# Variables #
#############
# Do not change these variables, as the code will change it anyway
transform_x = config.monitor_w # how wide to scale the jpg when replaying
# NOTE(review): 'transfrom_y' is misspelled -- set_demensions() declares and
# assigns the global 'transform_y', so this initial value is never updated
# under that name. Verify nothing reads 'transfrom_y' before renaming.
transfrom_y = config.monitor_h # how high to scale the jpg when replaying
offset_x = 0 # how far off to left corner to display photos
offset_y = 0 # how far off to left corner to display photos
print_counter = 0
print_error = 'OK'
last_image_save = 'no_file'
# In portrait mode, swap every width/height pair.
if not config.camera_landscape:
    tmp = image_h
    image_h = image_w
    image_w = tmp
    tmp = output_h
    output_h = output_w
    output_w = tmp
    tmp = high_res_h
    high_res_h = high_res_w
    high_res_w = tmp
################
# Other Config #
################
# Directory containing this script (used for bundled assets and fallbacks).
real_path = os.path.dirname(os.path.realpath(__file__))
def log(text):
    """Print `text` to stdout prefixed with a 'YYYY/MM/DD HH:MM:SS |' stamp."""
    print time.strftime('%Y/%m/%d %H:%M:%S') + " | " + text
###########################
# Init output directories #
###########################
# Check directory is writable
now = str(time.time()).split('.')[0] # get the current timestamp, and remove milliseconds
if (not os.path.exists(config.file_path)):
log("ERROR config.file_path not writeable fallback to SD : " + config.file_path)
output_path = real_path + "/" + now + "/"
else:
output_path = config.file_path + now + "/"
output_path_photobooth = output_path + "photobooth/"
# Create directories
os.makedirs(output_path_photobooth, 0777)
if (not os.access(output_path_photobooth, os.W_OK)):
log("ERROR output_path_photobooth not writeable: " + output_path_photobooth)
sys.exit()
##############
# Initialize #
##############
# GPIO setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(led_pin, GPIO.OUT) # LED
# Buttons are wired active-low; enable the internal pull-ups.
GPIO.setup(btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(shutdown_btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(print_btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(print_led_pin, GPIO.OUT) # LED
# for some reason the pin turns on at the beginning of the program. Why?
GPIO.output(led_pin, False)
GPIO.output(print_led_pin, False)
# initialize pygame
pygame.init()
pygame.display.set_mode((config.monitor_w, config.monitor_h))
screen = pygame.display.get_surface()
pygame.display.set_caption('Photo Booth Pics')
if not config.debug_mode:
    pygame.mouse.set_visible(False) # hide the mouse cursor
    pygame.display.toggle_fullscreen()
# Shutter sound played when a photo is captured.
capture = pygame.mixer.Sound(real_path + "/camera-shutter-sound.wav")
#############
# Functions #
#############
@atexit.register
def cleanup():
    """
    @brief clean up running programs as needed when main program exits

    Registered with atexit, so it runs on any interpreter shutdown
    (normal exit, sys.exit(), unhandled exception): log the exit, shut
    pygame down and release all GPIO pins.
    """
    log('Ended abruptly!')
    pygame.quit()
    GPIO.cleanup()
def clear_pics(channel):
    """
    @brief Remove every previously captured file from the output folder.
    @param channel GPIO channel that triggered the callback (unused)
    """
    for stale in glob.glob(output_path + '*'):
        os.remove(stale)
    log("Deleted previous pics")
    # Flash both LEDs three times to signal completion.
    for _ in range(3):
        GPIO.output(led_pin, True)
        GPIO.output(print_led_pin, True)
        sleep(0.25)
        GPIO.output(led_pin, False)
        GPIO.output(print_led_pin, False)
        sleep(0.25)
def set_demensions(img_w, img_h):
    """
    @brief Compute fullscreen display size and offsets, preserving the
    image aspect ratio (letterbox or pillarbox as needed).

    Results are stored in the module globals transform_x/transform_y
    (scaled image size) and offset_x/offset_y (top-left blit position).

    Note this only works when in booting in desktop mode.
    When running in terminal, the size is not correct (it displays small).
    Why?
    @param img_w The image w
    @param img_h The image h
    """
    global transform_y, transform_x, offset_y, offset_x
    # Height the image would have if stretched to the full monitor width.
    fitted_h = (config.monitor_w * img_h) / img_w
    if fitted_h < config.monitor_h:
        # Image is wider than the screen: horizontal black bars.
        transform_x = config.monitor_w
        transform_y = fitted_h
        offset_x = 0
        offset_y = (config.monitor_h - fitted_h) / 2
    elif fitted_h > config.monitor_h:
        # Image is taller than the screen: vertical black bars.
        transform_x = (config.monitor_h * img_w) / img_h
        transform_y = config.monitor_h
        offset_x = (config.monitor_w - transform_x) / 2
        offset_y = 0
    else:
        # Aspect ratios match exactly: no bars needed.
        transform_x = config.monitor_w
        transform_y = config.monitor_h
        offset_y = offset_x = 0
    if config.debug_mode:
        log("Screen resolution debug:")
        print(str(img_w) + " x " + str(img_h))
        print("ratio_h: " + str(fitted_h))
        print("transform_x: " + str(transform_x))
        print("transform_y: " + str(transform_y))
        print("offset_y: " + str(offset_y))
        print("offset_x: " + str(offset_x))
def set_demensions_preview(img_w, img_h):
    """
    @brief Compute display size and offsets for the 3/4-scale print
    preview, preserving the image aspect ratio.

    Same contract as set_demensions(), but the offsets account for the
    preview being drawn at three quarters of the fitted size.
    @param img_w The image w
    @param img_h The image h
    """
    global transform_y, transform_x, offset_y, offset_x
    # Height the image would have if stretched to the full monitor width.
    fitted_h = (config.monitor_w * img_h) / img_w
    if fitted_h < config.monitor_h:
        # Image is wider than the screen: horizontal black bars.
        transform_x = config.monitor_w
        transform_y = fitted_h
        offset_x = 0
        offset_y = (config.monitor_h - fitted_h * 3 / 4) / 2
    elif fitted_h > config.monitor_h:
        # Image is taller than the screen: vertical black bars.
        transform_x = (config.monitor_h * img_w) / img_h
        transform_y = config.monitor_h
        offset_x = (config.monitor_w - transform_x * 3 / 4) / 2
        offset_y = 0
    else:
        # Aspect ratios match exactly: no bars needed.
        transform_x = config.monitor_w
        transform_y = config.monitor_h
        offset_y = offset_x = 0
    if config.debug_mode:
        log("Screen resolution debug:")
        print(str(img_w) + " x " + str(img_h))
        print("ratio_h: " + str(fitted_h))
        print("transform_x: " + str(transform_x))
        print("transform_y: " + str(transform_y))
        print("offset_y: " + str(offset_y))
        print("offset_x: " + str(offset_x))
def show_image(image_path):
    """
    @brief Display one image centered on the fullscreen pygame surface.

    Clears the screen, computes the aspect-preserving size/offsets via
    set_demensions(), scales the image and flips the display.
    @param image_path The image path
    """
    # clear the screen
    screen.fill((0, 0, 0))
    # load the image
    img = pygame.image.load(image_path)
    img = img.convert()
    # set pixel dimensions based on image
    set_demensions(img.get_width(), img.get_height())
    # rescale the image to fit the current display
    # BUG FIX: was `transfrom_y` (typo, undefined name -> NameError at runtime)
    img = pygame.transform.scale(img, (transform_x, transform_y))
    screen.blit(img, (offset_x, offset_y))
    pygame.display.flip()
def show_image_print(image_path):
    """
    @brief Display the image being printed

    Shows the "printing" splash, overlays a 3/4-scale preview of the
    image being printed, waits restart_delay seconds and then returns
    to the intro screen.
    @param image_path The image path
    """
    show_image(real_path + "/printing.png")
    # Load image
    img = pygame.image.load(image_path)
    # set pixel dimensions based on image
    set_demensions_preview(img.get_width(), img.get_height())
    # rescale the image to fit the current display
    # BUG FIX: was `transfrom_y` (typo, undefined name -> NameError at runtime)
    img = pygame.transform.scale(img, (transform_x * 3 / 4, transform_y * 3 / 4))
    screen.blit(img, (offset_x, offset_y))
    pygame.display.flip()
    sleep(restart_delay)
    show_intro()
def clear_screen():
    """
    @brief display a blank screen

    Fills the whole surface with black and flips the display buffer.
    """
    screen.fill((0, 0, 0))
    pygame.display.flip()
def display_pics(jpg_group):
    """
    @brief Display a group of images

    Replays the session's photos replay_cycles times, pausing
    replay_delay seconds on each.
    @param jpg_group Timestamp prefix shared by the session's jpg files
    """
    # BUG FIX: both loops used the same index name `i`; the inner loop
    # shadowed the cycle counter. Harmless here but fragile -- use
    # distinct names.
    for _cycle in range(replay_cycles):  # show pics a few times
        for pic_no in range(1, total_pics + 1):  # show each pic
            # NOTE(review): "-0" zero-padding assumes total_pics <= 9 -- confirm.
            show_image(output_path + jpg_group + "-0" + str(pic_no) + ".jpg")
            sleep(replay_delay)  # pause
def make_led_blinking(pin, counter=5, duration=0.25):
    """
    @brief Blink a LED with a single call.
    @param pin GPIO pin driving the LED
    @param counter Number of on/off cycles
    @param duration Seconds the LED stays on (and then off) per cycle
    """
    for _ in range(counter):
        GPIO.output(pin, True)
        sleep(duration)
        GPIO.output(pin, False)
        sleep(duration)
def start_photobooth():
    """
    @brief Define the photo taking function for when the big button is pressed

    Full session flow: (1) show instructions, (2) capture total_pics
    photos with the Pi camera, (3) build the gif / composite strip,
    (4) replay the shots and return to the intro screen.
    """
    # connect to global vars
    global print_counter, print_error
    #
    # Begin Step 1
    #
    log("Get Ready from " + real_path)
    GPIO.output(led_pin, False)
    GPIO.output(print_led_pin, False)
    show_image(real_path + "/instructions.png")
    sleep(prep_delay)
    # clear the screen
    clear_screen()
    camera = picamera.PiCamera()
    if not config.camera_color_preview:
        camera.saturation = -100
    camera.iso = config.camera_iso
    # set camera resolution to high res
    camera.resolution = (high_res_w, high_res_h)
    #
    # Begin Step 2
    #
    log("Taking pics")
    # get the current timestamp, and remove milliseconds
    now = str(time.time()).split('.')[0]
    try: # take the photos
        for i in range(1, total_pics + 1):
            filename = output_path + now + '-0' + str(i) + '.jpg'
            show_image(real_path + "/pose" + str(i) + ".png")
            sleep(capture_delay) # pause in-between shots
            clear_screen()
            # preview a mirror image
            camera.hflip = True
            camera.start_preview(resolution=(preview_w, preview_h))
            sleep(2) # warm up camera
            GPIO.output(led_pin, True) # turn on the LED
            camera.hflip = False # flip back when taking photo
            # Play sound
            capture.play()
            sleep(0.5) # Wait 500 ms for the sound to coincide with the capture of the picture.
            # Capture!
            camera.capture(filename)
            log("Capture : " + filename)
            camera.stop_preview()
            GPIO.output(led_pin, False) # turn off the LED
    # NOTE: Python 2-only except syntax. pygame.quit() ends the UI but the
    # function still falls through to Step 3 -- confirm that is intended.
    except Exception, e:
        tb = sys.exc_info()[2]
        traceback.print_exception(e.__class__, e, tb)
        pygame.quit()
    finally:
        # always release the camera, even after a capture failure
        camera.close()
    #
    # Begin Step 3
    #
    show_image(real_path + "/processing.png")
    if config.make_gifs: # make the gifs
        log("Creating an animated gif")
        # make an animated gif
        graphicsmagick = "gm convert -delay " + \
            str(gif_delay) + " " + output_path + now + \
            "*.jpg " + output_path + now + ".gif"
        os.system(graphicsmagick) # make the .gif
    log("Creating a photo booth picture")
    photobooth_image(now)
    # reset print counter
    print_counter = 0
    #
    # Begin Step 4
    #
    try:
        display_pics(now)
    except Exception, e:
        tb = sys.exc_info()[2]
        traceback.print_exception(e.__class__, e, tb)
        pygame.quit()
    log("Done")
    show_image(real_path + "/finished.png")
    sleep(restart_delay)
    show_intro()
    # turn on the LED
    GPIO.output(led_pin, True)
    # only re-arm the print LED when the printer is not in an error state
    if print_error == 'OK':
        GPIO.output(print_led_pin, True)
def shutdown(channel):
    """
    @brief Shutdown the RaspberryPi
    @param channel GPIO channel that triggered the callback (unused)

    config sudoers to be available to execute shutdown without password
    Add this line in file /etc/sudoers
    myUser ALL = (root) NOPASSWD: /sbin/halt
    """
    print("Your RaspberryPi will be shut down in few seconds...")
    pygame.quit()
    GPIO.cleanup()
    os.system("sudo halt -p")
def photobooth_image(now):
    """
    @brief Compose the four session shots onto the template background as
    a 2x2 photobooth strip and save it for printing.
    @param now Timestamp prefix of the session's jpg files
    """
    # connect to global vars
    global last_image_save
    # Prepare the template background.
    bgimage = pygame.image.load(real_path + "/bgimage.png")
    if not config.camera_landscape:
        bgimage = pygame.transform.rotate(bgimage, 270)
    bgimage = pygame.transform.scale(bgimage, (output_w, output_h))
    # Paste shots 01..04 into a 2x2 grid separated by `margin` pixels.
    for idx in range(4):
        shot = pygame.image.load(output_path + now + "-0" + str(idx + 1) + ".jpg")
        shot = pygame.transform.scale(shot, (image_w, image_h))
        col = idx % 2
        row = idx // 2
        bgimage.blit(shot, (margin + col * (margin + image_w),
                            margin + row * (margin + image_h)))
    # Check directory is writable before saving the composite.
    if (os.access(output_path_photobooth, os.W_OK)):
        last_image_save = output_path_photobooth + now + ".jpg"
        pygame.image.save(bgimage, last_image_save)
        if config.debug_mode:
            log("INFO last image save: " + last_image_save)
    else:
        log("ERROR path not writeable: " + output_path_photobooth)
def print_image():
    """
    @brief Send the last composed photobooth image to the first CUPS printer.

    Handles three situations: recovering from a previously recorded
    printer error, refusing to print while the printer reports a failure
    state, and enforcing the per-session print quota (config.max_print).
    """
    # connect to global vars
    global print_counter, print_error
    # Connect to cups and select printer 0
    conn = cups.Connection()
    printers = conn.getPrinters()
    # NOTE(review): keys()[0] is Python 2 only (dict views are not
    # indexable on Python 3).
    printer_name = printers.keys()[0]
    if print_error != 'OK':
        log("Printer restart after error")
        # restart printer
        conn.disablePrinter(printer_name)
        sleep(2)
        conn.enablePrinter(printer_name)
        print_error = 'OK'
        GPIO.output(print_led_pin, True)  # Turn LED on
        show_intro()  # Reset screen
        return  # End here, printer should restart jobs pendings on the queue
    # Check if printer status is available
    # 3 => Printer is ready!
    # 4 => is printing, but OK, push to printing queue
    # 5 => Failure, no paper tray, no paper, ribbon depleted
    printerAtt = conn.getPrinterAttributes(printer_name)
    log("Printer status : (" + str(printerAtt['printer-state']) + ") " + printerAtt['printer-state-message'])
    if (printerAtt['printer-state'] == 5):
        log("Printer error : (" + str(printerAtt['printer-state']) + ") " + printerAtt['printer-state-message'])
        make_led_blinking(print_led_pin, 6, 0.15)  # LED blinking
        # remember the failure; show_intro() maps it to an error screen
        print_error = printerAtt['printer-state-message']
        show_intro()
        return  # End here, led is Off, wait for human action
    if not os.path.isfile(last_image_save):
        log("No image " + " : " + last_image_save)
    elif print_counter < config.max_print:
        print_counter += 1  # increase counter
        GPIO.output(print_led_pin, False)
        # Launch printing
        if not config.debug_mode:
            conn.printFile(printer_name, last_image_save, "PhotoBooth", {})
        show_image_print(last_image_save)
        log("Launch printing request on " + printer_name + " : " + last_image_save)
        sleep(1)
        # Turn LED on
        GPIO.output(print_led_pin, True)
    else:
        make_led_blinking(print_led_pin, 3, 0.15)  # LED blinking, at the end LED is off
        log("You have reach print quota for image " + " : " + last_image_save)
def show_intro():
    """
    @brief Show the idle screen matching the current printer state.

    Maps known printer-state messages to dedicated error screens and
    falls back to a generic printer-error image for anything else.
    """
    global print_error
    screens = {
        'OK': "/intro.png",
        'Ribbon depleted!': "/error_ink.png",
        'Paper feed problem!': "/error_paper.png",
        'No paper tray loaded, aborting!': "/error_paper.png",
        'Printer open failure (No suitable printers found!)': "/error_printer_off.png",
    }
    show_image(real_path + screens.get(print_error, "/error_printer.png"))
##################
#  Main Program  #
##################
# clear the previously stored pics based on config settings
if config.clear_on_startup:
    clear_pics(1)
# Add event listener to catch shutdown request
if config.enable_shutdown_btn:
    GPIO.add_event_detect(shutdown_btn_pin, GPIO.FALLING, callback=shutdown, bouncetime=config.debounce)
# If printing enable, add event listener on print button
if config.enable_print_btn:
    GPIO.add_event_detect(print_btn_pin, GPIO.FALLING, bouncetime=config.debounce)
# Setup button start_photobooth
# DON'T USE THREADED CALLBACKS
GPIO.add_event_detect(btn_pin, GPIO.FALLING, bouncetime=config.debounce)
log("Photo booth app running...")
# blink light to show the app is running
# NOTE(review): a tuple of pins is passed as `pin`; RPi.GPIO.output accepts
# channel sequences, so both LEDs blink together -- confirm intended.
make_led_blinking((print_led_pin, led_pin))  # LED blinking
show_image(real_path + "/intro.png")
# turn on the light showing users they can push the button
GPIO.output(led_pin, True)
GPIO.output(print_led_pin, True)
# Main event loop: poll once per second for keyboard and button events.
while True:
    sleep(1)
    # Keyboard shortcuts
    for event in pygame.event.get():
        # pygame.QUIT is sent when the user clicks the window's "X" button
        if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
            sys.exit()
        # Start photobooth with key "space"
        elif event.type == KEYDOWN and event.key == K_SPACE:
            start_photobooth()
        # Print last image with key "P"
        elif event.type == KEYDOWN and event.key == K_p:
            print_image()
    # Detect event on start button
    if GPIO.event_detected(btn_pin):
        start_photobooth()
    if config.enable_print_btn and GPIO.event_detected(print_btn_pin):
        print_image()
| |
from insights.parsers import httpd_conf
from insights.parsers.httpd_conf import HttpdConf
from insights.tests import context_wrap
import doctest
HTTPD_CONF_1 = """
ServerRoot "/etc/httpd"
<Directory />
Options FollowSymLinks
AllowOverride None
</Directory>
SSLProtocol -ALL +SSLv3
#SSLProtocol all -SSLv2
NSSProtocol SSLV3 TLSV1.0
#NSSProtocol ALL
# prefork MPM
<IfModule prefork.c>
StartServers 8
MinSpareServers 5
MaxSpareServers 20
ServerLimit 256
MaxClients 256
MaxRequestsPerChild 200
</IfModule>
# worker MPM
<IfModule worker.c>
StartServers 4
MaxClients 300
MinSpareThreads 25
MaxSpareThreads 75
ThreadsPerChild 25
MaxRequestsPerChild 0
</IfModule>
LoadModule auth_basic_module modules/mod_auth_basic.so
LoadModule auth_digest_module modules/mod_auth_digest.so
""".strip()
HTTPD_CONF_PATH = "/etc/httpd/conf/httpd.conf"
HTTPD_CONF_D_PATH = "/etc/httpd/conf.d/default.conf"
HTTPD_CONF_D_1 = """
SSLProtocol -ALL +SSLv3
#SSLProtocol all -SSLv2
#SSLCipherSuite ALL:!ADH:!EXPORT:!SSLv2:RC4+RSA:+HIGH:+MEDIUM:+LOW
SSLCipherSuite ALL:!ADH:!EXPORT:!SSLv2:RC4+RSA:+HIGH:+MEDIUM:+LOW
# MaxClients: maximum number of server processes allowed to start
MaxClients
""".strip()
HTTPD_CONF_SPLIT = '''
LogLevel warn
IncludeOptional conf.d/*.conf
EnableSendfile on
'''.strip()
HTTPD_CONF_MORE = '''
UserDir disable
UserDir enable bob
'''.strip()
HTTPD_CONF_NEST_1 = """
<VirtualHost 192.0.2.1>
<Directory /var/www/example>
Options FollowSymLinks
AllowOverride None
</Directory>
<IfModule mod_php4.c>
php_admin_flag safe_mode Off
php_admin_value register_globals 0
php_value magic_quotes_gpc 0
php_value magic_quotes_runtime 0
php_value allow_call_time_pass_reference 0
</IfModule>
DirectoryIndex index.php
<IfModule mod_rewrite.c>
RewriteEngine On
RewriteRule .* /index.php
</IfModule>
<IfModule mod_rewrite.c>
RewriteEngine Off
</IfModule>
DocumentRoot /var/www/example
ServerName www.example.com
ServerAlias admin.example.com
</VirtualHost>
""".strip()
HTTPD_CONF_NEST_2 = """
<IfModule !php5_module>
Testphp php5_1
<IfModule !php4_module>
Testphp php4_1
<Location />
<FilesMatch ".php[45]?$">
Order allow,deny
Deny from all
</FilesMatch>
<FilesMatch ".php[45]?$">
Order deny,allow
</FilesMatch>
</Location>
Testphp php4_2
</IfModule>
Testphp php5_2
</IfModule>
<IfModule !php5_module>
Testphp php5_3
JustATest on
</IfModule>
""".strip()
HTTPD_CONF_NO_NAME_SEC = """
<RequireAll>
AuthName "NAME Access"
Require valid-user
</RequireAll>
""".strip()
HTTPD_CONF_DOC = '''
ServerRoot "/etc/httpd"
LoadModule auth_basic_module modules/mod_auth_basic.so
LoadModule auth_digest_module modules/mod_auth_digest.so
<Directory />
Options FollowSymLinks
AllowOverride None
</Directory>
<IfModule mod_mime_magic.c>
# MIMEMagicFile /usr/share/magic.mime
MIMEMagicFile conf/magic
</IfModule>
ErrorLog "|/usr/sbin/httplog -z /var/log/httpd/error_log.%Y-%m-%d"
SSLProtocol -ALL +SSLv3
#SSLProtocol all -SSLv2
NSSProtocol SSLV3 TLSV1.0
#NSSProtocol ALL
# prefork MPM
<IfModule prefork.c>
StartServers 8
MinSpareServers 5
MaxSpareServers 20
ServerLimit 256
MaxClients 256
MaxRequestsPerChild 200
</IfModule>
# worker MPM
<IfModule worker.c>
StartServers 4
MaxClients 300
MinSpareThreads 25
MaxSpareThreads 75
ThreadsPerChild 25
MaxRequestsPerChild 0
</IfModule>
'''.strip()
def test_get_httpd_conf_nest_1():
    """Nested sections are reachable through (section, name) tuple keys."""
    conf = HttpdConf(context_wrap(HTTPD_CONF_NEST_1, path=HTTPD_CONF_PATH))
    vhost = conf[("VirtualHost", "192.0.2.1")]
    assert vhost[("IfModule", "mod_php4.c")]['php_admin_flag'][-1].value == "safe_mode Off"
    assert vhost[("IfModule", "mod_rewrite.c")]['RewriteEngine'][-1].value == "Off"
    assert vhost[("IfModule", "mod_rewrite.c")]['RewriteRule'][-1].value == ".* /index.php"
    assert vhost['ServerName'][-1].value == "www.example.com"
def test_get_httpd_conf_nest_2():
    """Deeply nested sections parse into the exact expected dict, with
    repeated same-name sections (IfModule !php5_module) merged and each
    value recorded as (value, line, section, section_name, file_name,
    file_path)."""
    context = context_wrap(HTTPD_CONF_NEST_2, path=HTTPD_CONF_PATH)
    result = HttpdConf(context)
    assert result[("IfModule", "!php5_module")] == {
        'Testphp': [
            ('php5_1', 'Testphp php5_1', 'IfModule', '!php5_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf'),
            ('php5_2', 'Testphp php5_2', 'IfModule', '!php5_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf'),
            ('php5_3', 'Testphp php5_3', 'IfModule', '!php5_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf')],
        'JustATest': [
            ('on', 'JustATest on', 'IfModule', '!php5_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf')],
        ('IfModule', '!php4_module'): {
            ('Location', '/'): {
                ('FilesMatch', '".php[45]?$"'): {
                    'Deny': [
                        ('from all', 'Deny from all', 'FilesMatch', '".php[45]?$"', 'httpd.conf', '/etc/httpd/conf/httpd.conf')],
                    'Order': [
                        ('allow,deny', 'Order allow,deny', 'FilesMatch', '".php[45]?$"', 'httpd.conf', '/etc/httpd/conf/httpd.conf'),
                        ('deny,allow', 'Order deny,allow', 'FilesMatch', '".php[45]?$"', 'httpd.conf', '/etc/httpd/conf/httpd.conf')]
                }
            },
            'Testphp': [
                ('php4_1', 'Testphp php4_1', 'IfModule', '!php4_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf'),
                ('php4_2', 'Testphp php4_2', 'IfModule', '!php4_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf')]
        }
    }
def test_get_httpd_conf_1():
    """Main config: directives, nested IfModule sections, file metadata."""
    conf = HttpdConf(context_wrap(HTTPD_CONF_1, path=HTTPD_CONF_PATH))
    # file metadata
    assert conf.file_path == HTTPD_CONF_PATH
    assert conf.file_name == "httpd.conf"
    # top-level directives
    assert "SSLCipherSuite" not in conf
    assert conf['ServerRoot'][0].value == '/etc/httpd'
    assert "SSLV3 TLSV1.0" in conf["NSSProtocol"][-1]
    assert conf['LoadModule'][0].value == 'auth_basic_module modules/mod_auth_basic.so'
    assert conf['LoadModule'][-1].value == 'auth_digest_module modules/mod_auth_digest.so'
    assert conf['Directory', '/']['Options'][-1].value == 'FollowSymLinks'
    # per-MPM sections
    prefork = conf[("IfModule", "prefork.c")]
    worker = conf[("IfModule", "worker.c")]
    assert type(worker) is dict
    assert prefork["MaxClients"][-1].value == "256"
    assert prefork['MaxRequestsPerChild'][-1].value == '200'
    assert 'ThreadsPerChild' not in prefork
    assert worker["MaxClients"][-1].value == "300"
def test_get_httpd_conf_2():
    """conf.d file: SSL directives parsed; value-less MaxClients dropped."""
    conf = HttpdConf(context_wrap(HTTPD_CONF_D_1, path=HTTPD_CONF_D_PATH))
    assert conf.file_path == HTTPD_CONF_D_PATH
    assert conf.file_name == "default.conf"
    expected_cipher = 'ALL:!ADH:!EXPORT:!SSLv2:RC4+RSA:+HIGH:+MEDIUM:+LOW'
    assert conf["SSLCipherSuite"][-1].value == expected_cipher
    assert conf["SSLProtocol"] == [('-ALL +SSLv3', 'SSLProtocol -ALL +SSLv3', None, None, conf.file_name, conf.file_path)]
    assert conf["SSLProtocol"][-1].value == '-ALL +SSLv3'
    assert conf["SSLProtocol"][-1].line == 'SSLProtocol -ALL +SSLv3'
    assert "NSSProtocol" not in conf
    assert "MaxClients" not in conf
def test_main_config_splitting():
    """IncludeOptional splits the main config into first/second halves."""
    conf = HttpdConf(context_wrap(HTTPD_CONF_SPLIT, path=HTTPD_CONF_PATH))
    assert conf.file_path == HTTPD_CONF_PATH
    assert conf.file_name == "httpd.conf"
    assert conf['LogLevel'] == [('warn', 'LogLevel warn', None, None, conf.file_name, conf.file_path)]
    assert conf['EnableSendfile'] == [('on', 'EnableSendfile on', None, None, conf.file_name, conf.file_path)]
    # LogLevel precedes IncludeOptional, EnableSendfile follows it
    assert conf.first_half['LogLevel'][-1].value == 'warn'
    assert conf.first_half['LogLevel'][-1].line == 'LogLevel warn'
    assert conf.second_half['EnableSendfile'][-1].value == 'on'
def test_main_config_no_splitting():
    """Without IncludeOptional everything lands in first_half."""
    conf = HttpdConf(context_wrap(HTTPD_CONF_1, path=HTTPD_CONF_PATH))
    assert conf.file_path == HTTPD_CONF_PATH
    assert conf.file_name == "httpd.conf"
    assert conf.second_half == {}
    assert conf.data == conf.first_half
def test_main_config_no_main_config():
    """A conf.d snippet is not the main config: both halves stay empty."""
    conf = HttpdConf(context_wrap(HTTPD_CONF_D_1, path=HTTPD_CONF_D_PATH))
    assert conf.first_half == {}
    assert conf.second_half == {}
def test_multiple_values_for_directive():
    """Repeated UserDir directives accumulate, preserving order."""
    conf = HttpdConf(context_wrap(HTTPD_CONF_MORE, path=HTTPD_CONF_PATH))
    assert conf.file_path == HTTPD_CONF_PATH
    assert conf.file_name == "httpd.conf"
    user_dir = conf['UserDir']
    assert len(user_dir) == 2
    assert user_dir == [
        ('disable', 'UserDir disable', None, None, conf.file_name, conf.file_path),
        ('enable bob', 'UserDir enable bob', None, None, conf.file_name, conf.file_path)]
    assert user_dir[0].value == 'disable'
    assert user_dir[1].value == 'enable bob'
def test_no_name_section():
    """A section without an argument is keyed with an empty-string name."""
    conf = HttpdConf(context_wrap(HTTPD_CONF_NO_NAME_SEC, path=HTTPD_CONF_PATH))
    require_all = conf[("RequireAll", "")]
    assert require_all["AuthName"][-1].value == "NAME Access"
    assert require_all["Require"][-1].value == "valid-user"
def test_doc():
    """Run the httpd_conf module doctests with a pre-built parser in scope."""
    globs = {
        'HttpdConf': HttpdConf,
        'httpd_conf': HttpdConf(context_wrap(HTTPD_CONF_DOC, path='/path')),
    }
    failures = doctest.testmod(httpd_conf, globs=globs)[0]
    assert failures == 0
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from neutron import context
from neutron import manager
from neutron.plugins.ml2 import config
from neutron.tests.unit.plugins.ml2.drivers import ext_test
from neutron.tests.unit.plugins.ml2 import test_plugin
class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
    """Exercise the ML2 extension-driver hooks through the 'test' driver:
    error propagation/rollback on faulty hooks, extension attributes on
    network/subnet/port resources, and dict-extension calls on update."""
    # extension drivers to enable in the ml2 config for these tests
    _extension_drivers = ['test']
    def setUp(self):
        """Enable the test extension driver, then set up the ML2 plugin."""
        config.cfg.CONF.set_override('extension_drivers',
                                     self._extension_drivers,
                                     group='ml2')
        super(ExtensionDriverTestCase, self).setUp()
        self._plugin = manager.NeutronManager.get_plugin()
        self._ctxt = context.get_admin_context()
    def _verify_network_create(self, code, exc_reason):
        """Create a network and assert the HTTP status (and, when
        exc_reason is truthy, the NeutronError type). Returns the
        deserialized response body and the random tenant id used."""
        tenant_id = str(uuid.uuid4())
        data = {'network': {'name': 'net1',
                            'tenant_id': tenant_id}}
        req = self.new_create_request('networks', data)
        res = req.get_response(self.api)
        self.assertEqual(code, res.status_int)
        network = self.deserialize(self.fmt, res)
        if exc_reason:
            self.assertEqual(exc_reason,
                             network['NeutronError']['type'])
        return (network, tenant_id)
    def _verify_network_update(self, network, code, exc_reason):
        """Rename the given network and assert the HTTP status and the
        NeutronError type of the response."""
        net_id = network['network']['id']
        new_name = 'a_brand_new_name'
        data = {'network': {'name': new_name}}
        req = self.new_update_request('networks', data, net_id)
        res = req.get_response(self.api)
        self.assertEqual(code, res.status_int)
        error = self.deserialize(self.fmt, res)
        self.assertEqual(exc_reason,
                         error['NeutronError']['type'])
    def test_faulty_process_create(self):
        """A failing process_create_network hook yields a 500 and rollback."""
        with mock.patch.object(ext_test.TestExtensionDriver,
                               'process_create_network',
                               side_effect=TypeError):
            net, tenant_id = self._verify_network_create(500,
                'HTTPInternalServerError')
            # Verify the operation is rolled back
            query_params = "tenant_id=%s" % tenant_id
            nets = self._list('networks', query_params=query_params)
            self.assertFalse(nets['networks'])
    def test_faulty_process_update(self):
        """A failing process_update_network hook yields a 500 on update."""
        with mock.patch.object(ext_test.TestExtensionDriver,
                               'process_update_network',
                               side_effect=TypeError):
            network, tid = self._verify_network_create(201, None)
            self._verify_network_update(network, 500,
                                        'HTTPInternalServerError')
    def test_faulty_extend_dict(self):
        """A failing extend_network_dict yields an ExtensionDriverError.

        side_effect lets the first two calls (during create) succeed and
        fails the third, which happens during the update."""
        with mock.patch.object(ext_test.TestExtensionDriver,
                               'extend_network_dict',
                               side_effect=[None, None, TypeError]):
            network, tid = self._verify_network_create(201, None)
            self._verify_network_update(network, 400, 'ExtensionDriverError')
    def test_network_attr(self):
        """network_extension attribute is present on create/list/update."""
        with self.network() as network:
            # Test create network
            ent = network['network'].get('network_extension')
            self.assertIsNotNone(ent)
            # Test list networks
            res = self._list('networks')
            val = res['networks'][0].get('network_extension')
            self.assertEqual('default_network_extension', val)
            # Test network update
            data = {'network':
                    {'network_extension': 'Test_Network_Extension_Update'}}
            res = self._update('networks', network['network']['id'], data)
            val = res['network'].get('network_extension')
            self.assertEqual('Test_Network_Extension_Update', val)
    def test_subnet_attr(self):
        """subnet_extension attribute is present on create/list/update."""
        with self.subnet() as subnet:
            # Test create subnet
            ent = subnet['subnet'].get('subnet_extension')
            self.assertIsNotNone(ent)
            # Test list subnets
            res = self._list('subnets')
            val = res['subnets'][0].get('subnet_extension')
            self.assertEqual('default_subnet_extension', val)
            # Test subnet update
            data = {'subnet':
                    {'subnet_extension': 'Test_Subnet_Extension_Update'}}
            res = self._update('subnets', subnet['subnet']['id'], data)
            val = res['subnet'].get('subnet_extension')
            self.assertEqual('Test_Subnet_Extension_Update', val)
    def test_port_attr(self):
        """port_extension attribute is present on create/list/update."""
        with self.port() as port:
            # Test create port
            ent = port['port'].get('port_extension')
            self.assertIsNotNone(ent)
            # Test list ports
            res = self._list('ports')
            val = res['ports'][0].get('port_extension')
            self.assertEqual('default_port_extension', val)
            # Test port update
            data = {'port': {'port_extension': 'Test_Port_Extension_Update'}}
            res = self._update('ports', port['port']['id'], data)
            val = res['port'].get('port_extension')
            self.assertEqual('Test_Port_Extension_Update', val)
    def test_extend_network_dict(self):
        """update_network invokes both the process and extend-dict hooks."""
        with mock.patch.object(ext_test.TestExtensionDriver,
                               'process_update_network') as ext_update_net,\
            mock.patch.object(ext_test.TestExtensionDriver,
                              'extend_network_dict') as ext_net_dict,\
                self.network() as network:
            net_id = network['network']['id']
            net_data = {'network': {'id': net_id}}
            self._plugin.update_network(self._ctxt, net_id, net_data)
            self.assertTrue(ext_update_net.called)
            self.assertTrue(ext_net_dict.called)
    def test_extend_subnet_dict(self):
        """update_subnet invokes both the process and extend-dict hooks."""
        with mock.patch.object(ext_test.TestExtensionDriver,
                               'process_update_subnet') as ext_update_subnet,\
            mock.patch.object(ext_test.TestExtensionDriver,
                              'extend_subnet_dict') as ext_subnet_dict,\
                self.subnet() as subnet:
            subnet_id = subnet['subnet']['id']
            subnet_data = {'subnet': {'id': subnet_id}}
            self._plugin.update_subnet(self._ctxt, subnet_id, subnet_data)
            self.assertTrue(ext_update_subnet.called)
            self.assertTrue(ext_subnet_dict.called)
    def test_extend_port_dict(self):
        """update_port invokes both the process and extend-dict hooks."""
        with mock.patch.object(ext_test.TestExtensionDriver,
                               'process_update_port') as ext_update_port,\
            mock.patch.object(ext_test.TestExtensionDriver,
                              'extend_port_dict') as ext_port_dict,\
                self.port() as port:
            port_id = port['port']['id']
            port_data = {'port': {'id': port_id}}
            self._plugin.update_port(self._ctxt, port_id, port_data)
            self.assertTrue(ext_update_port.called)
            self.assertTrue(ext_port_dict.called)
class DBExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
    """Same extension-attribute coverage as above, but through the
    DB-backed 'testdb' driver: default values are empty strings and
    explicit values must round-trip through create/show/update."""
    # DB-backed extension driver to enable for these tests
    _extension_drivers = ['testdb']
    def setUp(self):
        """Enable the testdb extension driver, then set up the ML2 plugin."""
        config.cfg.CONF.set_override('extension_drivers',
                                     self._extension_drivers,
                                     group='ml2')
        super(DBExtensionDriverTestCase, self).setUp()
        self._plugin = manager.NeutronManager.get_plugin()
        self._ctxt = context.get_admin_context()
    def test_network_attr(self):
        """network_extension: default "", explicit value, and update."""
        with self.network() as network:
            # Test create with default value.
            net_id = network['network']['id']
            val = network['network']['network_extension']
            self.assertEqual("", val)
            res = self._show('networks', net_id)
            val = res['network']['network_extension']
            self.assertEqual("", val)
            # Test list.
            res = self._list('networks')
            val = res['networks'][0]['network_extension']
            self.assertEqual("", val)
        # Test create with explicit value.
        res = self._create_network(self.fmt,
                                   'test-network', True,
                                   arg_list=('network_extension', ),
                                   network_extension="abc")
        network = self.deserialize(self.fmt, res)
        net_id = network['network']['id']
        val = network['network']['network_extension']
        self.assertEqual("abc", val)
        res = self._show('networks', net_id)
        val = res['network']['network_extension']
        self.assertEqual("abc", val)
        # Test update.
        data = {'network': {'network_extension': "def"}}
        res = self._update('networks', net_id, data)
        val = res['network']['network_extension']
        self.assertEqual("def", val)
        res = self._show('networks', net_id)
        val = res['network']['network_extension']
        self.assertEqual("def", val)
    def test_subnet_attr(self):
        """subnet_extension: default "", explicit value, and update."""
        with self.subnet() as subnet:
            # Test create with default value.
            net_id = subnet['subnet']['id']
            val = subnet['subnet']['subnet_extension']
            self.assertEqual("", val)
            res = self._show('subnets', net_id)
            val = res['subnet']['subnet_extension']
            self.assertEqual("", val)
            # Test list.
            res = self._list('subnets')
            val = res['subnets'][0]['subnet_extension']
            self.assertEqual("", val)
        with self.network() as network:
            # Test create with explicit value.
            data = {'subnet':
                    {'network_id': network['network']['id'],
                     'cidr': '10.1.0.0/24',
                     'ip_version': '4',
                     'tenant_id': self._tenant_id,
                     'subnet_extension': 'abc'}}
            req = self.new_create_request('subnets', data, self.fmt)
            res = req.get_response(self.api)
            subnet = self.deserialize(self.fmt, res)
            subnet_id = subnet['subnet']['id']
            val = subnet['subnet']['subnet_extension']
            self.assertEqual("abc", val)
            res = self._show('subnets', subnet_id)
            val = res['subnet']['subnet_extension']
            self.assertEqual("abc", val)
            # Test update.
            data = {'subnet': {'subnet_extension': "def"}}
            res = self._update('subnets', subnet_id, data)
            val = res['subnet']['subnet_extension']
            self.assertEqual("def", val)
            res = self._show('subnets', subnet_id)
            val = res['subnet']['subnet_extension']
            self.assertEqual("def", val)
    def test_port_attr(self):
        """port_extension: default "", explicit value, and update."""
        with self.port() as port:
            # Test create with default value.
            net_id = port['port']['id']
            val = port['port']['port_extension']
            self.assertEqual("", val)
            res = self._show('ports', net_id)
            val = res['port']['port_extension']
            self.assertEqual("", val)
            # Test list.
            res = self._list('ports')
            val = res['ports'][0]['port_extension']
            self.assertEqual("", val)
        with self.network() as network:
            # Test create with explicit value.
            res = self._create_port(self.fmt,
                                    network['network']['id'],
                                    arg_list=('port_extension', ),
                                    port_extension="abc")
            port = self.deserialize(self.fmt, res)
            port_id = port['port']['id']
            val = port['port']['port_extension']
            self.assertEqual("abc", val)
            res = self._show('ports', port_id)
            val = res['port']['port_extension']
            self.assertEqual("abc", val)
            # Test update.
            data = {'port': {'port_extension': "def"}}
            res = self._update('ports', port_id, data)
            val = res['port']['port_extension']
            self.assertEqual("def", val)
            res = self._show('ports', port_id)
            val = res['port']['port_extension']
            self.assertEqual("def", val)
| |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from pathlib import Path
import warnings
import numpy as np
from nose.tools import (assert_almost_equal, assert_equal,
assert_greater_equal, assert_raises, ok_)
import pandas as pd
from pandas.testing import assert_frame_equal
import neurom as nm
from neurom.apps import morph_stats as ms
from neurom.exceptions import ConfigError
from neurom.features import NEURITEFEATURES, NEURONFEATURES
from numpy.testing import assert_array_equal
# Location of the shared SWC test fixtures, relative to this test module.
DATA_PATH = Path(__file__).parent.parent.parent.parent / 'test_data'
SWC_PATH = DATA_PATH / 'swc'
# Reference configuration exercising one statistic of each flavour:
# scalar per-neurite features, a multidimensional feature
# (segment_midpoints -> one column per axis) and a neuron-level feature,
# computed for all four neurite types.
REF_CONFIG = {
    'neurite': {
        'section_lengths': ['max', 'total'],
        'section_volumes': ['total'],
        'section_branch_orders': ['max'],
        'segment_midpoints': ['max'],
    },
    'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL'],
    'neuron': {
        'soma_radii': ['mean'],
    }
}
# Expected extract_stats output for test_data/swc/Neuron.swc under
# REF_CONFIG; keys follow the '<stat>_<feature>' naming of ms._stat_name,
# with multidimensional features suffixed by their component index.
REF_OUT = {
    'mean_soma_radius': 0.13065629648763766,
    'axon': {
        'total_section_length': 207.87975220908129,
        'max_section_length': 11.018460736176685,
        'max_section_branch_order': 10,
        'total_section_volume': 276.73857657289523,
        'max_segment_midpoint_0': 0.0,
        'max_segment_midpoint_1': 0.0,
        'max_segment_midpoint_2': 49.520305964149998,
    },
    'all': {
        'total_section_length': 840.68521442251949,
        'max_section_length': 11.758281556059444,
        'max_section_branch_order': 10,
        'total_section_volume': 1104.9077419665782,
        'max_segment_midpoint_0': 64.401674984050004,
        'max_segment_midpoint_1': 48.48197694465,
        'max_segment_midpoint_2': 53.750947521650005,
    },
    'apical_dendrite': {
        'total_section_length': 214.37304577550353,
        'max_section_length': 11.758281556059444,
        'max_section_branch_order': 10,
        'total_section_volume': 271.9412385728449,
        'max_segment_midpoint_0': 64.401674984050004,
        'max_segment_midpoint_1': 0.0,
        'max_segment_midpoint_2': 53.750947521650005,
    },
    'basal_dendrite': {
        'total_section_length': 418.43241643793476,
        'max_section_length': 11.652508126101711,
        'max_section_branch_order': 10,
        'total_section_volume': 556.22792682083821,
        'max_segment_midpoint_0': 64.007872333250006,
        'max_segment_midpoint_1': 48.48197694465,
        'max_segment_midpoint_2': 51.575580778049996,
    },
}
def test_name_correction():
    """_stat_name singularizes the feature and prefixes the stat mode."""
    cases = (
        (('foo', 'raw'), 'foo'),
        (('foos', 'raw'), 'foo'),
        (('foos', 'bar'), 'bar_foo'),
        (('foos', 'total'), 'total_foo'),
        (('soma_radii', 'total'), 'total_soma_radius'),
        (('soma_radii', 'raw'), 'soma_radius'),
    )
    for args, expected in cases:
        assert_equal(ms._stat_name(*args), expected)
def test_eval_stats_raw_returns_list():
    """'raw' mode hands the values back unaggregated, as a list."""
    sample = np.array([1, 2, 3, 4])
    assert_equal(ms.eval_stats(sample, 'raw'), [1, 2, 3, 4])
def test_eval_stats_empty_input_returns_none():
    """Aggregating an empty sequence yields None rather than raising."""
    result = ms.eval_stats([], 'min')
    ok_(result is None)
def test_eval_stats_total_returns_sum():
    """'total' mode sums the values."""
    sample = np.array([1, 2, 3, 4])
    assert_equal(ms.eval_stats(sample, 'total'), 10)
def test_eval_stats_on_empty_stat():
    """Empty arrays: aggregations give None, 'raw' gives [], 'total' gives 0."""
    empty = np.array([])
    # All numpy-backed aggregations degrade to None on empty input.
    for mode in ('mean', 'std', 'median', 'min', 'max'):
        assert_equal(ms.eval_stats(empty, mode), None)
    # 'raw' and 'total' have natural empty-input values instead.
    assert_equal(ms.eval_stats(empty, 'raw'), [])
    assert_equal(ms.eval_stats(empty, 'total'), 0.0)
def test_eval_stats_applies_numpy_function():
    """Each stat mode matches the numpy function of the same name."""
    sample = np.arange(1, 10)
    for mode in ('min', 'max', 'mean', 'median', 'std'):
        expected = getattr(np, mode)(sample)
        assert_equal(ms.eval_stats(sample, mode), expected)
def test_extract_stats_single_neuron():
    """extract_stats on one neuron produces every configured key."""
    nrn = nm.load_neuron(SWC_PATH / 'Neuron.swc')
    res = ms.extract_stats(nrn, REF_CONFIG)
    assert_equal(set(res.keys()), set(REF_OUT.keys()))
    # Note: soma radius is calculated from the sphere that gives the area
    # of the cylinders described in Neuron.swc
    assert_almost_equal(res['mean_soma_radius'], REF_OUT['mean_soma_radius'])
    for neurite_type in ('all', 'axon', 'basal_dendrite', 'apical_dendrite'):
        actual, expected = res[neurite_type], REF_OUT[neurite_type]
        assert_equal(set(actual.keys()), set(expected.keys()))
        for feature in actual.keys():
            assert_almost_equal(actual[feature], expected[feature], places=3)
def test_extract_dataframe():
    """extract_dataframe accepts a population, a single neuron, lists of
    neurons or lists of paths, and returns one row per neuron with a
    (neurite_type, stat) column MultiIndex."""
    # Vanilla test
    nrns = nm.load_neurons([Path(SWC_PATH, name)
                            for name in ['Neuron.swc', 'simple.swc']])
    actual = ms.extract_dataframe(nrns, REF_CONFIG)
    expected = pd.read_csv(Path(DATA_PATH, 'extracted-stats.csv'), header=[0, 1], index_col=0)
    assert_frame_equal(actual, expected)
    # Test with a single neuron in the population
    nrns = nm.load_neurons(Path(SWC_PATH, 'Neuron.swc'))
    actual = ms.extract_dataframe(nrns, REF_CONFIG)
    assert_frame_equal(actual, expected.iloc[[0]], check_dtype=False)
    # Test with a config without the 'neuron' key
    nrns = nm.load_neurons([Path(SWC_PATH, name)
                            for name in ['Neuron.swc', 'simple.swc']])
    config = {'neurite': {'section_lengths': ['total']},
              'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL']}
    actual = ms.extract_dataframe(nrns, config)
    idx = pd.IndexSlice
    # Narrow the reference frame to the columns the reduced config produces.
    expected = expected.loc[:, idx[:, ['name', 'total_section_length']]]
    assert_frame_equal(actual, expected)
    # Test with a FstNeuron argument
    nrn = nm.load_neuron(Path(SWC_PATH, 'Neuron.swc'))
    actual = ms.extract_dataframe(nrn, config)
    assert_frame_equal(actual, expected.iloc[[0]], check_dtype=False)
    # Test with a List[FstNeuron] argument
    nrns = [nm.load_neuron(Path(SWC_PATH, name))
            for name in ['Neuron.swc', 'simple.swc']]
    actual = ms.extract_dataframe(nrns, config)
    assert_frame_equal(actual, expected)
    # Test with a List[Path] argument
    nrns = [Path(SWC_PATH, name) for name in ['Neuron.swc', 'simple.swc']]
    actual = ms.extract_dataframe(nrns, config)
    assert_frame_equal(actual, expected)
    # Test without any neurite_type keys, it should pick the defaults
    config = {'neurite': {'total_length_per_neurite': ['total']}}
    actual = ms.extract_dataframe(nrns, config)
    expected_columns = pd.MultiIndex.from_tuples(
        [('neuron', 'name'),
         ('axon', 'total_total_length_per_neurite'),
         ('basal_dendrite', 'total_total_length_per_neurite'),
         ('apical_dendrite', 'total_total_length_per_neurite'),
         ('all', 'total_total_length_per_neurite')])
    expected = pd.DataFrame(
        columns=expected_columns,
        data=[['Neuron', 207.87975221, 418.43241644, 214.37304578, 840.68521442],
              ['simple', 15., 16., 0., 31., ]])
    assert_frame_equal(actual, expected)
def test_extract_dataframe_multiproc():
    """Multiprocess extraction matches the reference CSV, and asking for
    more workers than CPUs emits exactly one warning."""
    nrns = nm.load_neurons([Path(SWC_PATH, name)
                            for name in ['Neuron.swc', 'simple.swc']])
    # NOTE(review): `w` is unused here — this catch_warnings only keeps the
    # warning out of the test output; confirm no assertion was intended.
    with warnings.catch_warnings(record=True) as w:
        actual = ms.extract_dataframe(nrns, REF_CONFIG, n_workers=2)
    expected = pd.read_csv(Path(DATA_PATH, 'extracted-stats.csv'), index_col=0, header=[0, 1])
    assert_frame_equal(actual, expected)
    # Oversubscribing workers must warn but still produce the same frame.
    with warnings.catch_warnings(record=True) as w:
        actual = ms.extract_dataframe(nrns, REF_CONFIG, n_workers=os.cpu_count() + 1)
        assert_equal(len(w), 1, "Warning not emitted")
    assert_frame_equal(actual, expected)
def test_get_header():
    """get_header flattens one result dict into CSV column names."""
    results = {name: REF_OUT
               for name in ('fake_name0', 'fake_name1', 'fake_name2')}
    header = ms.get_header(results)
    # 'name' + mean_soma_radius + 4 neurite types x (4 + 3) stats each.
    assert_equal(len(header), 1 + 1 + 4 * (4 + 3))
    ok_('name' in header)
    ok_('mean_soma_radius' in header)
def test_generate_flattened_dict():
    """generate_flattened_dict yields one full-width row per neuron."""
    results = {name: REF_OUT
               for name in ('fake_name0', 'fake_name1', 'fake_name2')}
    header = ms.get_header(results)
    rows = list(ms.generate_flattened_dict(header, results))
    # One row per fake neuron name.
    assert_equal(len(rows), 3)
    # Each row: name + mean_soma_radius + 4 neurite types x (4 + 3) stats.
    assert_equal(len(rows[0]), 1 + 1 + 4 * (4 + 3))
def test_full_config():
    """full_config enables every known neurite and neuron feature."""
    cfg = ms.full_config()
    assert_equal(set(cfg), {'neurite', 'neuron', 'neurite_type'})
    assert_equal(set(cfg['neurite']), set(NEURITEFEATURES))
    assert_equal(set(cfg['neuron']), set(NEURONFEATURES))
def test_sanitize_config():
    """sanitize_config rejects malformed input and fills in missing keys."""
    # A non-dict 'neurite' section is a configuration error.
    assert_raises(ConfigError, ms.sanitize_config, {'neurite': []})
    # An empty config gains the 'neurite' and 'neuron' sections.
    assert_equal(len(ms.sanitize_config({})), 2)
    # A complete config passes through with all three sections intact.
    config = {
        'neurite': {
            'section_lengths': ['max', 'total'],
            'section_volumes': ['total'],
            'section_branch_orders': ['max']
        },
        'neurite_type': ['AXON', 'APICAL_DENDRITE', 'BASAL_DENDRITE', 'ALL'],
        'neuron': {
            'soma_radii': ['mean']
        }
    }
    assert_equal(len(ms.sanitize_config(config)), 3)
def test_multidimensional_features():
    '''Features should be split into sub-features when they
    are multidimensional.

    This should be the case even when the feature is `None` or `[]`
    The following neuron has no axon but the axon feature segment_midpoints for
    the axon should still be made of 3 values (X, Y and Z)

    Cf: https://github.com/BlueBrain/NeuroM/issues/859
    '''
    neuron = nm.load_neuron(Path(SWC_PATH, 'no-axon.swc'))
    # segment_midpoints is 3-dimensional: expect _0/_1/_2 columns, all None
    # because this neuron has no axon.
    config = {'neurite': {'segment_midpoints': ['max']},
              'neurite_type': ['AXON']}
    actual = ms.extract_dataframe(neuron, config)
    assert_array_equal(actual['axon'][['max_segment_midpoint_0',
                                       'max_segment_midpoint_1',
                                       'max_segment_midpoint_2']].values,
                       [[None, None, None]])
    # partition_pairs is 2-dimensional: expect _0/_1 columns, same rule.
    config = {'neurite': {'partition_pairs': ['max']}}
    actual = ms.extract_dataframe(neuron, config)
    assert_array_equal(actual['axon'][['max_partition_pair_0',
                                       'max_partition_pair_1']].values,
                       [[None, None]])
| |
from django.test import TestCase, Client
import factory
from . import models
from .models import Photos, Album, Face
from django.contrib.auth.models import User
from django.core.files import File
from django.conf import settings
class UserFactory(factory.DjangoModelFactory):
    """Factory producing ``User`` rows; reuses an existing row with the
    same username thanks to ``django_get_or_create``."""
    class Meta:
        model = models.User
        django_get_or_create = ('username',)
    # Default username; callers override via UserFactory(username=...).
    username = 'people'
class PhotoFactory(factory.DjangoModelFactory):
    """Factory producing ``Photos`` rows owned by a dedicated user."""
    class Meta:
        model = Photos
    # Lazy SubFactory: the owning user is created when a photo is built.
    # The previous `UserFactory.create(username='user1')` ran at import
    # time, hitting the database as soon as this module was loaded,
    # outside any test transaction.
    user = factory.SubFactory(UserFactory, username='user1')
class AlbumFactory(factory.DjangoModelFactory):
    """Factory producing ``Album`` rows owned by a dedicated user."""
    class Meta:
        model = Album
    # Lazy SubFactory: the owning user is created when an album is built.
    # The previous `UserFactory.create(username='user2')` ran at import
    # time, hitting the database as soon as this module was loaded,
    # outside any test transaction.
    user = factory.SubFactory(UserFactory, username='user2')
class TestPhoto(TestCase):
    """Defaults and ownership of a freshly created ``Photos`` row."""

    @classmethod
    def setUpClass(cls):
        # `super(TestCase, cls)` only built a super object without calling
        # anything; invoke the real hook so Django's class-level setup runs.
        super(TestPhoto, cls).setUpClass()
        cls.username = 'person'
        cls.password = 'password'
        cls.testuser = models.User.objects.create_user(
            username=cls.username,
            password=cls.password
        )
        cls.testphoto = Photos.objects.create(
            user=cls.testuser
        )

    def test_photo_belongs_to_unique_user(self):
        self.assertEqual(self.testphoto.user.username, self.username)

    def test_new_empty_description(self):
        self.assertEqual(self.testphoto.description, '')

    def test_new_empty_title(self):
        self.assertEqual(self.testphoto.title, '')

    def test_photo_access(self):
        # New photos default to private visibility.
        self.assertEqual(self.testphoto.published, 'private')

    @classmethod
    def tearDownClass(cls):
        models.Photos.objects.all().delete()
        models.User.objects.all().delete()
        # Chain to the parent hook after our own cleanup.
        super(TestPhoto, cls).tearDownClass()
class TestAlbum(TestCase):
    """Defaults and ownership of a freshly created ``Album`` row."""

    @classmethod
    def setUpClass(cls):
        # `super(TestCase, cls)` only built a super object without calling
        # anything; invoke the real hook so Django's class-level setup runs.
        super(TestAlbum, cls).setUpClass()
        cls.username = 'person'
        cls.password = 'password'
        cls.testuser = models.User.objects.create_user(
            username=cls.username,
            password=cls.password
        )
        cls.testalbum = Album.objects.create(
            user=cls.testuser
        )

    def test_user_album_exists(self):
        self.assertIsNotNone(self.testalbum.user)

    def test_new_empty_title(self):
        self.assertEqual(self.testalbum.title, '')

    def test_new_empty_desc(self):
        self.assertEqual(self.testalbum.description, '')

    @classmethod
    def tearDownClass(cls):
        models.Album.objects.all().delete()
        models.User.objects.all().delete()
        # Chain to the parent hook after our own cleanup.
        super(TestAlbum, cls).tearDownClass()
class TestAlbumListView(TestCase):
    """Access control for the album list view."""

    @classmethod
    def setUpClass(cls):
        # `super(TestCase, cls)` only built a super object without calling
        # anything; invoke the real hook so Django's class-level setup runs.
        super(TestAlbumListView, cls).setUpClass()
        cls.username = 'person'
        cls.password = 'password'
        cls.testuser = models.User.objects.create_user(
            username=cls.username,
            password=cls.password
        )
        cls.c = Client()
        cls.res = cls.c.get('/profile/images/albums', follow=True)

    def test_denied_if_no_login(self):
        # NOTE(review): `res.content` is bytes on Python 3; assertIn with a
        # str assumes Python 2 — confirm the target runtime.
        self.assertEqual(self.res.status_code, 200)
        self.assertIn('Please Login', self.res.content)

    def test_allowed_if_login(self):
        assert self.c.login(
            username=self.username,
            password=self.password
        )
        self.res = self.c.get('/profile/images/albums', follow=True)
        self.assertEqual(self.res.status_code, 200)
        self.assertIn(self.username, self.res.content)

    @classmethod
    def tearDownClass(cls):
        cls.c = None
        cls.res = None
        cls.password = None
        cls.testuser = None
        models.User.objects.all().delete()
        # Chain to the parent hook after our own cleanup.
        super(TestAlbumListView, cls).tearDownClass()
class TestAlbumDetailView(TestCase):
    """Access control for the album detail view."""

    @classmethod
    def setUpClass(cls):
        # `super(TestCase, cls)` only built a super object without calling
        # anything; invoke the real hook so Django's class-level setup runs.
        super(TestAlbumDetailView, cls).setUpClass()
        cls.username = 'person'
        cls.password = 'password'
        cls.testuser = models.User.objects.create_user(
            username=cls.username,
            password=cls.password
        )
        cls.testalbum = Album.objects.create(
            user=cls.testuser,
            title='test',
            description='test'
        )
        cls.c = Client()
        cls.res = cls.c.get('/profile/images/albums/1', follow=True)

    def test_denied_if_no_login(self):
        # NOTE(review): `res.content` is bytes on Python 3; assertIn with a
        # str assumes Python 2 — confirm the target runtime.
        self.assertEqual(self.res.status_code, 200)
        self.assertIn('Please Login', self.res.content)

    def test_allowed_if_login(self):
        assert self.c.login(
            username=self.username,
            password=self.password
        )
        self.res = self.c.get('/profile/images/albums/1', follow=True)
        self.assertEqual(self.res.status_code, 200)
        self.assertIn(self.username, self.res.content)

    @classmethod
    def tearDownClass(cls):
        cls.c = None
        cls.res = None
        cls.password = None
        cls.testuser = None
        models.User.objects.all().delete()
        Album.objects.all().delete()
        # Chain to the parent hook after our own cleanup.
        super(TestAlbumDetailView, cls).tearDownClass()
class TestPhotoListView(TestCase):
    """Access control for the photo library list view."""

    @classmethod
    def setUpClass(cls):
        # `super(TestCase, cls)` only built a super object without calling
        # anything; invoke the real hook so Django's class-level setup runs.
        super(TestPhotoListView, cls).setUpClass()
        cls.username = 'person'
        cls.password = 'password'
        cls.testuser = models.User.objects.create_user(
            username=cls.username,
            password=cls.password
        )
        cls.testphoto = Photos.objects.create(
            user=cls.testuser,
            title='test',
            description='test'
        )
        cls.c = Client()
        cls.res = cls.c.get('/profile/images/library', follow=True)

    def test_denied_if_no_login(self):
        # NOTE(review): `res.content` is bytes on Python 3; assertIn with a
        # str assumes Python 2 — confirm the target runtime.
        self.assertEqual(self.res.status_code, 200)
        self.assertIn('Please Login', self.res.content)

    def test_allowed_if_login(self):
        assert self.c.login(
            username=self.username,
            password=self.password
        )
        self.res = self.c.get('/profile/images/library', follow=True)
        self.assertEqual(self.res.status_code, 200)
        self.assertIn(self.username, self.res.content)

    @classmethod
    def tearDownClass(cls):
        cls.c = None
        cls.res = None
        cls.password = None
        cls.testuser = None
        models.User.objects.all().delete()
        Photos.objects.all().delete()
        # Chain to the parent hook after our own cleanup.
        super(TestPhotoListView, cls).tearDownClass()
class TestPhotoDetailView(TestCase):
    """Access control for the photo detail view."""

    @classmethod
    def setUpClass(cls):
        # `super(TestCase, cls)` only built a super object without calling
        # anything; invoke the real hook so Django's class-level setup runs.
        super(TestPhotoDetailView, cls).setUpClass()
        cls.username = 'person'
        cls.password = 'password'
        cls.testuser = models.User.objects.create_user(
            username=cls.username,
            password=cls.password
        )
        # Attach a real image file so the detail view has something to render.
        with open(settings.BASE_DIR + '/imager_images/static/img/testface.jpg', 'rb') as fh:
            cls.testphoto = Photos.objects.create(
                user=cls.testuser,
                title='test',
                description='test',
                image=File(fh)
            )
        cls.c = Client()
        cls.res = cls.c.get(
            '/profile/images/photos/' + str(Photos.objects.all()[0].id),
            follow=True)

    def test_denied_if_no_login(self):
        # NOTE(review): `res.content` is bytes on Python 3; assertIn with a
        # str assumes Python 2 — confirm the target runtime.
        self.assertEqual(self.res.status_code, 200)
        self.assertIn('Please Login', self.res.content)

    def test_allowed_if_login(self):
        assert self.c.login(
            username=self.username,
            password=self.password
        )
        self.res = self.c.get(
            '/profile/images/photos/' + str(Photos.objects.all()[0].id),
            follow=True)
        self.assertEqual(self.res.status_code, 200)
        self.assertIn(self.username, self.res.content)

    @classmethod
    def tearDownClass(cls):
        cls.c = None
        cls.res = None
        cls.password = None
        cls.testuser = None
        models.User.objects.all().delete()
        Photos.objects.all().delete()
        # Chain to the parent hook after our own cleanup.
        super(TestPhotoDetailView, cls).tearDownClass()
class TestPhotoEdit(TestCase):
    """Editing a photo through the edit form as a logged-in owner."""

    @classmethod
    def setUpClass(cls):
        # `super(TestCase, cls)` only built a super object without calling
        # anything; invoke the real hook so Django's class-level setup runs.
        super(TestPhotoEdit, cls).setUpClass()
        cls.username = 'person'
        cls.password = 'password'
        cls.testuser = models.User.objects.create_user(
            username=cls.username,
            password=cls.password
        )
        cls.testphoto = Photos.objects.create(
            user=cls.testuser,
            title='test',
            description='test'
        )
        cls.testphoto2 = Photos.objects.create(
            user=cls.testuser,
            title='test2',
            description='test2'
        )
        cls.c = Client()
        cls.c.login(
            username=cls.username,
            password=cls.password
        )
        cls.edit = cls.c.get(
            '/imager/photo/' + str(cls.testphoto.id) + '/edit/',
            follow=True)

    def test_edit_page_load(self):
        form_fields = ['image', 'title', 'description', 'published']
        self.assertEqual(
            self.edit.context['form'].Meta.fields, form_fields)

    def test_edit_image(self):
        form = self.edit.context['form']
        data = form.initial
        data['image'] = self.testphoto2
        resp = self.c.post('/imager/photo/add', data)
        # NOTE(review): `resp` is immediately replaced by the pre-POST page,
        # so these assertions check mutated form.initial, not the server
        # round-trip — confirm this is intended.
        resp = self.edit
        self.assertEqual(
            resp.context['form'].initial['image'], self.testphoto2)

    def test_edit_title(self):
        form = self.edit.context['form']
        data = form.initial
        data['image'] = self.testphoto
        data['title'] = 'Monkey'
        resp = self.c.post('/imager/photo/add', data)
        resp = self.edit
        self.assertEqual(
            resp.context['form'].initial['title'], 'Monkey')

    def test_edit_desc(self):
        form = self.edit.context['form']
        data = form.initial
        data['image'] = self.testphoto
        data['description'] = 'This is a new description'
        resp = self.c.post('/imager/photo/add', data)
        resp = self.edit
        self.assertEqual(
            resp.context['form'].initial['description'],
            'This is a new description')

    def test_edit_published(self):
        form = self.edit.context['form']
        data = form.initial
        data['image'] = self.testphoto
        data['published'] = 'Public'
        resp = self.c.post('/imager/photo/add', data)
        resp = self.edit
        self.assertEqual(
            resp.context['form'].initial['published'], 'Public')

    @classmethod
    def tearDownClass(cls):
        cls.c = None
        cls.res = None
        cls.password = None
        cls.testuser = None
        models.User.objects.all().delete()
        Photos.objects.all().delete()
        # Chain to the parent hook after our own cleanup.
        super(TestPhotoEdit, cls).tearDownClass()
class TestAlbumEdit(TestCase):
    """Editing an album through the edit form as a logged-in owner."""

    @classmethod
    def setUpClass(cls):
        # `super(TestCase, cls)` only built a super object without calling
        # anything; invoke the real hook so Django's class-level setup runs.
        super(TestAlbumEdit, cls).setUpClass()
        cls.username = 'person'
        cls.password = 'password'
        cls.testuser = models.User.objects.create_user(
            username=cls.username,
            password=cls.password
        )
        cls.testphoto = Photos.objects.create(
            user=cls.testuser,
            title='test',
            description='test'
        )
        cls.testphoto2 = Photos.objects.create(
            user=cls.testuser,
            title='test2',
            description='test2'
        )
        cls.testalbum = Album.objects.create(
            user=cls.testuser,
            title='test',
            description='test',
        )
        cls.c = Client()
        cls.c.login(
            username=cls.username,
            password=cls.password
        )
        cls.edit = cls.c.get(
            '/imager/album/' + str(cls.testalbum.id) + '/edit/',
            follow=True)

    def test_load_album_edit_page(self):
        form_fields = ['title', 'description', 'published', 'photos', 'cover']
        self.assertEqual(
            self.edit.context['form'].Meta.fields, form_fields)

    def test_edit_title(self):
        form = self.edit.context['form']
        data = form.initial
        data['title'] = 'Update Title'
        resp = self.c.post('/imager/album/add', data)
        # NOTE(review): `resp` is immediately replaced by the pre-POST page,
        # so these assertions check mutated form.initial, not the server
        # round-trip — confirm this is intended.
        resp = self.edit
        self.assertEqual(
            resp.context['form'].initial['title'], 'Update Title')

    def test_edit_desc(self):
        form = self.edit.context['form']
        data = form.initial
        data['description'] = 'Update Desc'
        resp = self.c.post('/imager/album/add', data)
        resp = self.edit
        self.assertEqual(
            resp.context['form'].initial['description'], 'Update Desc')

    def test_edit_published(self):
        form = self.edit.context['form']
        data = form.initial
        data['published'] = 'Public'
        resp = self.c.post('/imager/album/add', data)
        resp = self.edit
        self.assertEqual(
            resp.context['form'].initial['published'], 'Public')

    def test_edit_photos_in_album(self):
        form = self.edit.context['form']
        data = form.initial
        data['photos'] = self.testphoto
        resp = self.c.post('/imager/album/add', data)
        resp = self.edit
        self.assertEqual(
            resp.context['form'].initial['photos'], self.testphoto)

    def test_edit_album_cover(self):
        form = self.edit.context['form']
        data = form.initial
        data['cover'] = self.testphoto
        resp = self.c.post('/imager/album/add', data)
        resp = self.edit
        self.assertEqual(
            resp.context['form'].initial['cover'], self.testphoto)

    @classmethod
    def tearDownClass(cls):
        cls.c = None
        cls.res = None
        cls.password = None
        cls.testuser = None
        models.User.objects.all().delete()
        Album.objects.all().delete()
        # Chain to the parent hook after our own cleanup.
        super(TestAlbumEdit, cls).tearDownClass()
class TestFaceDetect(TestCase):
    """Face detection and face renaming on an uploaded photo."""

    @classmethod
    def setUpClass(cls):
        # `super(TestCase, cls)` only built a super object without calling
        # anything; invoke the real hook so Django's class-level setup runs.
        super(TestFaceDetect, cls).setUpClass()
        cls.username = 'person'
        cls.password = 'password'
        cls.testuser = models.User.objects.create_user(
            username=cls.username,
            password=cls.password
        )
        # Upload a fixture image that contains a detectable face.
        with open(settings.BASE_DIR + '/imager_images/static/img/testface.jpg', 'rb') as fh:
            cls.testphoto = Photos.objects.create(
                user=cls.testuser,
                title='test',
                description='test',
                image=File(fh)
            )
        cls.c = Client()
        cls.res = cls.c.get(
            '/profile/images/photos/' + str(cls.testphoto.id) + '/detect',
            follow=True)

    def test_denied_if_no_login(self):
        # NOTE(review): `res.content` is bytes on Python 3; assertIn with a
        # str assumes Python 2 — confirm the target runtime.
        self.assertEqual(self.res.status_code, 200)
        self.assertIn('Please Login', self.res.content)

    def test_allowed_if_login(self):
        assert self.c.login(
            username=self.username,
            password=self.password
        )
        self.res = self.c.get(
            '/profile/images/photos/' + str(self.testphoto.id) + '/detect',
            follow=True)
        self.assertEqual(self.res.status_code, 200)
        self.assertIn(self.username, self.res.content)

    def test_face_detected(self):
        self.assertGreater(len(Face.objects.all()), 0)

    def test_face_rename(self):
        self.res = self.c.post(
            '/profile/photo/' + str(self.testphoto.id) + '/face/edit/',
            {'id': 1, 'name': 'test'}
        )
        self.assertEqual(self.res.status_code, 200)
        face = Face.objects.get(id=1)
        self.assertEqual(face.name, 'test')

    @classmethod
    def tearDownClass(cls):
        Photos.objects.all().delete()
        models.User.objects.all().delete()
        # Chain to the parent hook after our own cleanup.
        super(TestFaceDetect, cls).tearDownClass()
| |
"""
This module contains functions that allow you to manipulate, encode or decode
strings and byte sequences.
"""
import base64
import re
import string
import itertools
import six
import codecs
from kwonly_args import kwonly_defaults
import pwnypack.main
import binascii
from six.moves import range
from six.moves.urllib.parse import quote, quote_plus, unquote, unquote_plus, urlencode, parse_qs
try:
from collections import Counter
except ImportError:
from counter import Counter
# Public API of this module: one entry per codec/cipher/analysis helper
# defined below.
__all__ = [
    'xor',
    'find_xor_mask',
    'rot13',
    'caesar',
    'enhex',
    'dehex',
    'enb64',
    'deb64',
    'enurlform',
    'deurlform',
    'enurlquote',
    'deurlquote',
    'frequency',
]
def xor(key, data):
    """
    Perform cyclical exclusive or operations on ``data``.

    The ``key`` can be a an integer *(0 <= key < 256)* or a byte sequence. If
    the key is smaller than the provided ``data``, the ``key`` will be
    repeated.

    Args:
        key(int or bytes): The key to xor ``data`` with.
        data(bytes): The data to perform the xor operation on.

    Returns:
        bytes: The result of the exclusive or operation.

    Examples:
        >>> from pwny import *
        >>> xor(5, b'ABCD')
        b'DGFA'
        >>> xor(5, b'DGFA')
        b'ABCD'
        >>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
        b'15-=51)19=%5=9!)!%=-%!9!)-'
        >>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-')
        b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    """
    # A single-byte integer key becomes a one-byte key sequence.
    if type(key) is int:
        key = six.int2byte(key)
    key_len = len(key)
    # XOR each data byte against the cyclically repeated key.
    result = bytearray()
    for offset, value in enumerate(six.iterbytes(data)):
        result.append(value ^ six.indexbytes(key, offset % key_len))
    return bytes(result)
@kwonly_defaults
def find_xor_mask(data, alphabet=None, max_depth=3, min_depth=0, iv=None):
    """
    Produce a series of bytestrings that when XORed together end up being
    equal to ``data`` and only contain characters from the giving
    ``alphabet``. The initial state (or previous state) can be given as
    ``iv``.

    Arguments:
        data (bytes): The data to recreate as a series of XOR operations.
        alphabet (bytes): The bytestring containing the allowed characters
            for the XOR values. If ``None``, all characters except NUL bytes,
            carriage returns and newlines will be allowed.
        max_depth (int): The maximum depth to look for a solution.
        min_depth (int): The minimum depth to look for a solution.
        iv (bytes): Initialization vector. If ``None``, it will be assumed the
            operation starts at an all zero string.

    Returns:
        A list of bytestrings that, when XOR'ed with ``iv`` (or just each
        other if ``iv`` is not provided) will be the same as ``data``.

    Examples:
        Produce a series of strings that when XORed together will result in
        the string 'pwnypack' using only ASCII characters in the range 65 to
        96:

        >>> from pwny import *
        >>> find_xor_mask('pwnypack', alphabet=''.join(chr(c) for c in range(65, 97)))
        [b'````````', b'AAAAABAA', b'QVOXQCBJ']
        >>> xor(xor(b'````````', b'AAAAABAA'), b'QVOXQCBJ')
        'pwnypack'
    """
    # Default alphabet: every byte except NUL, LF and CR.
    if alphabet is None:
        alphabet = set(i for i in range(256) if i not in (0, 10, 13))
    else:
        alphabet = set(six.iterbytes(alphabet))

    if iv is None:
        iv = b'\0' * len(data)

    if len(data) != len(iv):
        raise ValueError('length of iv differs from data')

    # Depth 0 solution: data already equals the initial state.
    if not min_depth and data == iv:
        return []

    # From here on, work on the delta between data and the initial state.
    data = xor(data, iv)

    # Pre-flight check to see if we have all the bits we need.
    mask = 0
    for ch in alphabet:
        mask |= ch
    mask = ~mask

    # Map all bytes in data into a {byte: [pos...]} dictionary, check
    # if we have enough bits along the way.
    data_map_tmpl = {}
    for i, ch in enumerate(six.iterbytes(data)):
        if ch & mask:
            raise ValueError('Alphabet does not contain enough bits.')
        data_map_tmpl.setdefault(ch, []).append(i)

    # Let's try to find a solution.
    for depth in range(max(min_depth, 1), max_depth + 1):
        # Prepare for round.
        data_map = data_map_tmpl.copy()
        results = [[None] * len(data) for _ in range(depth)]

        # Try every combination of depth-1 alphabet bytes; the final byte
        # is solved per-position below.
        for values in itertools.product(*([alphabet] * (depth - 1))):
            # Prepare cumulative mask for this combination of alphabet.
            mask = 0
            for value in values:
                mask ^= value

            for ch in list(data_map):
                r = ch ^ mask
                if r in alphabet:
                    # Found a solution for this character, mark the result.
                    pos = data_map.pop(ch)
                    for p in pos:
                        results[0][p] = r
                        for i, value in enumerate(values):
                            results[i + 1][p] = value

            if not data_map:
                # Aaaand.. We're done!
                return [
                    b''.join(six.int2byte(b) for b in r)
                    for r in results
                ]

    # No solution found at this depth. Increase depth, try again.
    raise ValueError('No solution found.')
def caesar(shift, data, shift_ranges=('az', 'AZ')):
    """
    Apply a caesar cipher to a string.

    The caesar cipher is a substition cipher where each letter in the given
    alphabet is replaced by a letter some fixed number down the alphabet.

    If ``shift`` is ``1``, *A* will become *B*, *B* will become *C*, etc...

    You can define the alphabets that will be shift by specifying one or more
    shift ranges. The characters will than be shifted within the given ranges.

    Args:
        shift(int): The shift to apply.
        data(str): The string to apply the cipher to.
        shift_ranges(list of str): Which alphabets to shift.

    Returns:
        str: The string with the caesar cipher applied.

    Examples:
        >>> caesar(16, 'Pwnypack')
        'Fmdofqsa'
        >>> caesar(-16, 'Fmdofqsa')
        'Pwnypack'
        >>> caesar(16, 'PWNYpack', shift_ranges=('AZ',))
        'FMDOpack'
        >>> caesar(16, 'PWNYpack', shift_ranges=('Az',))
        '`g^iFqsA'
    """
    # Build a translation table covering every configured range; characters
    # outside all ranges pass through unchanged.
    table = {}
    for rng in shift_ranges:
        start, end = ord(rng[0]), ord(rng[-1])
        span = end - start + 1
        for code in range(start, end + 1):
            table[chr(code)] = chr((code - start + shift) % span + start)
    return ''.join(table.get(ch, ch) for ch in data)
# Codec-backed encoder shared by rot13; created once at import time.
rot13_encode = codecs.getencoder('rot-13')


def rot13(d):
    """
    Rotate all characters in the alphabets A-Z and a-z by 13 positions in the
    alphabet. This is a :func:`caesar` shift of 13 along the fixed alphabets
    ``A-Z`` and ``a-z``.

    Args:
        d(str): The string to the apply the cipher to.

    Returns:
        str: The string with the rot13 cipher applied.

    Examples:
        >>> rot13('whax')
        'junk'
        >>> rot13('junk')
        'whax'
    """
    # The codec returns (encoded, length); only the text is wanted.
    return rot13_encode(d)[0]
def enhex(d, separator=''):
    """
    Convert bytes to their hexadecimal representation, optionally joined by a
    given separator.

    Args:
        d(bytes): The data to convert to hexadecimal representation.
        separator(str): The separator to insert between hexadecimal tuples.

    Returns:
        str: The hexadecimal representation of ``d``.

    Examples:
        >>> from pwny import *
        >>> enhex(b'pwnypack')
        '70776e797061636b'
        >>> enhex(b'pwnypack', separator=' ')
        '70 77 6e 79 70 61 63 6b'
    """
    hexed = binascii.hexlify(d).decode('ascii')
    if not separator:
        return hexed
    # Split into two-character tuples and join them with the separator.
    pairs = [hexed[i:i + 2] for i in range(0, len(hexed), 2)]
    return separator.join(pairs)
# Pattern matching every character that is not a hex digit.
dehex_clean = re.compile('[^a-fA-F0-9]')


def dehex(d):
    """
    Convert a hexadecimal representation of a byte sequence to bytes. All
    non-hexadecimal characters will be removed from the input.

    Args:
        d(str): The string of hexadecimal tuples.

    Returns:
        bytes: The byte sequence represented by ``d``.

    Examples:
        >>> from pwny import *
        >>> dehex('70776e797061636b')
        b'pwnypack'
        >>> dehex('70 77 6e 79 70 61 63 6b')
        b'pwnypack'
    """
    # Strip separators and any other stray characters before decoding.
    cleaned = dehex_clean.sub('', d)
    return binascii.unhexlify(cleaned.encode('ascii'))
def enb64(d):
    """
    Convert bytes to their base64 representation.

    Args:
        d(bytes): The data to convert to its base64 representation.

    Returns:
        str: The base64 representation of ``d``.

    Example:
        >>> from pwny import *
        >>> enb64(b'pwnypack')
        'cHdueXBhY2s='
    """
    # b64encode yields bytes; callers expect text.
    return base64.b64encode(d).decode('ascii')
def deb64(d):
    """
    Convert a base64 representation back to its original bytes.

    Args:
        d(str): The base64 representation to decode.

    Returns:
        bytes: The bytes represented by ``d``.

    Example:
        >>> from pwny import *
        >>> deb64('cHdueXBhY2s=')
        b'pwnypack'
    """
    # b64decode expects bytes; encode the textual input first.
    return base64.b64decode(d.encode('ascii'))
def enurlform(q):
    """
    Convert a dictionary to a URL encoded query string.

    Args:
        q(dict): The query to encode.

    Returns:
        str: The urlencoded representation of ``q``.

    Example:
        >>> from pwny import *
        >>> enurlform({'foo': 'bar', 'baz': ['quux', 'corge']})
        'foo=bar&baz=quux&baz=corge'
    """
    # doseq=True expands list/tuple values into repeated key=value pairs.
    encoded = urlencode(q, doseq=True)
    return encoded
def deurlform(d):
    """
    Convert a URL encoded query string to a dictionary.

    Args:
        d(str): The URL encoded query string.

    Returns:
        dict: A dictionary containing each key and all its values as a list.

    Example:
        >>> from pwny import *
        >>> deurlform('foo=bar&baz=quux&baz=corge')
        {'foo': ['bar'], 'baz': ['quux', 'corge']}
    """
    # parse_qs groups repeated keys into lists of values.
    parsed = parse_qs(d)
    return parsed
def enurlquote(v, plus=False):
    """
    Percent encode a string for use in an URL.

    Args:
        v(str): The value to percent encode.
        plus(bool): Use a plus symbol for spaces, otherwise use %20.

    Returns:
        str: The percent encoded string.

    Example:
        >>> from pwny import *
        >>> enurlquote('Foo Bar/Baz', True)
        'Foo+Bar%2FBaz'
    """
    # Fixed doctest: the old example was missing its closing quote and
    # claimed '/' survives, but quote_plus percent-encodes '/' (quote
    # keeps it safe by default, quote_plus does not).
    return quote_plus(v) if plus else quote(v)
def deurlquote(d, plus=False):
    """
    Decode a percent encoded string.

    Args:
        d(str): The percent encoded value to decode.
        plus(bool): Parse a plus symbol as a space.

    Returns:
        str: The decoded version of the percent encoded of ``d``.

    Example:
        >>> from pwny import *
        >>> deurlquote('Foo+Bar/Baz', True)
        'Foo Bar/Baz'
    """
    # Fixed doctest: '+' only decodes to a space when plus=True; the old
    # example omitted the flag, so its shown output was wrong.
    return unquote_plus(d) if plus else unquote(d)
def frequency(v):
    """
    Perform a frequency analysis on a byte sequence or string.

    Args:
        v(bytes or str): The sequence to analyse.

    Returns:
        dict: A dictionary of unique elements in ``v`` and how often they occur.

    Example:
        >>> frequency('pwnypack')
        {'a': 1, 'c': 1, 'k': 1, 'n': 1, 'p': 2, 'w': 1, 'y': 1}
    """
    # Counter does the tallying; expose a plain dict to callers.
    return dict(Counter(v))
@pwnypack.main.register('xor')
def xor_app(parser, cmd, args):  # pragma: no cover
    """
    Xor a value with a key.
    """
    # -d / -x select how the key string is parsed; without either flag the
    # key is used verbatim as a byte sequence.
    parser.add_argument(
        '-d', '--dec',
        help='interpret the key as a decimal integer',
        dest='type',
        action='store_const',
        const=int
    )
    parser.add_argument(
        '-x', '--hex',
        help='interpret the key as an hexadecimal integer',
        dest='type',
        action='store_const',
        const=lambda v: int(v, 16)
    )
    parser.add_argument('key', help='the key to xor the value with')
    parser.add_argument('value', help='the value to xor, read from stdin if omitted', nargs='?')
    args = parser.parse_args(args)
    # Apply the chosen key conversion (decimal or hex) before xoring.
    if args.type is not None:
        args.key = args.type(args.key)
    return xor(args.key, pwnypack.main.binary_value_or_stdin(args.value))
@pwnypack.main.register('caesar')
def caesar_app(parser, cmd, args):  # pragma: no cover
    """
    Caesar crypt a value with a key.
    """
    parser.add_argument('shift', type=int, help='the shift to apply')
    parser.add_argument('value', help='the value to caesar crypt, read from stdin if omitted', nargs='?')
    parser.add_argument(
        '-s', '--shift-range',
        dest='shift_ranges',
        action='append',
        help='specify a character range to shift (defaults to a-z, A-Z)'
    )
    args = parser.parse_args(args)
    # argparse leaves an append action at None when unused; fall back to
    # the same defaults as the caesar() function.
    if not args.shift_ranges:
        args.shift_ranges = ['az', 'AZ']
    return caesar(args.shift, pwnypack.main.string_value_or_stdin(args.value), args.shift_ranges)
@pwnypack.main.register('rot13')
def rot13_app(parser, cmd, args):  # pragma: no cover
    """
    rot13 encrypt a value.
    """
    parser.add_argument('value', help='the value to rot13, read from stdin if omitted', nargs='?')
    args = parser.parse_args(args)
    return rot13(pwnypack.main.string_value_or_stdin(args.value))
@pwnypack.main.register('enb64')
def enb64_app(parser, cmd, args):  # pragma: no cover
    """
    base64 encode a value.
    """
    parser.add_argument('value', help='the value to base64 encode, read from stdin if omitted', nargs='?')
    args = parser.parse_args(args)
    return enb64(pwnypack.main.binary_value_or_stdin(args.value))
@pwnypack.main.register('deb64')
def deb64_app(parser, cmd, args):  # pragma: no cover
    """
    base64 decode a value.
    """
    parser.add_argument('value', help='the value to base64 decode, read from stdin if omitted', nargs='?')
    args = parser.parse_args(args)
    return deb64(pwnypack.main.string_value_or_stdin(args.value))
@pwnypack.main.register('enhex')
def enhex_app(parser, cmd, args):  # pragma: no cover
    """
    hex encode a value.
    """
    parser.add_argument('value', help='the value to hex encode, read from stdin if omitted', nargs='?')
    parser.add_argument(
        '--separator', '-s',
        default='',
        help='the separator to place between hex tuples'
    )
    parsed = parser.parse_args(args)
    data = pwnypack.main.binary_value_or_stdin(parsed.value)
    return enhex(data, parsed.separator)
@pwnypack.main.register('dehex')
def dehex_app(parser, cmd, args):  # pragma: no cover
    """
    hex decode a value.
    """
    # Bug fix: the help text previously said "base64 decode" (copy-paste
    # from deb64_app); this command performs hex decoding.
    parser.add_argument('value', help='the value to hex decode, read from stdin if omitted', nargs='?')
    args = parser.parse_args(args)
    return dehex(pwnypack.main.string_value_or_stdin(args.value))
@pwnypack.main.register('enurlform')
def enurlform_app(parser, cmd, args):  # pragma: no cover
    """
    encode a series of key=value pairs into a query string.
    """
    parser.add_argument('values', help='the key=value pairs to URL encode', nargs='+')
    parsed = parser.parse_args(args)
    # Split each argument on the first '=' only, so values may contain '='.
    pairs = (item.split('=', 1) for item in parsed.values)
    return enurlform(dict(pairs))
@pwnypack.main.register('deurlform')
def deurlform_app(parser, cmd, args):  # pragma: no cover
    """
    decode a query string into its key value pairs.
    """
    parser.add_argument('value', help='the query string to decode')
    parsed = parser.parse_args(args)
    # Each key may map to several values; emit one key=value pair per value.
    parts = []
    for key, values in deurlform(parsed.value).items():
        for value in values:
            parts.append('%s=%s' % (key, value))
    return ' '.join(parts)
@pwnypack.main.register('frequency')
def frequency_app(parser, cmd, args):  # pragma: no cover
    """
    perform frequency analysis on a value.
    """
    parser.add_argument('value', help='the value to analyse, read from stdin if omitted', nargs='?')
    parsed = parser.parse_args(args)
    counts = frequency(six.iterbytes(pwnypack.main.binary_value_or_stdin(parsed.value)))
    lines = []
    for byte, count in counts.items():
        # Show the character itself only for printable, non-control bytes.
        if byte >= 32 and chr(byte) in string.printable:
            lines.append('0x%02x (%c): %d' % (byte, chr(byte), count))
        else:
            lines.append('0x%02x ---: %d' % (byte, count))
    return '\n'.join(lines)
| |
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
sys.path.append('../..')
import copy
import logging
import inspect
from mongolog.handlers import MongoHandler
from base_state import BaseState
from state_tool import after_get_parameter, get_need_verify_types, \
get_next_verify_action, generate_log_data,\
SUCCESS_STATUS, CRAWL_RETRY_TIMES
from setting.db_config import DB_CONFIG
from setting.status_code import STATUS_CODE
from worker.communicate import get_parameter, success_data, \
save_data, save_status, get_today_result
# Module-level logger; state transitions are persisted to MongoDB via
# mongolog using the 'state_log' connection settings from DB_CONFIG.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
db_config = DB_CONFIG['state_log']
logger.addHandler(MongoHandler.to(host=db_config['host'],
                                  port=db_config['port'],
                                  db=db_config['db'],
                                  collection=db_config['collection']))
def extend_log_func(state):
    """Build the extra 'verify' section attached to every state log entry.

    Reads the verify-request bookkeeping attributes off *state*; the
    'now_verify_request' field is always logged as an empty string.
    """
    verify = {
        'need_verify_request': state.need_verify_request,
        'sent_verify_request': state.sent_verify_request,
        'now_verify_request': '',
    }
    return {'verify': verify}
class StartState(BaseState):
    """Entry state: record the phone number and compute which login
    parameters the selected crawler needs, then hand off to WaitLogin."""
    def __init__(self, **kwargs):
        super(StartState, self).__init__(**kwargs)
        self.parameters['tel'] = kwargs['tel']
        self.need_verify_request = []
        self.sent_verify_request = []
        self.now_verify_request = ''
        # Crawler reports needed parameter names; converted below into a
        # fixed dict of 0/1 flags for the client.
        self.need_parameters = \
            self.crawler.need_parameters(**self.parameters)
        need_parameters_dict = {'need_full_name': 0,
                                'need_id_card': 0,
                                'need_pin_pwd': 0,
                                'need_sms_verify': 0,
                                'need_captcha_verify': 0}
        for parameter in self.need_parameters:
            need_parameters_dict['need_{}'.format(parameter)] = 1
        self.need_parameters = need_parameters_dict
        # Non-empty when this number was already crawled today (skip crawl).
        self.data_sid = get_today_result(self.parameters['tel'])
        self.set_current_state(receive=True)
    def execute(self, **kwargs):
        # Nothing to do on start beyond logging; always advance to WaitLogin.
        status = STATUS_CODE[SUCCESS_STATUS]
        self.execute_message = status['message']
        self.execute_status = status['status']
        self.state_flag = 'WaitLogin'
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class WaitLoginState(BaseState):
    """Wait for login parameters (or a verify-code request) from the user.

    Determines which verification types the login flow still needs, then
    blocks on get_parameter() until the user either submits the Login
    action itself or asks for one of the pending verification codes.
    """
    def __init__(self, **kwargs):
        super(WaitLoginState, self).__init__(**kwargs)
        self.next_step = 'Login'
        self.login_verify_type = \
            self.crawler.get_login_verify_type(**self.parameters)
        self.need_verify_request = \
            get_need_verify_types(self.login_verify_type)
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = ''
        tmp_next_action = get_next_verify_action(self.need_verify_request,
                                                 self.sent_verify_request)
        if not tmp_next_action:
            self.next_action = self.next_step
        else:
            self.next_action = 'Get{}'.format(tmp_next_action)
        self.set_current_state()
    def execute(self, **kwargs):
        # Accept either the login action itself or any pending
        # 'Get<VerifyType>' request from the client.
        targets = [self.next_step]
        for verify_type in self.need_verify_request:
            targets.append('Get{}'.format(verify_type))
        parameter_status, parameter_timeout, action, parameters = \
            get_parameter(sid=self.sid, targets=targets)
        status_code, state_flag = after_get_parameter(parameter_status,
                                                      parameter_timeout,
                                                      action)
        if not state_flag:
            self.parameters.update(parameters)
            if action == self.next_step:
                self.state_flag = 'UnderLogin'
            else:
                # Bug fix: str has no .remove() (the original raised
                # AttributeError here). Strip the leading 'Get' to recover
                # the verification type, e.g. 'GetSMS' -> 'SMS'.
                self.now_verify_request = action.replace('Get', '', 1)
                self.state_flag = 'UnderLoginVerifyRequest'
        else:
            self.state_flag = state_flag
        self.execute_status = status_code['status']
        self.execute_message = status_code['message']
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class UnderLoginVerifyRequestState(BaseState):
    """Ask the crawler to send a login-phase verification challenge
    (SMS code or captcha image) to the user."""
    def __init__(self, **kwargs):
        super(UnderLoginVerifyRequestState, self).__init__(**kwargs)
        self.need_verify_request = self.pre_state.need_verify_request
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = self.pre_state.now_verify_request
        self.set_current_state(receive=True)
    def execute(self, **kwargs):
        # NOTE(review): if now_verify_request is neither 'SMS' nor
        # 'Captcha', key/level/message/image_str are never bound and the
        # code below raises NameError -- confirm upstream guarantees this.
        if self.now_verify_request == 'SMS':
            key, level, message, image_str = \
                self.crawler.send_login_sms_request(**self.parameters)
        elif self.now_verify_request == 'Captcha':
            key, level, message, image_str = \
                self.crawler.send_login_captcha_request(**self.parameters)
        # level == 0 is treated as success throughout this module.
        if level == 0:
            status_code = STATUS_CODE[SUCCESS_STATUS]
            self.sent_verify_request.append(self.now_verify_request)
            self.state_flag = 'WaitLogin'
        else:
            status_code = STATUS_CODE[key]
            self.state_flag = 'Failed'
        # Captcha image (or empty) forwarded to the client.
        self.verify_content = image_str
        self.execute_status = status_code['status']
        self.execute_message = message
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class UnderLoginState(BaseState):
    """Perform the actual login, then route to NoCrawl / UnderCrawl /
    WaitCode / WaitLogin / Failed depending on the outcome."""
    def __init__(self, **kwargs):
        super(UnderLoginState, self).__init__(**kwargs)
        self.need_verify_request = self.pre_state.need_verify_request
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = ''
        self.set_current_state(receive=True)
    def execute(self, **kwargs):
        key, level, message = self.crawler.login(**self.parameters)
        # Post-login verification requirements (if any).
        self.verify_type = \
            self.crawler.get_verify_type(**self.parameters)
        if level == 0:
            # Login succeeded.
            status_code = STATUS_CODE[SUCCESS_STATUS]
            if self.data_sid:
                # Today's data already exists; skip crawling.
                self.state_flag = 'NoCrawl'
            elif not self.verify_type:
                self.state_flag = 'UnderCrawl'
            else:
                self.state_flag = 'WaitCode'
        elif level in [1, 2]:
            # Levels 1/2 appear to be retryable login failures -- the user
            # gets another attempt; confirm against the crawler API.
            status_code = STATUS_CODE[key]
            self.state_flag = 'WaitLogin'
        else:
            status_code = STATUS_CODE[key]
            self.state_flag = 'Failed'
        self.execute_status = status_code['status']
        self.execute_message = message
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class WaitCodeState(BaseState):
    """Wait for a post-login verification code (or a code request).

    Mirrors WaitLoginState for the verification step after login.
    """
    def __init__(self, **kwargs):
        super(WaitCodeState, self).__init__(**kwargs)
        self.next_step = 'Verify'
        # self.verify_type is expected to be carried over from the
        # previous state via BaseState -- TODO confirm.
        self.need_verify_request = \
            get_need_verify_types(self.verify_type)
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = ''
        tmp_next_action = get_next_verify_action(self.need_verify_request,
                                                 self.sent_verify_request)
        if not tmp_next_action:
            self.next_action = self.next_step
        else:
            self.next_action = 'Get{}'.format(tmp_next_action)
        self.set_current_state()
    def execute(self, **kwargs):
        # Accept the Verify action itself or any pending 'Get<Type>' request.
        targets = [self.next_step]
        for verify_type in self.need_verify_request:
            targets.append('Get{}'.format(verify_type))
        parameter_status, parameter_timeout, action, parameters = \
            get_parameter(sid=self.sid, targets=targets)
        status_code, state_flag = after_get_parameter(parameter_status,
                                                      parameter_timeout,
                                                      action)
        if not state_flag:
            self.parameters.update(parameters)
            if action == self.next_step:
                self.state_flag = 'UnderVerify'
            else:
                # Bug fix: str has no .remove() (the original raised
                # AttributeError here). Strip the leading 'Get' to recover
                # the verification type, e.g. 'GetCaptcha' -> 'Captcha'.
                self.now_verify_request = action.replace('Get', '', 1)
                self.state_flag = 'UnderVerifyRequest'
        else:
            self.state_flag = state_flag
        self.execute_status = status_code['status']
        self.execute_message = status_code['message']
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class UnderVerifyRequestState(BaseState):
    """Ask the crawler to send a post-login verification challenge
    (SMS code or captcha image) to the user."""
    def __init__(self, **kwargs):
        super(UnderVerifyRequestState, self).__init__(**kwargs)
        self.need_verify_request = self.pre_state.need_verify_request
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = self.pre_state.now_verify_request
        self.set_current_state(receive=True)
    def execute(self, **kwargs):
        # NOTE(review): for any now_verify_request other than 'SMS' or
        # 'Captcha' the unpack targets below stay unbound -> NameError;
        # confirm upstream guarantees one of the two.
        if self.now_verify_request == 'SMS':
            key, level, message, image_str = \
                self.crawler.send_sms_request(**self.parameters)
        elif self.now_verify_request == 'Captcha':
            key, level, message, image_str = \
                self.crawler.send_captcha_request(**self.parameters)
        if level == 0:
            status_code = STATUS_CODE[SUCCESS_STATUS]
            self.sent_verify_request.append(self.now_verify_request)
            self.state_flag = 'WaitCode'
        else:
            status_code = STATUS_CODE[key]
            self.state_flag = 'Failed'
        # Captcha image (or empty) forwarded to the client.
        self.verify_content = image_str
        self.execute_status = status_code['status']
        self.execute_message = message
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class UnderVerifyState(BaseState):
    """Submit the user-provided verification code to the crawler and
    route to UnderCrawl / WaitCode / Failed based on the result."""
    def __init__(self, **kwargs):
        super(UnderVerifyState, self).__init__(**kwargs)
        self.need_verify_request = self.pre_state.need_verify_request
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = ''
        self.set_current_state(receive=True)
    def execute(self, **kwargs):
        key, level, message = self.crawler.verify(**self.parameters)
        if level == 0:
            # Verification accepted; proceed to crawling.
            status_code = STATUS_CODE[SUCCESS_STATUS]
            self.state_flag = 'UnderCrawl'
        elif level == 2:
            # Level 2 appears to mean "wrong code, retryable" -- confirm.
            status_code = STATUS_CODE[key]
            self.state_flag = 'WaitCode'
        else:
            status_code = STATUS_CODE[key]
            self.state_flag = 'Failed'
        self.execute_status = status_code['status']
        self.execute_message = message
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class UnderCrawlState(BaseState):
    """Crawl the user's info and call log, retrying transient failures."""
    def __init__(self, **kwargs):
        super(UnderCrawlState, self).__init__(**kwargs)
        self.need_verify_request = self.pre_state.need_verify_request
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = ''
        self.next_action = 'Finish'
        self.set_current_state()
    def execute(self, **kwargs):
        # Bug fix: time.sleep() is used below but 'time' was never imported
        # at module level, which raised NameError on the retry path.
        import time
        user_info = {}
        call_log = []
        parse_call_log = True
        # Stage 1: fetch basic user info, up to CRAWL_RETRY_TIMES attempts.
        for retry_time in xrange(CRAWL_RETRY_TIMES):
            key, level, message, user_info = \
                self.crawler.crawl_info(**self.parameters)
            if level == 0 and user_info['full_name']:
                break
            time.sleep(3)
        else:
            # All retries exhausted: persist the partial result and skip
            # the call-log stage.
            status_code = STATUS_CODE[key]
            save_data(self.sid, tel=self.tel,
                      user_info=user_info, call_log=call_log,
                      status=status_code['status'], message=message)
            parse_call_log = False
        if parse_call_log:
            # Stage 2: fetch the call log with the same retry policy.
            for retry_time in xrange(CRAWL_RETRY_TIMES):
                key, level, message, call_log = \
                    self.crawler.crawl_call_log(**self.parameters)
                if level == 0:
                    status_code = STATUS_CODE[SUCCESS_STATUS]
                    break
                else:
                    status_code = STATUS_CODE[key]
                    time.sleep(3)
            save_data(self.sid, tel=self.tel,
                      user_info=user_info, call_log=call_log,
                      status=status_code['status'], message=message)
        # NOTE(review): 'level' here is whatever the last crawler call set;
        # when stage 1 exhausted retries with level == 0 (empty full_name)
        # this still marks the session successful -- confirm intended.
        if level == 0:
            success_data(self.sid)
        self.execute_status = status_code['status']
        self.execute_message = message
        self.state_flag = 'End'
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class FailedState(BaseState):
    """Terminal failure state: report the previous state's error status
    to the client and offer a Reset action."""
    def __init__(self, **kwargs):
        super(FailedState, self).__init__(**kwargs)
        self.next_action = 'Reset'
        self.need_verify_request = self.pre_state.need_verify_request
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = ''
        self.set_current_state()
    def execute(self, **kwargs):
        # Empty state_flag terminates the state machine loop.
        self.state_flag = ''
        save_status(self.sid, self.pre_status, self.pre_message)
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class AbortState(BaseState):
    """Terminal state entered when the user cancels the session."""
    def __init__(self, **kwargs):
        super(AbortState, self).__init__(**kwargs)
        self.next_action = 'Finish'
        self.need_verify_request = self.pre_state.need_verify_request
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = ''
        self.set_current_state()
    def execute(self, **kwargs):
        # Empty state_flag terminates the state machine loop.
        self.state_flag = ''
        status = STATUS_CODE['user_exit']['status']
        message = STATUS_CODE['user_exit']['message']
        save_status(self.sid, status, message)
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class EndState(BaseState):
    """Terminal success state: only logs; results were saved earlier."""
    def __init__(self, **kwargs):
        super(EndState, self).__init__(**kwargs)
        self.next_action = 'Finish'
        self.need_verify_request = self.pre_state.need_verify_request
        self.sent_verify_request = self.pre_state.sent_verify_request
        self.now_verify_request = ''
        self.set_current_state()
    def execute(self, **kwargs):
        # Empty state_flag terminates the state machine loop.
        self.state_flag = ''
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class NoCrawlState(BaseState):
    """Terminal state when today's data already exists; no crawl needed."""
    def __init__(self, **kwargs):
        super(NoCrawlState, self).__init__(**kwargs)
        self.need_verify_request = []
        self.sent_verify_request = []
        self.now_verify_request = ''
        self.next_action = 'NoCrawlFinish'
        self.set_current_state()
    def execute(self, **kwargs):
        # Empty state_flag terminates the state machine loop.
        self.state_flag = ''
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
class NoneState(BaseState):
    """Terminal state for phone numbers with no supported crawler."""
    def __init__(self, **kwargs):
        super(NoneState, self).__init__(**kwargs)
        self.next_action = 'Unsupported'
        # No parameters can be requested from an unsupported carrier.
        self.need_parameters = {
            'need_full_name': 0,
            'need_id_card': 0,
            'need_pin_pwd': 0,
            'need_sms_verify': 0,
            'need_captcha_verify': 0
        }
        self.need_verify_request = []
        self.sent_verify_request = []
        self.now_verify_request = ''
        self.set_current_state()
    def execute(self, **kwargs):
        # Empty state_flag terminates the state machine loop.
        self.state_flag = ''
        status = STATUS_CODE['no_supported_crawler']['status']
        message = STATUS_CODE['no_supported_crawler']['message']
        save_status(self.sid, status, message)
        message = generate_log_data(self, extend_log_func)
        logger.info(message)
# All classes visible in this module, keyed by class name, so that
# state_interface() can instantiate a state from its flag string.
CLASS_LIST = inspect.getmembers(sys.modules[__name__], inspect.isclass)
CLASS_DICT = dict(CLASS_LIST)
def state_interface(**kwargs):
    """Instantiate the state class named by kwargs['state_flag'].

    Returns None for a falsy flag (terminal state); otherwise looks up
    '<flag>State' in CLASS_DICT and constructs it with **kwargs.
    """
    flag = kwargs['state_flag']
    if not flag:
        return None
    return CLASS_DICT['{}State'.format(flag)](**kwargs)
| |
import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import configuration
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
class Dropout(function_node.FunctionNode):
    """Dropout regularization."""
    # Set in forward_gpu when the cuDNN path is taken so that backward()
    # dispatches to the matching cuDNN gradient implementation.
    _use_cudnn = False
    def __init__(self, dropout_ratio, mask=None):
        # dropout_ratio: drop probability; must satisfy 0.0 <= ratio < 1.0.
        # mask: optional precomputed (already scaled) mask; when given, no
        # random mask is generated and the iDeep/cuDNN fast paths are
        # skipped.
        if not 0.0 <= dropout_ratio < 1.0:
            raise ValueError('dropout_ratio must be in the range [0, 1)')
        self.dropout_ratio = dropout_ratio
        self.mask = mask
    def check_type_forward(self, in_types):
        # Single floating-point input named 'x'.
        type_check._argname(in_types, ('x',))
        type_check.expect(in_types[0].dtype.kind == 'f')
    def forward_cpu(self, x):
        # iDeep fast path only when no explicit mask was supplied.
        if (intel64.should_use_ideep('>=auto')
                and intel64.inputs_all_ready(x)
                and self.mask is None):
            return self._forward_ideep(x)
        if self.mask is not None:
            y = x[0] * self.mask
        else:
            # Surviving elements are scaled by 1/(1-ratio) so the expected
            # value of the output matches the input.
            scale = x[0].dtype.type(1. / (1 - self.dropout_ratio))
            flag = numpy.random.rand(*x[0].shape) >= self.dropout_ratio
            self.mask = scale * flag
            y = x[0] * self.mask
        return y,
    def forward_gpu(self, x):
        # cuDNN path requires contiguous input and no user-supplied mask.
        if (chainer.should_use_cudnn('==always', 5000)
                and x[0].flags.c_contiguous
                and self.mask is None):
            self._use_cudnn = True
            if hasattr(self, 'states'):
                # if we already have a dropout mask,
                # the forward operation is equal to backward.
                return cuda.get_cudnn_dropout_states().backward(
                    None, x[0], self.dropout_ratio, self.states),
            self.states, y = cuda.get_cudnn_dropout_states().forward(
                None, x[0], self.dropout_ratio)
            return y,
        else:
            if self.mask is not None:
                y = x[0] * self.mask
            else:
                # Fused kernel computes the scaled mask and the output in a
                # single elementwise pass.
                rand = cuda.cupy.random.rand(*x[0].shape, dtype=numpy.float32)
                scale = x[0].dtype.type(1. / (1 - self.dropout_ratio))
                self.mask, y = cuda.elementwise(
                    'T x, R r, T scale, T ratio', 'T mask, T y',
                    '''
                    mask = (r >= ratio) * scale;
                    y = x * mask;
                    ''',
                    'dropout_fwd',
                )(x[0], rand, scale, self.dropout_ratio)
            return y,
    def _forward_ideep(self, x):
        # iDeep generates the mask internally; keep it for backward.
        mask, y = intel64.ideep.dropout.Forward(
            intel64.ideep.array(x[0]),
            self.dropout_ratio)
        self.mask = mask
        return y,
    def backward(self, x, gy):
        # Gradient must be computed with the same mask/states as forward.
        if chainer.should_use_cudnn('==always', 5000) and self._use_cudnn:
            return DropoutGradCuDNN(self.states, self.dropout_ratio).apply(gy)
        else:
            return DropoutGrad(self.mask).apply(gy)
class DropoutGrad(function_node.FunctionNode):
    """Computes the gradient of the Dropout function."""
    def __init__(self, mask):
        # The (already scaled) mask produced by the forward pass.
        self.mask = mask
    def forward(self, inputs):
        use_ideep = (intel64.should_use_ideep('>=auto')
                     and intel64.inputs_all_ready(inputs))
        if use_ideep:
            return self._forward_ideep(inputs)
        gy, = inputs
        return gy * self.mask,
    def _forward_ideep(self, inputs):
        mask = intel64.ideep.array(self.mask)
        gy = intel64.ideep.array(inputs[0])
        return intel64.ideep.dropout.Backward(mask, gy),
    def backward(self, indexes, gy):
        # Second-order gradient reuses the same mask.
        return DropoutGrad(self.mask).apply(gy)
class DropoutGradCuDNN(function_node.FunctionNode):
    """Computes the gradient of the Dropout function with cuDNN support."""
    def __init__(self, states, dropout_ratio):
        # cuDNN dropout states captured during the forward pass.
        self.states = states
        self.dropout_ratio = dropout_ratio
    def forward(self, inputs):
        gy, = inputs
        states_handle = cuda.get_cudnn_dropout_states()
        return states_handle.backward(None, gy, self.dropout_ratio,
                                      self.states),
    def backward(self, indexes, gy):
        # Second-order gradient reuses the same states and ratio.
        return DropoutGradCuDNN(self.states, self.dropout_ratio).apply(gy)
def dropout(x, ratio=.5, **kwargs):
    """dropout(x, ratio=.5, *, mask=None, return_mask=False)

    Drops elements of input variable randomly.

    This function drops input elements randomly with probability ``ratio`` and
    scales the remaining elements by factor ``1 / (1 - ratio)``. In testing
    mode (i.e., ``chainer.config.train`` is set to ``False``), it does nothing
    and just returns ``x``.

    .. warning::

       ``train`` argument is not supported anymore since v2.
       Instead, use ``chainer.using_config('train', boolean)``.
       See :func:`chainer.using_config`.

    Args:
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`):
            Input variable. A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
        ratio (float):
            Dropout ratio. The ``ratio`` must be ``0.0 <= ratio < 1.0``.
        mask (`ndarray` or None):
            The mask to be used for dropout.
            You do not have to specify this value, unless you need to make
            results deterministic.
            If ``mask`` is not specified or set to ``None``, a mask will be
            generated randomly according to the given ``ratio``.
            If ``mask`` is specified, ``ratio`` will be ignored.
            The shape and dtype must be the same as ``x`` and should be on the
            same device.
            Note that iDeep and cuDNN will not be used for this function if
            mask is specified, as iDeep and cuDNN do not support it.
        return_mask (bool):
            If ``True``, the mask used for dropout is returned together with
            the output variable.
            The returned mask can later be reused by passing it to ``mask``
            argument.

    Returns:
        ~chainer.Variable or tuple:
            When ``return_mask`` is ``False`` (default), returns the output
            variable.
            When ``True``, returns the tuple of the output variable and
            mask (`ndarray`). The mask will be on the same device as the
            input. The mask will become ``None`` when
            ``chainer.config.train`` is set to ``False``.

    See the paper by G. Hinton: `Improving neural networks by preventing \
    co-adaptation of feature detectors <https://arxiv.org/abs/1207.0580>`_.

    .. admonition:: Example

        >>> x = np.array([[-1, 0], [2, -3], [-2, 1]], np.float32)
        >>> with chainer.using_config('train', True):
        ...     y = F.dropout(x)
        >>> y.array
        array([[-2.,  0.],
               [ 4., -6.],
               [-0.,  2.]], dtype=float32)
        >>> with chainer.using_config('train', True):
        ...     y = F.dropout(x, ratio=0.0) \
# dropout returns original input if ratio=0.0
        >>> (x == y.array).all()
        True
        >>> with chainer.using_config('train', False):
        ...     y = F.dropout(x) \
# dropout in test mode returns original input
        >>> (x == y.array).all()
        True

    """
    mask, return_mask = None, False
    if kwargs:
        mask, return_mask = argument.parse_kwargs(
            kwargs, ('mask', mask), ('return_mask', return_mask),
            train='train argument is not supported anymore. '
                  'Use chainer.using_config')
    if not configuration.config.train:
        # Test mode: identity mapping, no mask.
        out = chainer.as_variable(x)
        mask = None
    else:
        func = Dropout(ratio, mask)
        out, = func.apply((x,))
        mask = func.mask
    return (out, mask) if return_mask else out
| |
#!/usr/bin/env python
"""usage: %prog [options] filename
Parse a document to a tree, with optional profiling
"""
import sys
import os
import traceback
from optparse import OptionParser
from html5lib import html5parser, sanitizer
from html5lib.tokenizer import HTMLTokenizer
from html5lib import treebuilders, serializer, treewalkers
from html5lib import constants
from html5lib import utils
def parse():
    """Parse the document named on the command line and print the result.

    Supports input from a URL (http://), stdin ('-') or the local file
    system, then dispatches to run()/printOutput() according to the
    command-line options (profiling, timing, output format).
    """
    optParser = getOptParser()
    opts,args = optParser.parse_args()
    encoding = "utf8"
    try:
        f = args[-1]
        # Try opening from the internet
        if f.startswith('http://'):
            try:
                import urllib.request, urllib.parse, urllib.error, cgi
                f = urllib.request.urlopen(f)
                contentType = f.headers.get('content-type')
                if contentType:
                    (mediaType, params) = cgi.parse_header(contentType)
                    encoding = params.get('charset')
            except:
                # Best effort: fall through and let the parser fail later
                # with a more specific error.
                pass
        elif f == '-':
            f = sys.stdin
            if sys.version_info[0] >= 3:
                # Python 3 stdin is already decoded text.
                encoding = None
        else:
            try:
                # Try opening from file system
                f = open(f, "rb")
            except IOError as e:
                sys.stderr.write("Unable to open file: %s\n" % e)
                sys.exit(1)
    except IndexError:
        sys.stderr.write("No filename provided. Use -h for help\n")
        sys.exit(1)
    treebuilder = treebuilders.getTreeBuilder(opts.treebuilder)
    if opts.sanitize:
        tokenizer = sanitizer.HTMLSanitizer
    else:
        tokenizer = HTMLTokenizer
    p = html5parser.HTMLParser(tree=treebuilder, tokenizer=tokenizer, debug=opts.log)
    if opts.fragment:
        parseMethod = p.parseFragment
    else:
        parseMethod = p.parse
    if opts.profile:
        # Profile the parse via cProfile and dump sorted timing stats.
        import cProfile
        import pstats
        cProfile.runctx("run(parseMethod, f, encoding)", None,
                        {"run": run,
                         "parseMethod": parseMethod,
                         "f": f,
                         "encoding": encoding},
                        "stats.prof")
        # XXX - We should use a temp file here
        stats = pstats.Stats('stats.prof')
        stats.strip_dirs()
        stats.sort_stats('time')
        stats.print_stats()
    elif opts.time:
        # Wall-clock timing of the parse (and optionally the printing).
        import time
        t0 = time.time()
        document = run(parseMethod, f, encoding)
        t1 = time.time()
        if document:
            printOutput(p, document, opts)
            t2 = time.time()
            sys.stderr.write("\n\nRun took: %fs (plus %fs to print the output)"%(t1-t0, t2-t1))
        else:
            sys.stderr.write("\n\nRun took: %fs"%(t1-t0))
    else:
        document = run(parseMethod, f, encoding)
        if document:
            printOutput(p, document, opts)
def run(parseMethod, f, encoding):
    """Invoke parseMethod(f, encoding=...); on any error print the
    traceback and return None instead of raising."""
    document = None
    try:
        document = parseMethod(f, encoding=encoding)
    except:
        traceback.print_exc()
    return document
def printOutput(parser, document, opts):
    """Emit the parsed document in the format selected by opts.

    At most one of the xml/tree/hilite/html output modes runs; parse
    errors are appended when opts.error is set.
    """
    if opts.encoding:
        print("Encoding:", parser.tokenizer.stream.charEncoding)
    # Replay any state-transition messages logged by the parser (--log).
    for item in parser.log:
        print(item)
    if document is not None:
        if opts.xml:
            # XML serialization depends on which tree builder was used.
            tb = opts.treebuilder.lower()
            if tb == "dom":
                document.writexml(sys.stdout, encoding="utf-8")
            elif tb == "lxml":
                import lxml.etree
                sys.stdout.write(lxml.etree.tostring(document))
            elif tb == "etree":
                sys.stdout.write(utils.default_etree.tostring(document))
        elif opts.tree:
            # Fragments come back as a list; wrap a single document so the
            # loop below handles both cases.
            if not hasattr(document,'__getitem__'):
                document = [document]
            for fragment in document:
                print(parser.tree.testSerializer(fragment))
        elif opts.hilite:
            sys.stdout.write(document.hilite("utf-8"))
        elif opts.html:
            # Collect serializer options straight off the parsed options.
            kwargs = {}
            for opt in serializer.HTMLSerializer.options:
                try:
                    kwargs[opt] = getattr(opts,opt)
                except:
                    pass
            # Let the serializer pick its own default quote character when
            # the user did not supply one.
            if not kwargs['quote_char']:
                del kwargs['quote_char']
            tokens = treewalkers.getTreeWalker(opts.treebuilder)(document)
            if sys.version_info[0] >= 3:
                encoding = None
            else:
                encoding = "utf-8"
            for text in serializer.HTMLSerializer(**kwargs).serialize(tokens, encoding=encoding):
                sys.stdout.write(text)
            if not text.endswith('\n'): sys.stdout.write('\n')
    if opts.error:
        errList=[]
        for pos, errorcode, datavars in parser.errors:
            errList.append("Line %i Col %i"%pos + " " + constants.E.get(errorcode, 'Unknown error "%s"' % errorcode) % datavars)
        sys.stdout.write("\nParse errors:\n" + "\n".join(errList)+"\n")
def getOptParser():
    """Build the OptionParser for this tool.

    Covers run behavior (profiling/timing), input handling (treebuilder,
    fragment, sanitize), output selection (tree/xml/html/hilite/errors)
    and the HTMLSerializer pass-through options.
    """
    parser = OptionParser(usage=__doc__)
    # Run behavior.
    parser.add_option("-p", "--profile", action="store_true", default=False,
                      dest="profile", help="Use the hotshot profiler to "
                      "produce a detailed log of the run")
    parser.add_option("-t", "--time",
                      action="store_true", default=False, dest="time",
                      help="Time the run using time.time (may not be accurate on all platforms, especially for short runs)")
    # Input handling.
    parser.add_option("-b", "--treebuilder", action="store", type="string",
                      dest="treebuilder", default="etree")
    parser.add_option("-e", "--error", action="store_true", default=False,
                      dest="error", help="Print a list of parse errors")
    parser.add_option("-f", "--fragment", action="store_true", default=False,
                      dest="fragment", help="Parse as a fragment")
    # Output selection.
    parser.add_option("", "--tree", action="store_true", default=False,
                      dest="tree", help="Output as debug tree")
    parser.add_option("-x", "--xml", action="store_true", default=False,
                      dest="xml", help="Output as xml")
    parser.add_option("", "--no-html", action="store_false", default=True,
                      dest="html", help="Don't output html")
    parser.add_option("", "--hilite", action="store_true", default=False,
                      dest="hilite", help="Output as formatted highlighted code.")
    parser.add_option("-c", "--encoding", action="store_true", default=False,
                      dest="encoding", help="Print character encoding used")
    # Serializer options (names match serializer.HTMLSerializer.options,
    # which printOutput forwards via getattr).
    parser.add_option("", "--inject-meta-charset", action="store_true",
                      default=False, dest="inject_meta_charset",
                      help="inject <meta charset>")
    parser.add_option("", "--strip-whitespace", action="store_true",
                      default=False, dest="strip_whitespace",
                      help="strip whitespace")
    parser.add_option("", "--omit-optional-tags", action="store_true",
                      default=False, dest="omit_optional_tags",
                      help="omit optional tags")
    parser.add_option("", "--quote-attr-values", action="store_true",
                      default=False, dest="quote_attr_values",
                      help="quote attribute values")
    parser.add_option("", "--use-best-quote-char", action="store_true",
                      default=False, dest="use_best_quote_char",
                      help="use best quote character")
    parser.add_option("", "--quote-char", action="store",
                      default=None, dest="quote_char",
                      help="quote character")
    parser.add_option("", "--no-minimize-boolean-attributes",
                      action="store_false", default=True,
                      dest="minimize_boolean_attributes",
                      help="minimize boolean attributes")
    parser.add_option("", "--use-trailing-solidus", action="store_true",
                      default=False, dest="use_trailing_solidus",
                      help="use trailing solidus")
    parser.add_option("", "--space-before-trailing-solidus",
                      action="store_true", default=False,
                      dest="space_before_trailing_solidus",
                      help="add space before trailing solidus")
    parser.add_option("", "--escape-lt-in-attrs", action="store_true",
                      default=False, dest="escape_lt_in_attrs",
                      help="escape less than signs in attribute values")
    parser.add_option("", "--escape-rcdata", action="store_true",
                      default=False, dest="escape_rcdata",
                      help="escape rcdata element values")
    parser.add_option("", "--sanitize", action="store_true", default=False,
                      dest="sanitize", help="sanitize")
    parser.add_option("-l", "--log", action="store_true", default=False,
                      dest="log", help="log state transitions")
    return parser
# Allow running this module directly as a command-line tool.
if __name__ == "__main__":
    parse()
| |
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for pw_console.log_view"""
import logging
import time
import sys
import unittest
from datetime import datetime
from unittest.mock import MagicMock, patch
from parameterized import parameterized # type: ignore
from prompt_toolkit.data_structures import Point
from pw_console.console_prefs import ConsolePrefs
from pw_console.log_view import LogView
from pw_console.log_screen import ScreenLine
from pw_console.text_formatting import (
flatten_formatted_text_tuples,
join_adjacent_style_tuples,
)
# True when running on Python >= 3.8.
# NOTE(review): presumably used for version-gated expected values later in
# this test module (the uses are outside this view) -- confirm.
_PYTHON_3_8 = sys.version_info >= (
    3,
    8,
)
def _create_log_view():
    """Return a (LogView, mocked log pane) pair sized 80x10 for tests."""
    pane = MagicMock()
    pane.pane_resized = MagicMock(return_value=True)
    pane.current_log_pane_width = 80
    pane.current_log_pane_height = 10
    app = MagicMock()
    app.prefs = ConsolePrefs()
    app.prefs.reset_config()
    return LogView(pane, app), pane
class TestLogView(unittest.TestCase):
"""Tests for LogView."""
# pylint: disable=invalid-name
    def setUp(self):
        # Show full diffs on assertion failures.
        self.maxDiff = None
# pylint: enable=invalid-name
def _create_log_view_with_logs(self, log_count=100):
log_view, log_pane = _create_log_view()
if log_count > 0:
test_log = logging.getLogger('log_view.test')
with self.assertLogs(test_log, level='DEBUG') as _log_context:
test_log.addHandler(log_view.log_store)
for i in range(log_count):
test_log.debug('Test log %s', i)
return log_view, log_pane
    def test_follow_toggle(self) -> None:
        """Follow mode defaults to on and toggles off."""
        log_view, _pane = _create_log_view()
        self.assertTrue(log_view.follow)
        log_view.toggle_follow()
        self.assertFalse(log_view.follow)
    def test_follow_scrolls_to_bottom(self) -> None:
        """With follow off the cursor stays put; re-enabling follow jumps
        the current line to the newest log message."""
        log_view, _pane = _create_log_view()
        log_view.toggle_follow()
        _fragments = log_view.render_content()
        self.assertFalse(log_view.follow)
        self.assertEqual(log_view.get_current_line(), 0)
        test_log = logging.getLogger('log_view.test')
        # Log 5 messages, current_line should stay at 0
        with self.assertLogs(test_log, level='DEBUG') as _log_context:
            test_log.addHandler(log_view.log_store)
            for i in range(5):
                test_log.debug('Test log %s', i)
        _fragments = log_view.render_content()
        self.assertEqual(log_view.get_total_count(), 5)
        self.assertEqual(log_view.get_current_line(), 0)
        # Turn follow on
        log_view.toggle_follow()
        self.assertTrue(log_view.follow)
        # Log another message, current_line should move to the last.
        with self.assertLogs(test_log, level='DEBUG') as _log_context:
            test_log.addHandler(log_view.log_store)
            test_log.debug('Test log')
        _fragments = log_view.render_content()
        self.assertEqual(log_view.get_total_count(), 6)
        self.assertEqual(log_view.get_current_line(), 5)
def test_scrolling(self) -> None:
"""Test all scrolling methods."""
log_view, log_pane = self._create_log_view_with_logs(log_count=100)
# Page scrolling needs to know the current window height.
log_pane.pane_resized = MagicMock(return_value=True)
log_pane.current_log_pane_width = 80
log_pane.current_log_pane_height = 10
log_view.render_content()
# Follow is on by default, current line should be at the end.
self.assertEqual(log_view.get_current_line(), 99)
# Move to the beginning.
log_view.scroll_to_top()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 0)
# Should not be able to scroll before the beginning.
log_view.scroll_up()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 0)
log_view.scroll_up_one_page()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 0)
# Single and multi line movement.
log_view.scroll_down()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 1)
log_view.scroll(5)
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 6)
log_view.scroll_up()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 5)
# Page down and up.
log_view.scroll_down_one_page()
self.assertEqual(log_view.get_current_line(), 15)
log_view.scroll_up_one_page()
self.assertEqual(log_view.get_current_line(), 5)
# Move to the end.
log_view.scroll_to_bottom()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 99)
# Should not be able to scroll beyond the end.
log_view.scroll_down()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 99)
log_view.scroll_down_one_page()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 99)
# Move up a bit to turn off follow
self.assertEqual(log_view.log_screen.cursor_position, 9)
log_view.scroll(-1)
self.assertEqual(log_view.log_screen.cursor_position, 8)
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 98)
# Simulate a mouse click to scroll.
# Click 1 lines from the top of the window.
log_view.scroll_to_position(Point(0, 1))
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 90)
# Disable follow mode if mouse click on line.
log_view.toggle_follow()
log_view.render_content()
self.assertTrue(log_view.follow)
self.assertEqual(log_view.get_current_line(), 99)
log_view.scroll_to_position(Point(0, 5))
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 95)
self.assertFalse(log_view.follow)
def test_render_content_and_cursor_position(self) -> None:
"""Test render_content results and get_cursor_position
get_cursor_position() should return the correct position depending on
what line is selected."""
# Mock time to always return the same value.
mock_time = MagicMock( # type: ignore
return_value=time.mktime(
datetime(2021, 7, 13, 0, 0, 0).timetuple()))
with patch('time.time', new=mock_time):
log_view, log_pane = self._create_log_view_with_logs(log_count=4)
# Mock needed LogPane functions that pull info from prompt_toolkit.
log_pane.get_horizontal_scroll_amount = MagicMock(return_value=0)
log_pane.current_log_pane_width = 80
log_pane.current_log_pane_height = 10
log_view.render_content()
log_view.scroll_to_top()
log_view.render_content()
# Scroll to top keeps the cursor on the bottom of the window.
self.assertEqual(log_view.get_cursor_position(), Point(x=0, y=9))
log_view.scroll_to_bottom()
log_view.render_content()
self.assertEqual(log_view.get_cursor_position(), Point(x=0, y=9))
expected_formatted_text = [
('', ''),
('class:log-time', '20210713 00:00:00'),
('', ' '),
('class:log-level-10', 'DEBUG'),
('', ' Test log 0'),
('class:log-time', '20210713 00:00:00'),
('', ' '),
('class:log-level-10', 'DEBUG'),
('', ' Test log 1'),
('class:log-time', '20210713 00:00:00'),
('', ' '),
('class:log-level-10', 'DEBUG'),
('', ' Test log 2'),
('class:selected-log-line class:log-time', '20210713 00:00:00'),
('class:selected-log-line ', ' '),
('class:selected-log-line class:log-level-10', 'DEBUG'),
('class:selected-log-line ',
' Test log 3 ')
] # yapf: disable
result_text = join_adjacent_style_tuples(
flatten_formatted_text_tuples(log_view._line_fragment_cache)) # pylint: disable=protected-access
self.assertEqual(result_text, expected_formatted_text)
def test_clear_scrollback(self) -> None:
"""Test various functions with clearing log scrollback history."""
# pylint: disable=protected-access
# Create log_view with 4 logs
starting_log_count = 4
log_view, _pane = self._create_log_view_with_logs(
log_count=starting_log_count)
log_view.render_content()
# Check setup is correct
self.assertTrue(log_view.follow)
self.assertEqual(log_view.get_current_line(), 3)
self.assertEqual(log_view.get_total_count(), 4)
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()),
['Test log 0', 'Test log 1', 'Test log 2', 'Test log 3'])
# Clear scrollback
log_view.clear_scrollback()
log_view.render_content()
# Follow is still on
self.assertTrue(log_view.follow)
self.assertEqual(log_view.hidden_line_count(), 4)
# Current line index should stay the same
self.assertEqual(log_view.get_current_line(), 3)
# Total count should stay the same
self.assertEqual(log_view.get_total_count(), 4)
# No lines returned
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()), [])
# Add Log 4 more lines
test_log = logging.getLogger('log_view.test')
with self.assertLogs(test_log, level='DEBUG') as _log_context:
test_log.addHandler(log_view.log_store)
for i in range(4):
test_log.debug('Test log %s', i + starting_log_count)
log_view.render_content()
# Current line
self.assertEqual(log_view.hidden_line_count(), 4)
self.assertEqual(log_view.get_last_log_index(), 7)
self.assertEqual(log_view.get_current_line(), 7)
self.assertEqual(log_view.get_total_count(), 8)
# Only the last 4 logs should appear
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()),
['Test log 4', 'Test log 5', 'Test log 6', 'Test log 7'])
log_view.scroll_to_bottom()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 7)
# Turn follow back on
log_view.toggle_follow()
log_view.undo_clear_scrollback()
# Current line and total are the same
self.assertEqual(log_view.get_current_line(), 7)
self.assertEqual(log_view.get_total_count(), 8)
# All logs should appear
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()), [
'Test log 0', 'Test log 1', 'Test log 2', 'Test log 3',
'Test log 4', 'Test log 5', 'Test log 6', 'Test log 7'
])
log_view.scroll_to_bottom()
log_view.render_content()
self.assertEqual(log_view.get_current_line(), 7)
def test_get_line_at_cursor_position(self) -> None:
"""Tests fuctions that rely on getting a log_index for the current
cursor position.
Including:
- LogScreen.fetch_subline_up
- LogScreen.fetch_subline_down
- LogView._update_log_index
"""
# pylint: disable=protected-access
# Create log_view with 4 logs
starting_log_count = 4
log_view, _pane = self._create_log_view_with_logs(
log_count=starting_log_count)
log_view.render_content()
# Check setup is correct
self.assertTrue(log_view.follow)
self.assertEqual(log_view.get_current_line(), 3)
self.assertEqual(log_view.get_total_count(), 4)
self.assertEqual(
list(log.record.message
for log in log_view._get_visible_log_lines()),
['Test log 0', 'Test log 1', 'Test log 2', 'Test log 3'])
self.assertEqual(log_view.log_screen.cursor_position, 9)
# Force the cursor_position to be larger than the log_screen
# line_buffer.
log_view.log_screen.cursor_position = 10
# Attempt to get the current line, no exception should be raised
result = log_view.log_screen.get_line_at_cursor_position()
# Log index should be None
self.assertEqual(result.log_index, None)
# Force the cursor_position to be < 0. This won't produce an error but
# would wrap around to the beginning.
log_view.log_screen.cursor_position = -1
# Attempt to get the current line, no exception should be raised
result = log_view.log_screen.get_line_at_cursor_position()
# Result should be a blank line
self.assertEqual(result, ScreenLine([('', '')]))
# Log index should be None
self.assertEqual(result.log_index, None)
def test_visual_select(self) -> None:
"""Test log line selection."""
log_view, log_pane = self._create_log_view_with_logs(log_count=100)
self.assertEqual(100, log_view.get_total_count())
# Page scrolling needs to know the current window height.
log_pane.pane_resized = MagicMock(return_value=True)
log_pane.current_log_pane_width = 80
log_pane.current_log_pane_height = 10
log_view.log_screen.reset_logs = MagicMock(
wraps=log_view.log_screen.reset_logs)
log_view.log_screen.get_lines = MagicMock(
wraps=log_view.log_screen.get_lines)
log_view.render_content()
log_view.log_screen.reset_logs.assert_called_once()
log_view.log_screen.get_lines.assert_called_once_with(
marked_logs_start=None, marked_logs_end=None)
log_view.log_screen.get_lines.reset_mock()
log_view.log_screen.reset_logs.reset_mock()
self.assertIsNone(log_view.marked_logs_start)
self.assertIsNone(log_view.marked_logs_end)
log_view.visual_select_line(Point(0, 9))
self.assertEqual(
(99, 99), (log_view.marked_logs_start, log_view.marked_logs_end))
log_view.visual_select_line(Point(0, 8))
log_view.visual_select_line(Point(0, 7))
self.assertEqual(
(97, 99), (log_view.marked_logs_start, log_view.marked_logs_end))
log_view.clear_visual_selection()
self.assertIsNone(log_view.marked_logs_start)
self.assertIsNone(log_view.marked_logs_end)
log_view.visual_select_line(Point(0, 1))
log_view.visual_select_line(Point(0, 2))
log_view.visual_select_line(Point(0, 3))
log_view.visual_select_line(Point(0, 4))
self.assertEqual(
(91, 94), (log_view.marked_logs_start, log_view.marked_logs_end))
# Make sure the log screen was not re-generated.
log_view.log_screen.reset_logs.assert_not_called()
log_view.log_screen.reset_logs.reset_mock()
# Render the screen
log_view.render_content()
log_view.log_screen.reset_logs.assert_called_once()
# Check the visual selection was specified
log_view.log_screen.get_lines.assert_called_once_with(
marked_logs_start=91, marked_logs_end=94)
log_view.log_screen.get_lines.reset_mock()
log_view.log_screen.reset_logs.reset_mock()
if _PYTHON_3_8:
from unittest import IsolatedAsyncioTestCase # type: ignore # pylint: disable=no-name-in-module
class TestLogViewFiltering(IsolatedAsyncioTestCase): # pylint: disable=undefined-variable
"""Test LogView log filtering capabilities."""
# pylint: disable=invalid-name
def setUp(self):
self.maxDiff = None
# pylint: enable=invalid-name
def _create_log_view_from_list(self, log_messages):
log_view, log_pane = _create_log_view()
test_log = logging.getLogger('log_view.test')
with self.assertLogs(test_log, level='DEBUG') as _log_context:
test_log.addHandler(log_view.log_store)
for log, extra_arg in log_messages:
test_log.debug('%s', log, extra=extra_arg)
return log_view, log_pane
@parameterized.expand([
(
# Test name
'regex filter',
# Search input_text
'log.*item',
# input_logs
[
('Log some item', dict()),
('Log another item', dict()),
('Some exception', dict()),
],
# expected_matched_lines
[
'Log some item',
'Log another item',
],
# expected_match_line_numbers
{0: 0, 1: 1},
# expected_export_text
(
' DEBUG Log some item\n'
' DEBUG Log another item\n'
),
None, # field
False, # invert
),
(
# Test name
'regex filter with field',
# Search input_text
'earth',
# input_logs
[
('Log some item',
dict(extra_metadata_fields={'planet': 'Jupiter'})),
('Log another item',
dict(extra_metadata_fields={'planet': 'Earth'})),
('Some exception',
dict(extra_metadata_fields={'planet': 'Earth'})),
],
# expected_matched_lines
[
'Log another item',
'Some exception',
],
# expected_match_line_numbers
{1: 0, 2: 1},
# expected_export_text
(
' DEBUG Earth Log another item\n'
' DEBUG Earth Some exception\n'
),
'planet', # field
False, # invert
),
(
# Test name
'regex filter with field inverted',
# Search input_text
'earth',
# input_logs
[
('Log some item',
dict(extra_metadata_fields={'planet': 'Jupiter'})),
('Log another item',
dict(extra_metadata_fields={'planet': 'Earth'})),
('Some exception',
dict(extra_metadata_fields={'planet': 'Earth'})),
],
# expected_matched_lines
[
'Log some item',
],
# expected_match_line_numbers
{0: 0},
# expected_export_text
(
' DEBUG Jupiter Log some item\n'
),
'planet', # field
True, # invert
),
]) # yapf: disable
async def test_log_filtering(
self,
_test_name,
input_text,
input_logs,
expected_matched_lines,
expected_match_line_numbers,
expected_export_text,
field=None,
invert=False,
) -> None:
"""Test run log view filtering."""
log_view, _log_pane = self._create_log_view_from_list(input_logs)
log_view.render_content()
self.assertEqual(log_view.get_total_count(), len(input_logs))
# Apply the search and wait for the match count background task
log_view.new_search(input_text, invert=invert, field=field)
await log_view.search_match_count_task
self.assertEqual(log_view.search_matched_lines,
expected_match_line_numbers)
# Apply the filter and wait for the filter background task
log_view.apply_filter()
await log_view.filter_existing_logs_task
# Do the number of logs match the expected count?
self.assertEqual(log_view.get_total_count(),
len(expected_matched_lines))
self.assertEqual(
[log.record.message for log in log_view.filtered_logs],
expected_matched_lines)
# Check exported text respects filtering
log_text = log_view._logs_to_text( # pylint: disable=protected-access
use_table_formatting=True)
# Remove leading time from resulting logs
log_text_no_datetime = ''
for line in log_text.splitlines():
log_text_no_datetime += (line[17:] + '\n')
self.assertEqual(log_text_no_datetime, expected_export_text)
# Select the bottom log line
log_view.render_content()
log_view.visual_select_line(Point(0, 9)) # Window height is 10
# Export to text
log_text = log_view._logs_to_text( # pylint: disable=protected-access
selected_lines_only=True,
use_table_formatting=False)
self.assertEqual(
# Remove date, time, and level
log_text[24:].strip(),
expected_matched_lines[0].strip())
# Clear filters and check the numbe of lines is back to normal.
log_view.clear_filters()
self.assertEqual(log_view.get_total_count(), len(input_logs))
if __name__ == '__main__':
unittest.main()
| |
"""
Support for ZhongHong HVAC Controller.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.zhong_hong/
"""
import logging
import voluptuous as vol
from homeassistant.components.climate import (
ATTR_OPERATION_MODE, PLATFORM_SCHEMA, STATE_COOL, STATE_DRY,
STATE_FAN_ONLY, STATE_HEAT, SUPPORT_FAN_MODE, SUPPORT_ON_OFF,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, ClimateDevice)
from homeassistant.const import (ATTR_TEMPERATURE, CONF_HOST, CONF_PORT,
EVENT_HOMEASSISTANT_STOP, TEMP_CELSIUS)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (async_dispatcher_connect,
async_dispatcher_send)
_LOGGER = logging.getLogger(__name__)
CONF_GATEWAY_ADDRRESS = 'gateway_address'
REQUIREMENTS = ['zhong_hong_hvac==1.0.9']
SIGNAL_DEVICE_ADDED = 'zhong_hong_device_added'
SIGNAL_ZHONG_HONG_HUB_START = 'zhong_hong_hub_start'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST):
cv.string,
vol.Optional(CONF_PORT, default=9999):
vol.Coerce(int),
vol.Optional(CONF_GATEWAY_ADDRRESS, default=1):
vol.Coerce(int),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ZhongHong HVAC platform."""
from zhong_hong_hvac.hub import ZhongHongGateway
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
gw_addr = config.get(CONF_GATEWAY_ADDRRESS)
hub = ZhongHongGateway(host, port, gw_addr)
devices = [
ZhongHongClimate(hub, addr_out, addr_in)
for (addr_out, addr_in) in hub.discovery_ac()
]
_LOGGER.debug("We got %s zhong_hong climate devices", len(devices))
hub_is_initialized = False
async def startup():
"""Start hub socket after all climate entity is setted up."""
nonlocal hub_is_initialized
if not all([device.is_initialized for device in devices]):
return
if hub_is_initialized:
return
_LOGGER.debug("zhong_hong hub start listen event")
await hass.async_add_job(hub.start_listen)
await hass.async_add_job(hub.query_all_status)
hub_is_initialized = True
async_dispatcher_connect(hass, SIGNAL_DEVICE_ADDED, startup)
# add devices after SIGNAL_DEVICE_SETTED_UP event is listend
add_entities(devices)
def stop_listen(event):
"""Stop ZhongHongHub socket."""
hub.stop_listen()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_listen)
class ZhongHongClimate(ClimateDevice):
"""Representation of a ZhongHong controller support HVAC."""
def __init__(self, hub, addr_out, addr_in):
"""Set up the ZhongHong climate devices."""
from zhong_hong_hvac.hvac import HVAC
self._device = HVAC(hub, addr_out, addr_in)
self._hub = hub
self._current_operation = None
self._current_temperature = None
self._target_temperature = None
self._current_fan_mode = None
self._is_on = None
self.is_initialized = False
async def async_added_to_hass(self):
"""Register callbacks."""
self._device.register_update_callback(self._after_update)
self.is_initialized = True
async_dispatcher_send(self.hass, SIGNAL_DEVICE_ADDED)
def _after_update(self, climate):
"""Handle state update."""
_LOGGER.debug("async update ha state")
if self._device.current_operation:
self._current_operation = self._device.current_operation.lower()
if self._device.current_temperature:
self._current_temperature = self._device.current_temperature
if self._device.current_fan_mode:
self._current_fan_mode = self._device.current_fan_mode
if self._device.target_temperature:
self._target_temperature = self._device.target_temperature
self._is_on = self._device.is_on
self.schedule_update_ha_state()
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self.unique_id
@property
def unique_id(self):
"""Return the unique ID of the HVAC."""
return "zhong_hong_hvac_{}_{}".format(self._device.addr_out,
self._device.addr_in)
@property
def supported_features(self):
"""Return the list of supported features."""
return (SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
| SUPPORT_OPERATION_MODE | SUPPORT_ON_OFF)
@property
def temperature_unit(self):
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_operation
@property
def operation_list(self):
"""Return the list of available operation modes."""
return [STATE_COOL, STATE_HEAT, STATE_DRY, STATE_FAN_ONLY]
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return 1
@property
def is_on(self):
"""Return true if on."""
return self._device.is_on
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self._current_fan_mode
@property
def fan_list(self):
"""Return the list of available fan modes."""
return self._device.fan_list
@property
def min_temp(self):
"""Return the minimum temperature."""
return self._device.min_temp
@property
def max_temp(self):
"""Return the maximum temperature."""
return self._device.max_temp
def turn_on(self):
"""Turn on ac."""
return self._device.turn_on()
def turn_off(self):
"""Turn off ac."""
return self._device.turn_off()
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is not None:
self._device.set_temperature(temperature)
operation_mode = kwargs.get(ATTR_OPERATION_MODE)
if operation_mode is not None:
self.set_operation_mode(operation_mode)
def set_operation_mode(self, operation_mode):
"""Set new target operation mode."""
self._device.set_operation_mode(operation_mode.upper())
def set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
self._device.set_fan_mode(fan_mode)
| |
#!/usr/bin/env python
"""Test the collector flows.
To reduce the size of this module, additional collector flow tests are split out
into collectors_*_test.py files.
"""
import os
import mock
import psutil
from grr.client.client_actions import standard
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_registry
from grr.lib import artifact_utils
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import sequential_collection
from grr.lib import test_lib
from grr.lib import utils
# pylint: disable=unused-import
from grr.lib.flows.general import artifact_fallbacks
# pylint: enable=unused-import
from grr.lib.flows.general import collectors
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
def ProcessIter():
return iter([test_lib.MockWindowsProcess()])
class TestArtifactCollectors(test_lib.FlowTestsBaseclass):
"""Test the artifact collection mechanism with fake artifacts."""
def setUp(self):
"""Make sure things are initialized."""
super(TestArtifactCollectors, self).setUp()
test_artifacts_file = os.path.join(config_lib.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
artifact_registry.REGISTRY.AddFileSource(test_artifacts_file)
self.fakeartifact = artifact_registry.REGISTRY.GetArtifact("FakeArtifact")
self.fakeartifact2 = artifact_registry.REGISTRY.GetArtifact("FakeArtifact2")
self.output_count = 0
with aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw") as fd:
fd.Set(fd.Schema.SYSTEM("Linux"))
kb = fd.Schema.KNOWLEDGE_BASE()
artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
fd.Set(kb)
def tearDown(self):
super(TestArtifactCollectors, self).tearDown()
self.fakeartifact.sources = [] # Reset any ArtifactSources
self.fakeartifact.conditions = [] # Reset any Conditions
self.fakeartifact2.sources = [] # Reset any ArtifactSources
self.fakeartifact2.conditions = [] # Reset any Conditions
def testInterpolateArgs(self):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
kb = rdf_client.KnowledgeBase()
kb.MergeOrAddUser(rdf_client.User(username="test1"))
kb.MergeOrAddUser(rdf_client.User(username="test2"))
collect_flow.state["knowledge_base"] = kb
collect_flow.current_artifact_name = "blah"
collect_flow.args = artifact_utils.ArtifactCollectorFlowArgs()
test_rdf = rdf_client.KnowledgeBase()
action_args = {
"usernames": ["%%users.username%%", "%%users.username%%"],
"nointerp": "asdfsdf",
"notastring": test_rdf
}
kwargs = collect_flow.InterpolateDict(action_args)
self.assertItemsEqual(kwargs["usernames"],
["test1", "test2", "test1", "test2"])
self.assertEqual(kwargs["nointerp"], "asdfsdf")
self.assertEqual(kwargs["notastring"], test_rdf)
# We should be using an array since users.username will expand to multiple
# values.
self.assertRaises(ValueError, collect_flow.InterpolateDict,
{"bad": "%%users.username%%"})
list_args = collect_flow.InterpolateList(
["%%users.username%%", r"%%users.username%%\aa"])
self.assertItemsEqual(list_args,
["test1", "test2", r"test1\aa", r"test2\aa"])
list_args = collect_flow.InterpolateList(["one"])
self.assertEqual(list_args, ["one"])
# Ignore the failure in users.desktop, report the others.
collect_flow.args.ignore_interpolation_errors = True
list_args = collect_flow.InterpolateList(
["%%users.desktop%%", r"%%users.username%%\aa"])
self.assertItemsEqual(list_args, [r"test1\aa", r"test2\aa"])
# Both fail.
list_args = collect_flow.InterpolateList(
[r"%%users.desktop%%\aa", r"%%users.sid%%\aa"])
self.assertItemsEqual(list_args, [])
def testGrepRegexCombination(self):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
self.assertEqual(collect_flow._CombineRegex([r"simple"]), "simple")
self.assertEqual(collect_flow._CombineRegex(["a", "b"]), "(a)|(b)")
self.assertEqual(collect_flow._CombineRegex(["a", "b", "c"]), "(a)|(b)|(c)")
self.assertEqual(
collect_flow._CombineRegex(["a|b", "[^_]b", "c|d"]),
"(a|b)|([^_]b)|(c|d)")
def testGrep(self):
class MockCallFlow(object):
def CallFlow(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
mock_call_flow = MockCallFlow()
with utils.Stubber(collectors.ArtifactCollectorFlow, "CallFlow",
mock_call_flow.CallFlow):
collect_flow = collectors.ArtifactCollectorFlow(None, token=self.token)
collect_flow.args = mock.Mock()
collect_flow.args.ignore_interpolation_errors = False
kb = rdf_client.KnowledgeBase()
kb.MergeOrAddUser(rdf_client.User(username="test1"))
kb.MergeOrAddUser(rdf_client.User(username="test2"))
collect_flow.state["knowledge_base"] = kb
collect_flow.current_artifact_name = "blah"
collector = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GREP,
attributes={
"paths": ["/etc/passwd"],
"content_regex_list": [r"^a%%users.username%%b$"]
})
collect_flow.Grep(collector, rdf_paths.PathSpec.PathType.TSK)
conditions = mock_call_flow.kwargs["conditions"]
self.assertEqual(len(conditions), 1)
regexes = conditions[0].contents_regex_match.regex.SerializeToString()
self.assertItemsEqual(regexes.split("|"), ["(^atest1b$)", "(^atest2b$)"])
self.assertEqual(mock_call_flow.kwargs["paths"], ["/etc/passwd"])
def testGetArtifact1(self):
"""Test we can get a basic artifact."""
client_mock = action_mocks.FileFinderClientMock()
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
# Dynamically add an ArtifactSource specifying the base path.
file_path = os.path.join(self.base_path, "test_img.dd")
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.FILE,
attributes={"paths": [file_path]})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow",
client_mock,
artifact_list=artifact_list,
use_tsk=False,
token=self.token,
client_id=self.client_id):
pass
# Test the AFF4 file that was created.
fd1 = aff4.FACTORY.Open(
"%s/fs/os/%s" % (self.client_id, file_path), token=self.token)
fd2 = open(file_path, "rb")
fd2.seek(0, 2)
self.assertEqual(fd2.tell(), int(fd1.Get(fd1.Schema.SIZE)))
def testArtifactSkipping(self):
client_mock = action_mocks.ActionMock()
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
# This does not match the Artifact so it will not be collected.
client.Set(client.Schema.SYSTEM("Windows"))
kb = client.Get(client.Schema.KNOWLEDGE_BASE)
kb.os = "Windows"
client.Set(client.Schema.KNOWLEDGE_BASE, kb)
client.Flush()
artifact_list = ["FakeArtifact"]
for s in test_lib.TestFlowHelper(
"ArtifactCollectorFlow",
client_mock,
artifact_list=artifact_list,
use_tsk=False,
token=self.token,
client_id=self.client_id):
session_id = s
flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
self.assertEqual(len(flow_obj.state.artifacts_skipped_due_to_condition), 1)
self.assertEqual(flow_obj.state.artifacts_skipped_due_to_condition[0],
["FakeArtifact", "os == 'Linux'"])
def testRunGrrClientActionArtifact(self):
"""Test we can get a GRR client artifact."""
with utils.Stubber(psutil, "process_iter", ProcessIter):
client_mock = action_mocks.ActionMock(standard.ListProcesses)
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": r"ListProcesses"})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for s in test_lib.TestFlowHelper(
"ArtifactCollectorFlow",
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id):
session_id = s
fd = flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdf_client.Process))
self.assertTrue(len(fd) == 1)
def testRunGrrClientActionArtifactSplit(self):
"""Test that artifacts get split into separate collections."""
with utils.Stubber(psutil, "process_iter", ProcessIter):
client_mock = action_mocks.ActionMock(standard.ListProcesses)
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": r"ListProcesses"})
self.fakeartifact.sources.append(coll1)
self.fakeartifact2.sources.append(coll1)
artifact_list = ["FakeArtifact", "FakeArtifact2"]
for s in test_lib.TestFlowHelper(
"ArtifactCollectorFlow",
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id,
split_output_by_artifact=True):
session_id = s
# Check that we got two separate collections based on artifact name
fd = collectors.ArtifactCollectorFlow.ResultCollectionForArtifact(
session_id, "FakeArtifact", token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdf_client.Process))
self.assertEqual(len(fd), 1)
fd = collectors.ArtifactCollectorFlow.ResultCollectionForArtifact(
session_id, "FakeArtifact2", token=self.token)
self.assertEqual(len(fd), 1)
self.assertTrue(isinstance(list(fd)[0], rdf_client.Process))
def testConditions(self):
"""Test we can get a GRR client artifact with conditions."""
with utils.Stubber(psutil, "process_iter", ProcessIter):
# Run with false condition.
client_mock = action_mocks.ActionMock(standard.ListProcesses)
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": "ListProcesses"},
conditions=["os == 'Windows'"])
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertEqual(len(fd), 0)
# Now run with matching or condition.
coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertNotEqual(len(fd), 0)
# Now run with impossible or condition.
coll1.conditions.append("os == 'NotTrue'")
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertEqual(len(fd), 0)
def testRegistryValueArtifact(self):
with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.REGISTRY,
test_lib.FakeRegistryVFSHandler):
with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
test_lib.FakeFullVFSHandler):
client_mock = action_mocks.ActionMock(standard.StatFile)
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.REGISTRY_VALUE,
attributes={
"key_value_pairs": [{
"key": (r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet"
r"\Control\Session Manager"),
"value":
"BootExecute"
}]
})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for s in test_lib.TestFlowHelper(
"ArtifactCollectorFlow",
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id):
session_id = s
# Test the statentry got stored.
fd = flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdf_client.StatEntry))
urn = fd[0].pathspec.AFF4Path(self.client_id)
self.assertTrue(str(urn).endswith("BootExecute"))
def testRegistryDefaultValueArtifact(self):
with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.REGISTRY,
test_lib.FakeRegistryVFSHandler):
with test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
test_lib.FakeFullVFSHandler):
client_mock = action_mocks.ActionMock(standard.StatFile)
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.REGISTRY_VALUE,
attributes={
"key_value_pairs": [{
"key": (r"HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest"),
"value": ""
}]
})
self.fakeartifact.sources.append(coll1)
artifact_list = ["FakeArtifact"]
for s in test_lib.TestFlowHelper(
"ArtifactCollectorFlow",
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id):
session_id = s
fd = flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
self.assertTrue(isinstance(list(fd)[0], rdf_client.StatEntry))
self.assertEqual(fd[0].registry_data.GetValue(), "DefaultValue")
def testSupportedOS(self):
"""Test supported_os inside the collector object."""
with utils.Stubber(psutil, "process_iter", ProcessIter):
# Run with false condition.
client_mock = action_mocks.ActionMock(standard.ListProcesses)
coll1 = artifact_registry.ArtifactSource(
type=artifact_registry.ArtifactSource.SourceType.GRR_CLIENT_ACTION,
attributes={"client_action": "ListProcesses"},
supported_os=["Windows"])
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertEqual(len(fd), 0)
# Now run with matching or condition.
coll1.conditions = []
coll1.supported_os = ["Linux", "Windows"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertNotEqual(len(fd), 0)
# Now run with impossible or condition.
coll1.conditions = ["os == 'Linux' or os == 'Windows'"]
coll1.supported_os = ["NotTrue"]
self.fakeartifact.sources = []
self.fakeartifact.sources.append(coll1)
fd = self._RunClientActionArtifact(client_mock, ["FakeArtifact"])
self.assertEqual(fd.__class__,
sequential_collection.GeneralIndexedCollection)
self.assertEqual(len(fd), 0)
def _RunClientActionArtifact(self, client_mock, artifact_list):
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
client.Set(client.Schema.SYSTEM("Linux"))
client.Flush()
self.output_count += 1
for s in test_lib.TestFlowHelper(
"ArtifactCollectorFlow",
client_mock,
artifact_list=artifact_list,
token=self.token,
client_id=self.client_id):
session_id = s
return flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
def main(argv):
  """Entry point: runs this module's full GRR test suite."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)


if __name__ == "__main__":
  flags.StartMain(main)
| |
from nanopore.analyses.abstractAnalysis import AbstractAnalysis
from nanopore.analyses.utils import AlignedPair, getFastaDictionary, getFastqDictionary, getExonerateCigarFormatString, samIterator, pathToBaseNanoporeDir
import os
import pysam
import numpy
import math
import random
import xml.etree.cElementTree as ET
from jobTree.src.bioio import reverseComplement, prettyXml, system, fastaWrite, cigarRead, PairwiseAlignment, cigarReadFromString
from itertools import product
from cactus.bar.cactus_expectationMaximisation import Hmm
bases = "ACGT"
def getProb(subMatrix, start, end):
    """Look up the substitution probability of start -> end in subMatrix."""
    key = (start, end)
    return subMatrix[key]
def calcBasePosteriorProbs(baseObservations, refBase,
                           evolutionarySubstitionMatrix, errorSubstutionMatrix):
    """Posterior probability of each base at a reference position.

    Combines the prior of mutating refBase into each candidate base
    (evolutionary matrix) with the likelihood of the observed read bases
    given that candidate (error matrix), all in log space, then normalises.
    """
    logBaseProbs = []
    for candidateBase in bases:
        # Prior: probability the reference base mutated to candidateBase.
        logP = math.log(getProb(evolutionarySubstitionMatrix, refBase, candidateBase))
        # Likelihood: each observed base, weighted by its observation count.
        for observedBase in bases:
            logP += math.log(getProb(errorSubstutionMatrix, candidateBase,
                                     observedBase)) * baseObservations[observedBase]
        logBaseProbs.append(logP)
    # Accumulate the normalising constant with the same pairwise
    # log(1 + exp(y - x)) folding the original used.
    totalLogProb = logBaseProbs[0]
    for logP in logBaseProbs[1:]:
        totalLogProb = totalLogProb + math.log(1 + math.exp(logP - totalLogProb))
    return dict(zip(bases,
                    [math.exp(logP - totalLogProb) for logP in logBaseProbs]))
def loadHmmErrorSubstitutionMatrix(hmmFile):
    """Load an Hmm file and return its row-normalised 4x4 emission matrix."""
    hmm = Hmm.loadHmm(hmmFile)
    emissions = hmm.emissions[:len(bases)**2]
    normalised = []
    for i in range(len(emissions)):
        # Normalise each entry by the sum of its 4-entry row.
        rowStart = 4 * (i / 4)
        normalised.append(emissions[i] / sum(emissions[rowStart:rowStart + 4]))
    return dict(zip(product(bases, bases), normalised))
def getNullSubstitutionMatrix():
    """Return a substitution matrix assigning weight 1.0 to every base pair."""
    return dict((pair, 1.0) for pair in product(bases, bases))
def getJukesCantorTypeSubstitutionMatrix():
    """Jukes-Cantor style matrix: 0.8 on identity, 0.2/3 per mismatch."""
    matrix = {}
    for pair in product(bases, bases):
        matrix[pair] = 0.8 if pair[0] == pair[1] else (0.2 / 3)
    return matrix
class MarginAlignSnpCaller(AbstractAnalysis):
    """Calculates stats on snp calling.

    For each HMM parameterisation and target coverage level, samples reads,
    realigns them with cactus_realign, and evaluates four SNP-calling
    strategies against the held-out (pre-mutation) reference bases recorded
    in the "<referenceFastaFile>_Index.txt" file. Precision/recall curves
    and summary stats are written as XML to marginaliseConsensus.xml.
    """
    def run(self):
        AbstractAnalysis.run(self) #Call base method to do some logging
        refSequences = getFastaDictionary(self.referenceFastaFile) #Hash of names to sequences
        readSequences = getFastqDictionary(self.readFastqFile) #Hash of names to sequences
        node = ET.Element("marginAlignComparison")
        for hmmType in ("cactus", "trained_0", "trained_20", "trained_40"):
            for coverage in (1000000, 120, 60, 30, 10):
                for replicate in xrange(3 if coverage < 1000000 else 1): #Do replicates, unless coverage is all
                    sam = pysam.Samfile(self.samFile, "r" )
                    #Trained hmm file to use.q
                    hmmFile0 = os.path.join(pathToBaseNanoporeDir(), "nanopore", "mappers", "blasr_hmm_0.txt")
                    hmmFile20 = os.path.join(pathToBaseNanoporeDir(), "nanopore", "mappers", "blasr_hmm_20.txt")
                    hmmFile40 = os.path.join(pathToBaseNanoporeDir(), "nanopore", "mappers", "blasr_hmm_40.txt")
                    #Get substitution matrices
                    nullSubstitionMatrix = getNullSubstitutionMatrix()
                    flatSubstitutionMatrix = getJukesCantorTypeSubstitutionMatrix()
                    hmmErrorSubstitutionMatrix = loadHmmErrorSubstitutionMatrix(hmmFile20)
                    #Load the held out snps
                    #The index file pairs each reference name with a
                    #"<name>_mutated" entry; positions where they differ are
                    #the held-out SNPs (true base keyed by (name, position)).
                    snpSet = {}
                    referenceAlignmentFile = self.referenceFastaFile + "_Index.txt"
                    if os.path.exists(referenceAlignmentFile):
                        seqsAndMutatedSeqs = getFastaDictionary(referenceAlignmentFile)
                        count = 0
                        for name in seqsAndMutatedSeqs:
                            if name in refSequences:
                                count += 1
                                trueSeq = seqsAndMutatedSeqs[name]
                                mutatedSeq = seqsAndMutatedSeqs[name + "_mutated"]
                                assert mutatedSeq == refSequences[name]
                                for i in xrange(len(trueSeq)):
                                    if trueSeq[i] != mutatedSeq[i]:
                                        snpSet[(name, i)] = trueSeq[i]
                            else:
                                assert name.split("_")[-1] == "mutated"
                        assert count == len(refSequences.keys())
                    #The data we collect
                    expectationsOfBasesAtEachPosition = {}
                    frequenciesOfAlignedBasesAtEachPosition = {}
                    totalSampledReads = 0
                    totalAlignedPairs = 0
                    totalReadLength = 0
                    totalReferenceLength = sum(map(len, refSequences.values()))
                    #Get a randomised ordering for the reads
                    reads = [ aR for aR in samIterator(sam) ]
                    random.shuffle(reads)
                    for aR in reads: #Iterate on the sam lines
                        #NOTE: integer division here (Python 2) — the quota is
                        #only hit once totalReadLength reaches coverage whole
                        #multiples of the reference length.
                        if totalReadLength/totalReferenceLength >= coverage: #Stop when coverage exceeds the quota
                            break
                        totalReadLength += len(readSequences[aR.qname])
                        totalSampledReads += 1
                        #Temporary files
                        tempCigarFile = os.path.join(self.getLocalTempDir(), "rescoredCigar.cig")
                        tempRefFile = os.path.join(self.getLocalTempDir(), "ref.fa")
                        tempReadFile = os.path.join(self.getLocalTempDir(), "read.fa")
                        tempPosteriorProbsFile = os.path.join(self.getLocalTempDir(), "probs.tsv")
                        #Ref name
                        refSeqName = sam.getrname(aR.rname)
                        #Sequences
                        refSeq = refSequences[sam.getrname(aR.rname)]
                        #Walk through the aligned pairs to collate the bases of aligned positions
                        for aP in AlignedPair.iterator(aR, refSeq, readSequences[aR.qname]):
                            totalAlignedPairs += 1 #Record an aligned pair
                            key = (refSeqName, aP.refPos)
                            if key not in frequenciesOfAlignedBasesAtEachPosition:
                                frequenciesOfAlignedBasesAtEachPosition[key] = dict(zip(bases, [0.0]*len(bases)))
                            readBase = aP.getReadBase() #readSeq[aP.readPos].upper() #Use the absolute read, ins
                            if readBase in bases:
                                frequenciesOfAlignedBasesAtEachPosition[key][readBase] += 1
                        #Write the temporary files.
                        readSeq = aR.query #This excludes bases that were soft-clipped and is always of positive strand coordinates
                        fastaWrite(tempRefFile, refSeqName, refSeq)
                        fastaWrite(tempReadFile, aR.qname, readSeq)
                        #Exonerate format Cigar string, which is in readSeq coordinates (positive strand).
                        assert aR.pos == 0
                        assert aR.qstart == 0
                        assert aR.qend == len(readSeq)
                        assert aR.aend == len(refSeq)
                        cigarString = getExonerateCigarFormatString(aR, sam)
                        #Call to cactus_realign
                        if hmmType == "trained_0":
                            system("echo %s | cactus_realign %s %s --diagonalExpansion=10 --splitMatrixBiggerThanThis=100 --outputAllPosteriorProbs=%s --loadHmm=%s > %s" % \
                                   (cigarString, tempRefFile, tempReadFile, tempPosteriorProbsFile, hmmFile0, tempCigarFile))
                        elif hmmType == "trained_20":
                            system("echo %s | cactus_realign %s %s --diagonalExpansion=10 --splitMatrixBiggerThanThis=100 --outputAllPosteriorProbs=%s --loadHmm=%s > %s" % \
                                   (cigarString, tempRefFile, tempReadFile, tempPosteriorProbsFile, hmmFile20, tempCigarFile))
                        elif hmmType == "trained_40":
                            system("echo %s | cactus_realign %s %s --diagonalExpansion=10 --splitMatrixBiggerThanThis=100 --outputAllPosteriorProbs=%s --loadHmm=%s > %s" % \
                                   (cigarString, tempRefFile, tempReadFile, tempPosteriorProbsFile, hmmFile40, tempCigarFile))
                        else:
                            system("echo %s | cactus_realign %s %s --diagonalExpansion=10 --splitMatrixBiggerThanThis=100 --outputAllPosteriorProbs=%s > %s" % \
                                   (cigarString, tempRefFile, tempReadFile, tempPosteriorProbsFile, tempCigarFile))
                        #Now collate the reference position expectations
                        #Each line of the probs file is: refPos readPos posteriorProb
                        for refPosition, readPosition, posteriorProb in map(lambda x : map(float, x.split()), open(tempPosteriorProbsFile, 'r')):
                            key = (refSeqName, int(refPosition))
                            if key not in expectationsOfBasesAtEachPosition:
                                expectationsOfBasesAtEachPosition[key] = dict(zip(bases, [0.0]*len(bases)))
                            readBase = readSeq[int(readPosition)].upper()
                            if readBase in bases:
                                expectationsOfBasesAtEachPosition[key][readBase] += posteriorProb
                    #Collate aligned positions from cigars
                    sam.close()
                    totalHeldOut = len(snpSet)
                    totalNotHeldOut = totalReferenceLength - totalHeldOut
                    class SnpCalls:
                        """Accumulator for one calling strategy: (prob, pos)
                        true/false positives, false negatives and no-calls,
                        with cumulative precision/recall per prob threshold."""
                        def __init__(self):
                            self.falsePositives = []
                            self.truePositives = []
                            self.falseNegatives = []
                            self.notCalled = 0
                        @staticmethod
                        def bucket(calls):
                            #Cumulative histogram over 101 probability bins.
                            calls = calls[:]
                            calls.sort()
                            buckets = [0.0]*101
                            for prob in calls: #Discretize
                                buckets[int(round(prob*100))] += 1
                            for i in xrange(len(buckets)-2, -1, -1): #Make cumulative
                                buckets[i] += buckets[i+1]
                            return buckets
                        def getPrecisionByProbability(self):
                            tPs = self.bucket(map(lambda x : x[0], self.truePositives))
                            fPs = self.bucket(map(lambda x : x[0], self.falsePositives))
                            return map(lambda i : float(tPs[i]) / (tPs[i] + fPs[i]) if tPs[i] + fPs[i] != 0 else 0, xrange(len(tPs)))
                        def getRecallByProbability(self):
                            return map(lambda i : i/totalHeldOut if totalHeldOut != 0 else 0, self.bucket(map(lambda x : x[0], self.truePositives)))
                        def getTruePositiveLocations(self):
                            return map(lambda x : x[1], self.truePositives)
                        def getFalsePositiveLocations(self):
                            return map(lambda x : x[1], self.falsePositives)
                        def getFalseNegativeLocations(self):
                            return map(lambda x : x[0], self.falseNegatives)
                    #The different call sets
                    marginAlignMaxExpectedSnpCalls = SnpCalls()
                    marginAlignMaxLikelihoodSnpCalls = SnpCalls()
                    maxFrequencySnpCalls = SnpCalls()
                    maximumLikelihoodSnpCalls = SnpCalls()
                    #Now calculate the calls
                    for refSeqName in refSequences:
                        refSeq = refSequences[refSeqName]
                        for refPosition in xrange(len(refSeq)):
                            mutatedRefBase = refSeq[refPosition].upper()
                            trueRefBase = (mutatedRefBase if not (refSeqName, refPosition) in snpSet else snpSet[(refSeqName, refPosition)]).upper()
                            key = (refSeqName, refPosition)
                            #Get base calls
                            for errorSubstitutionMatrix, evolutionarySubstitutionMatrix, baseExpectations, snpCalls in \
                                ((flatSubstitutionMatrix, nullSubstitionMatrix, expectationsOfBasesAtEachPosition, marginAlignMaxExpectedSnpCalls),
                                 (hmmErrorSubstitutionMatrix, nullSubstitionMatrix, expectationsOfBasesAtEachPosition, marginAlignMaxLikelihoodSnpCalls),
                                 (flatSubstitutionMatrix, nullSubstitionMatrix, frequenciesOfAlignedBasesAtEachPosition, maxFrequencySnpCalls),
                                 (hmmErrorSubstitutionMatrix, nullSubstitionMatrix, frequenciesOfAlignedBasesAtEachPosition, maximumLikelihoodSnpCalls)):
                                if key in baseExpectations:
                                    #Get posterior likelihoods
                                    expectations = baseExpectations[key]
                                    totalExpectation = sum(expectations.values())
                                    if totalExpectation > 0.0: #expectationCallingThreshold:
                                        posteriorProbs = calcBasePosteriorProbs(dict(zip(bases, map(lambda x : float(expectations[x])/totalExpectation, bases))), mutatedRefBase,
                                                                                evolutionarySubstitutionMatrix, errorSubstitutionMatrix)
                                        probs = [ posteriorProbs[base] for base in "ACGT" ]
                                        #posteriorProbs.pop(mutatedRefBase) #Remove the ref base.
                                        #maxPosteriorProb = max(posteriorProbs.values())
                                        #chosenBase = random.choice([ base for base in posteriorProbs if posteriorProbs[base] == maxPosteriorProb ]).upper() #Very naive way to call the base
                                        for chosenBase in "ACGT":
                                            if chosenBase != mutatedRefBase:
                                                maxPosteriorProb = posteriorProbs[chosenBase]
                                                if trueRefBase != mutatedRefBase and trueRefBase == chosenBase:
                                                    snpCalls.truePositives.append((maxPosteriorProb, refPosition)) #True positive
                                                else:
                                                    snpCalls.falsePositives.append((maxPosteriorProb, refPosition)) #False positive
                                        """
                                        snpCalls.falseNegatives.append((refPosition, trueRefBase, mutatedRefBase, probs)) #False negative
                                        if trueRefBase != mutatedRefBase:
                                            if trueRefBase == chosenBase:
                                                snpCalls.truePositives.append((maxPosteriorProb, refPosition)) #True positive
                                            else:
                                                snpCalls.falseNegatives.append((refPosition, trueRefBase, mutatedRefBase, probs)) #False negative
                                        else:
                                            snpCalls.falsePositives.append((maxPosteriorProb, refPosition)) #False positive
                                        """
                                else:
                                    snpCalls.notCalled += 1
                    #Now find max-fscore point
                    for snpCalls, tagName in ((marginAlignMaxExpectedSnpCalls, "marginAlignMaxExpectedSnpCalls"),
                                              (marginAlignMaxLikelihoodSnpCalls, "marginAlignMaxLikelihoodSnpCalls"),
                                              (maxFrequencySnpCalls, "maxFrequencySnpCalls"),
                                              (maximumLikelihoodSnpCalls, "maximumLikelihoodSnpCalls")):
                        recall = snpCalls.getRecallByProbability()
                        precision = snpCalls.getPrecisionByProbability()
                        assert len(recall) == len(precision)
                        #Pick the probability-threshold index maximising F1.
                        fScore, pIndex = max(map(lambda i : (2 * recall[i] * precision[i] / (recall[i] + precision[i]) if recall[i] + precision[i] > 0 else 0.0, i), range(len(recall))))
                        #NOTE(review): the two locals below look misnamed (they
                        #hold recall/precision values, not counts) and are unused.
                        truePositives = snpCalls.getRecallByProbability()[pIndex]
                        falsePositives = snpCalls.getPrecisionByProbability()[pIndex]
                        optimumProbThreshold = float(pIndex)/100.0
                        #Write out the substitution info
                        node2 = ET.SubElement(node, tagName + "_" + hmmType, {
                                "coverage":str(coverage),
                                "actualCoverage":str(float(totalAlignedPairs)/totalReferenceLength),
                                "totalAlignedPairs":str(totalAlignedPairs),
                                "totalReferenceLength":str(totalReferenceLength),
                                "replicate":str(replicate),
                                "totalReads":str(len(reads)),
                                "avgSampledReadLength":str(float(totalReadLength)/totalSampledReads),
                                "totalSampledReads":str(totalSampledReads),
                                "totalHeldOut":str(totalHeldOut),
                                "totalNonHeldOut":str(totalNotHeldOut),
                                "recall":str(recall[pIndex]),
                                "precision":str(precision[pIndex]),
                                "fScore":str(fScore),
                                "optimumProbThreshold":str(optimumProbThreshold),
                                "totalNoCalls":str(snpCalls.notCalled),
                                "recallByProbability":" ".join(map(str, snpCalls.getRecallByProbability())),
                                "precisionByProbability":" ".join(map(str, snpCalls.getPrecisionByProbability())) })
                        #"falsePositiveLocations":" ".join(map(str, snpCalls.getFalsePositiveLocations())),
                        #"falseNegativeLocations":" ".join(map(str, snpCalls.getFalseNegativeLocations())),
                        #"truePositiveLocations":" ".join(map(str, snpCalls.getTruePositiveLocations())) })
                        for refPosition, trueRefBase, mutatedRefBase, posteriorProbs in snpCalls.falseNegatives:
                            ET.SubElement(node2, "falseNegative_%s_%s" % (trueRefBase, mutatedRefBase), { "posteriorProbs":" ".join(map(str, posteriorProbs))})
                        for falseNegativeBase in bases:
                            for mutatedBase in bases:
                                posteriorProbsArray = [ posteriorProbs for refPosition, trueRefBase, mutatedRefBase, posteriorProbs in snpCalls.falseNegatives if (trueRefBase.upper() == falseNegativeBase.upper() and mutatedBase.upper() == mutatedRefBase.upper() ) ]
                                if len(posteriorProbsArray) > 0:
                                    #Element-wise sum, then normalise to a distribution.
                                    summedProbs = reduce(lambda x, y : map(lambda i : x[i] + y[i], xrange(len(x))), posteriorProbsArray)
                                    summedProbs = map(lambda x : float(x)/sum(summedProbs), summedProbs)
                                    ET.SubElement(node2, "combinedFalseNegative_%s_%s" % (falseNegativeBase, mutatedBase), { "posteriorProbs":" ".join(map(str, summedProbs))})
        open(os.path.join(self.outputDir, "marginaliseConsensus.xml"), "w").write(prettyXml(node))
        #Indicate everything is all done
        self.finish()
| |
#!/usr/bin/env python
# Copyright 2016, 2017 Matteo Franchin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Simple image viewer.'''
import os
import sys
import argparse
import logging
import pygtk
pygtk.require('2.0')
import gtk
import gobject
from .browser_tab import BrowserTab
from .viewer_tab import ViewerTab
from .toolbar_window import ToolbarWindow
from . import file_utils
from .file_utils import FileList
from .config import Config, setup_logging, logger, version, SCALAR2
def create_action_tuple(name=None, stock_id=None, label=None, accel=None,
                        tooltip=None, fn=None):
    '''Build an entry for gtk.ActionGroup.add_actions().

    Returns the short 3-tuple form when no accelerator, tooltip or callback
    is given, otherwise the full 6-tuple form.
    '''
    short_form = (accel is None and tooltip is None and fn is None)
    if short_form:
        return (name, stock_id, label)
    return (name, stock_id, label, accel, tooltip, fn)
def create_toggle_tuple(name=None, stock_id=None, label=None, accel=None,
                        tooltip=None, fn=None, value=None):
    '''Build the 7-tuple entry gtk.ActionGroup.add_toggle_actions() expects.'''
    entry = (name, stock_id, label, accel, tooltip, fn, value)
    return entry
def create_radio_tuple(name=None, stock_id=None, label=None, accel=None,
                       tooltip=None, value=0):
    '''Build the 6-tuple entry gtk.ActionGroup.add_radio_actions() expects.'''
    assert name is not None, 'name must be given in radio action tuple'
    entry = (name, stock_id, label, accel, tooltip, value)
    return entry
class ApplicationMainWindow(gtk.Window):
    '''Top-level Immagine window: a shared menu bar above a notebook whose
    pages are BrowserTab (directory browsing) and ViewerTab (single image)
    instances. Also manages the fullscreen layout switch.'''

    application_name = 'Immagine image viewer'

    def __init__(self, start_path, image_paths=(),
                 parent=None, config=None):
        '''Create the main window.

        start_path: file or directory opened in the first tab.
        image_paths: iterable of image paths, one viewer tab each.
        parent: optional window whose screen is reused.
        config: optional Config; a fresh one is created when not given.
        '''
        # NOTE: image_paths previously defaulted to the mutable [] — replaced
        # with an immutable tuple to avoid the shared-default-argument pitfall.
        super(ApplicationMainWindow, self).__init__()
        self.fullscreen_widget = None
        self.fullscreen_toolbar = ToolbarWindow()
        self.open_dialog = None
        self._config = config or Config()
        self.sort_type = FileList.SORT_BY_MOD_DATE

        # Set screen size.
        try:
            self.set_screen(parent.get_screen())
        except AttributeError:
            # No parent window: this is the root window, so quit on destroy.
            self.connect('destroy', lambda *w: gtk.main_quit())

        width, height = self._get_window_size()
        self.set_default_size(width, height)

        # Populate the window.
        self.set_title(self.application_name)
        self.ui_manager = ui_manager = gtk.UIManager()
        self.set_data('ui-manager', ui_manager)
        ui_info, action_group = self._create_action_group()
        ui_manager.insert_action_group(action_group, 0)
        self.add_accel_group(ui_manager.get_accel_group())
        ui_manager.add_ui_from_string(ui_info)

        # Connect configuration handling with GUI.
        cfg = self._config
        cfg.override('thumb.size', self._thumb_size_getter)
        cfg.override('screen.size', self._screen_size_getter)
        hide_toggle = ui_manager.get_widget('/MenuBar/ViewMenu/ShowHidden')
        cfg.override('browser.show_hidden_files',
                     lambda *args: hide_toggle.get_active())
        reverse_toggle = \
            ui_manager.get_widget('/MenuBar/ViewMenu/ReverseSortOrder')
        cfg.override('browser.reversed_sort',
                     lambda *args: reverse_toggle.get_active())
        cfg.override('browser.sort_type', lambda *args: self.sort_type)

        # The menu is shared across all tabs.
        bar = ui_manager.get_widget('/MenuBar')
        bar.show()

        # Below the menu, we place the notebook.
        self.notebook = nb = gtk.Notebook()
        nb.set_scrollable(True)
        nb.set_tab_pos(gtk.POS_TOP)

        # Open a browsing tab for the given directory and one viewer tab for
        # each given image.
        self.browser_tab = self.open_tab(start_path)
        for image_path in image_paths:
            self.open_tab(image_path)

        # Place menu and notebook in a VBox. Add this to the window.
        self.window_content = vbox = gtk.VBox()
        vbox.pack_start(bar, expand=False)
        vbox.pack_start(nb)
        self.add(self.window_content)

        # Allow the window to get events.
        mask = gtk.gdk.KEY_PRESS_MASK | gtk.gdk.POINTER_MOTION_MASK
        self.add_events(mask)
        self.connect('key-press-event', self.on_key_press_event)
        self.connect("motion_notify_event", self.on_motion_notify_event)
        self.show_all()

    def _screen_size_getter(self, parent=None, attr_name=None):
        '''Return (width, height) of the smallest attached monitor.'''
        screen = self.get_screen()
        num_monitors = screen.get_n_monitors()
        geoms = [screen.get_monitor_geometry(i) for i in range(num_monitors)]
        return (min(g.width for g in geoms), min(g.height for g in geoms))

    def _get_window_size(self, min_size=(200, 200)):
        '''Compute the default window size from the configuration.

        Absolute 'window.size' values win; missing components fall back to
        'window.rel_size' fractions of the screen, clamped between min_size
        and the screen size.
        '''
        screen_size = self._screen_size_getter()
        rel_val = self._config.get('window.rel_size', of=SCALAR2,
                                   default=(0.5, 0.8))
        abs_val = self._config.get('window.size', of=SCALAR2,
                                   default=(None, None))
        ret = []
        for i, v in enumerate(abs_val):
            if v is None:
                v = int(screen_size[i] * min(1.0, max(0.0, rel_val[i])))
            ret.append(max(min_size[i], min(screen_size[i], v)))
        return tuple(ret)

    def _thumb_size_getter(self, parent=None, attr_name=None):
        '''Compute the thumbnail size from configuration (pixels, >= 5).'''
        screen_size = self._screen_size_getter()
        rel_size = self._config.get('thumb.rel_size', of=SCALAR2,
                                    default=(0.5 / 4, 0.5 / 3))
        abs_val = self._config.get('thumb.max_size', of=SCALAR2,
                                   default=(None, None))
        ret = []
        for i, v in enumerate(abs_val):
            if v is None:
                v = int(screen_size[i] * min(1.0, max(0.0, rel_size[i])))
            ret.append(max(5, v))
        return ret

    def change_layout(self):
        '''Switch in/out of fullscreen layout.'''
        nb = self.notebook
        if not self.get_fullscreen_mode():
            # Go to fullscreen mode.
            # Remove the widget (to be fullscreen-ed) from its parent tab.
            n = nb.get_current_page()
            tab = nb.get_nth_page(n)
            toolbar, tab_content = tab.get_children()
            tab.remove(toolbar)
            tab.remove(tab_content)

            # Remember the parent widget so that we can restore the tab when
            # going out of fullscreen mode.
            self.fullscreen_widget = tab
            self.fullscreen_toolbar.begin(toolbar)

            # Remove the main window widget and replace it with the tab widget.
            self.remove(self.window_content)
            self.add(tab_content)
        else:
            # Quit fullscreen mode.
            # Remove the tab widget from the window and replace it with the
            # default widget.
            tab_content = self.get_children()[0]
            self.remove(tab_content)
            self.add(self.window_content)

            # Put back the tab widget to its original parent.
            toolbar = self.fullscreen_toolbar.end()
            self.fullscreen_widget.pack_start(toolbar, expand=False)
            self.fullscreen_widget.pack_start(tab_content)
            self.fullscreen_widget = None

        # Propagate the new mode to every tab.
        fs = self.get_fullscreen_mode()
        for page in range(nb.get_n_pages()):
            tab = nb.get_nth_page(page)
            tab.set_fullscreen(fs)

    def on_motion_notify_event(self, widget, event):
        '''Reveal the floating toolbar when the mouse moves in fullscreen.'''
        if self.get_fullscreen_mode():
            self.fullscreen_toolbar.show_if_mouse_nearby()

    def get_current_tab(self):
        '''Return the active tab. This is either a BrowserTab or a ViewerTab.
        '''
        if self.fullscreen_widget is not None:
            return self.fullscreen_widget
        n = self.notebook.get_current_page()
        return self.notebook.get_nth_page(n)

    def _create_action_group(self):
        '''Build the UI XML description and the matching gtk.ActionGroup.

        Returns a (ui_info, action_group) pair ready to be installed in the
        window's UIManager.
        '''
        ui_info = \
            '''<ui>
              <menubar name='MenuBar'>
                <menu action='FileMenu'>
                  <menuitem action='Open'/>
                  <menuitem action='SaveConfig'/>
                  <separator/>
                  <menuitem action='Quit'/>
                </menu>
                <menu action='ViewMenu'>
                  <menuitem action='CloseTab'/>
                  <menuitem action='Fullscreen'/>
                  <menuitem action='ShowHidden'/>
                  <menu action='SortFilesBy'>
                    <menuitem action='SortFilesByName'/>
                    <menuitem action='SortFilesByModDate'/>
                    <menuitem action='SortFilesByExt'/>
                    <menuitem action='SortFilesBySize'/>
                  </menu>
                  <menuitem action='ReverseSortOrder'/>
                </menu>
                <menu action='HelpMenu'>
                  <menuitem action='About'/>
                </menu>
              </menubar>
            </ui>'''

        action = create_action_tuple
        action_entries = \
            (action(name='FileMenu', label='_File'),
             action(name='ViewMenu', label='_View'),
             action(name='SortFilesBy', label='_Sort files by'),
             action(name='HelpMenu', label='_Help'),
             action(name='Quit', stock_id=gtk.STOCK_QUIT, label='Quit',
                    accel='<control>Q', tooltip='Quit',
                    fn=self.quit_action),
             action(name='Open', stock_id=gtk.STOCK_OPEN,
                    label='_Open directory', accel='<control>O',
                    tooltip='Open a directory', fn=self.on_open_location),
             action(name='SaveConfig', stock_id=gtk.STOCK_SAVE,
                    label='_Save configuration', accel='<control>C',
                    tooltip='Save Immagine configuration',
                    fn=self.on_save_config),
             action(name='CloseTab', label='_Close current tab',
                    accel='<control>W', tooltip='Toggle fullscreen mode',
                    fn=self.close_tab_action),
             action(name='Fullscreen', label='_Fullscreen', accel='F11',
                    tooltip='Toggle fullscreen mode',
                    fn=self.fullscreen_action),
             action(name='About', label='_About', accel='<control>A',
                    tooltip='About', fn=self.about_action))

        toggle = create_toggle_tuple
        toggle_entries = \
            (toggle(name='ShowHidden', label='_Show hidden files',
                    accel='<control>H', tooltip='Show hidden files',
                    fn=self.update_album_handler, value=False),
             toggle(name='ReverseSortOrder', label='_Reverse sort order',
                    accel='<control>R', tooltip='Reverse the sort order',
                    fn=self.update_album_handler, value=False))

        radio = create_radio_tuple
        radio_entries = \
            (radio(name='SortFilesByName', label='Name',
                   accel='<control>N', tooltip='Sort by file name',
                   value=FileList.SORT_BY_FILE_NAME),
             radio(name='SortFilesByModDate', label='Modification date',
                   accel='<control>D',
                   tooltip='Sort first by modification date, then name',
                   value=FileList.SORT_BY_MOD_DATE),
             radio(name='SortFilesByExt', label='Extension',
                   accel='<control>E',
                   tooltip='Sort first by file extension, then name',
                   value=FileList.SORT_BY_FILE_EXT),
             radio(name='SortFilesBySize', label='Size',
                   accel='<control>S',
                   tooltip='Sort first by file size, then name',
                   value=FileList.SORT_BY_FILE_SIZE))

        action_group = gtk.ActionGroup('AppWindowActions')
        action_group.add_actions(action_entries)
        action_group.add_toggle_actions(toggle_entries)
        action_group.add_radio_actions(radio_entries,
                                       on_change=self.on_radio_change,
                                       value=self.sort_type)
        return (ui_info, action_group)

    def quit_action(self, action):
        '''Menu handler: quit the application.'''
        gtk.main_quit()

    def open_tab(self, path):
        '''Create a new tab for the given file/directory path.'''
        if os.path.isdir(path):
            return self.open_browser_tab(path)
        else:
            return self.open_viewer_tab(path)

    def open_browser_tab(self, path):
        '''Create a new BrowserTab to browser the given directory path.'''
        if not os.path.isdir(path):
            return None
        bt = BrowserTab(path, config=self._config)
        bt.set_callback('toggle_fullscreen', self.fullscreen_action)
        bt.set_callback('directory_changed', self.on_directory_changed)
        bt.set_callback('image_clicked', self.on_image_clicked)
        bt.set_callback('open_location', self.on_open_location)
        bt.show_all()
        self.notebook.append_page(bt, tab_label=bt.label)
        self.notebook.set_tab_reorderable(bt, True)
        self.notebook.set_current_page(-1)
        return bt

    def open_viewer_tab(self, path, **kwargs):
        '''Create a new ViewerTab to view the image at the given path.'''
        vt = ViewerTab(path, config=self._config, **kwargs)
        vt.set_callback('close_tab', self.on_close_tab)
        vt.set_callback('toggle_fullscreen', self.fullscreen_action)
        vt.show_all()
        self.notebook.append_page(vt, tab_label=vt.tab_top)
        self.notebook.set_tab_reorderable(vt, True)
        self.notebook.set_current_page(-1)
        self.on_tab_changed()
        # Return the new tab for consistency with open_browser_tab(), so that
        # open_tab() always hands the created tab back to its caller.
        return vt

    def on_tab_changed(self):
        '''Called when the current tab changes.'''
        if self.get_fullscreen_mode():
            # Leave and re-enter fullscreen so the newly selected tab's
            # content becomes the fullscreen widget.
            self.change_layout()
            self.change_layout()

    def about_action(self, action):
        '''Menu handler: show the About dialog.'''
        dialog = gtk.AboutDialog()
        dialog.set_name('Immagine {}'.format(version))
        dialog.set_copyright('\302\251 Copyright 2016, 2017 Matteo Franchin')
        dialog.set_website('https://github.com/mfnch/immagine')
        dialog.connect('response', lambda d, r: d.destroy())
        dialog.show()

    def close_tab_action(self, action):
        '''Menu handler: close the currently selected tab.'''
        n = self.notebook.get_current_page()
        self.on_close_tab(self.notebook.get_nth_page(n))

    def get_fullscreen_mode(self):
        '''Whether the window is currently showing one tab fullscreen.'''
        return (self.fullscreen_widget is not None)

    def fullscreen_action(self, action):
        '''Menu handler: toggle fullscreen layout and window state.'''
        self.change_layout()
        if self.get_fullscreen_mode():
            self.fullscreen()
        else:
            self.unfullscreen()

    def on_key_press_event(self, main_window, event):
        '''Forward key presses to the active tab.'''
        tab = self.get_current_tab()
        return tab.on_key_press_event(event)

    def on_directory_changed(self, new_directory):
        '''Browser callback: reflect the browsed directory in the title.'''
        self.set_title(new_directory + ' - ' + self.application_name)

    def on_image_clicked(self, file_list, file_item):
        '''Browser callback: open the clicked image in a viewer tab.'''
        if not file_item.is_dir:
            self.open_viewer_tab(file_item.full_path,
                                 file_list=file_list,
                                 file_index=file_item.index)

    def on_close_tab(self, viewer):
        '''Viewer callback: close the given viewer tab.'''
        if isinstance(viewer, ViewerTab):
            n = self.notebook.page_num(viewer)
            self.notebook.remove_page(n)
            self.on_tab_changed()

    def on_open_location(self, *action):
        '''Menu/browser callback: let the user pick a directory to browse.'''
        if self.open_dialog is None:
            # Build the chooser lazily and keep it for reuse.
            buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                       gtk.STOCK_OPEN, gtk.RESPONSE_OK)
            self.open_dialog = fc = gtk.FileChooserDialog(
                title='Choose directory',
                parent=None,
                action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
                buttons=buttons, backend=None)
            fc.set_default_response(gtk.RESPONSE_OK)
            f = gtk.FileFilter()
            f.set_name('All files')
            f.add_pattern("*")
            fc.add_filter(f)
            f = gtk.FileFilter()
            f.set_name("Images")
            f.add_mime_type("image/png")
            f.add_mime_type("image/jpeg")
            f.add_mime_type("image/gif")
            for ext in file_utils.image_file_extensions:
                f.add_pattern('*' + ext)
            fc.add_filter(f)
        fc = self.open_dialog
        response = fc.run()
        choice = (fc.get_filename() if response == gtk.RESPONSE_OK else None)
        fc.hide()
        if choice is not None:
            self.browser_tab.go_to_directory(choice)

    def on_save_config(self, *args):
        '''Menu handler: persist the current configuration.'''
        self._config.save()

    def update_album_handler(self, *args):
        '''Toggle handler: refresh the browser after a view option change.'''
        self.browser_tab.update_album()

    def on_radio_change(self, first_radio, active_radio):
        '''Radio handler: apply a new sort criterion to the browser.'''
        new_sort_type = active_radio.get_current_value()
        if new_sort_type != self.sort_type:
            self.sort_type = new_sort_type
            self.browser_tab.update_album()
def main(args=None):
    '''Command line entry point for the Immagine viewer.

    args: optional list of command line arguments (defaults to sys.argv[1:]).
    '''
    dsc = ('Immagine {} - image viewer with focus on the browsing experience'
           .format(version))
    parser = argparse.ArgumentParser(description=dsc)
    parser.add_argument('paths', metavar='PATH', type=str, nargs='*',
                        help=('Path to file or directory. All paths to files '
                              'are handled by opening a viewer tab. Only one '
                              'directory path should be provided and is used '
                              'as the initial browsing directory.'))
    parser.add_argument('-l', '--log', metavar='LEVEL', dest='loglevel',
                        choices=['DEBUG', 'WARN', 'ERROR', 'SILENT'],
                        default=None,
                        help='Log level. One of: DEBUG, WARN, ERROR, SILENT.')
    # BUG FIX: previously parse_args() ignored the `args` parameter, so
    # callers passing an explicit argument list were silently overridden by
    # sys.argv. parse_args(None) still falls back to sys.argv, so the default
    # behaviour is unchanged.
    args = parser.parse_args(args)

    setup_logging(args.loglevel)
    cfg = Config()
    cfg.load()

    # Split positional arguments into one browsing directory (the last
    # directory given wins, earlier ones are ignored) and viewer images.
    img_paths = []
    dir_path = None
    for path in args.paths:
        if os.path.isdir(path):
            if dir_path is not None:
                logger.warn('Ignoring argument {}'.format(dir_path))
            dir_path = path
        elif os.path.exists(path):
            img_paths.append(path)
        else:
            logger.warn('Ignoring argument {}'.format(path))

    # If a directory was not provided, take the directory of the first image.
    if dir_path is None:
        if img_paths:
            dir_path = os.path.dirname(img_paths[0])
        if dir_path is None or not os.path.isdir(dir_path):
            dir_path = os.getcwd()
    dir_path = os.path.realpath(dir_path)

    gtk.gdk.threads_init()
    with gtk.gdk.lock:
        ApplicationMainWindow(dir_path, img_paths, config=cfg)
        gtk.main()
| |
from appfd import forms, models
from appfd.forms import *
from appfd.models import *
from appfd.scripts import excel, resolve, tables
from appfd.scripts.excel import *
from bnlp import clean as bnlp_clean
from bnlp import getLocationsAndDatesFromEnglishText, getLocationsFromEnglishText
from bscrp import getRandomUserAgentString
from collections import Counter
import csv
from datetime import datetime
from docx import Document
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.mail import send_mail
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db import connection
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404, render_to_response
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.utils.crypto import get_random_string
from django.views.decorators.csrf import csrf_protect
from itertools import groupby, islice
from location_extractor import *
from magic import from_file
from multiprocessing import Process
from openpyxl import load_workbook
from operator import itemgetter
from os import listdir, mkdir, remove
from os.path import isfile
import geojson, json, requests, StringIO, sys, zipfile
from geojson import Feature, FeatureCollection, MultiPolygon, Point
import geojson
from json import dumps, loads
from re import findall
from requests import get
from appfd.scripts.resolve import *
from sendfile import sendfile
from subprocess import call, check_output
from super_python import superfy
from urllib import quote, quote_plus, urlretrieve
from openpyxl import load_workbook
#import sys
"""
#def log(string):
# print >> sys.stderr, 'message ...'
# print string
# with open("/home/usrfd/logfd.txt", "wb") as f:
# f.write(string)
with open("/tmp/logfd.txt", "a") as f:
f.write(string)
"""
##basically if aws
#if check_output(['uname','-n']).startswith("ip"):
# #f = open("/tmp/stdout","w")
# sys.stdout = sys.stderr
# Create your views here.
def must_be_active(user):
    """Access predicate for @user_passes_test: logged in AND account activated."""
    if not user.is_authenticated():
        return False
    return user.is_active
def about(request):
    """Render the static About page."""
    context = {}
    return render(request, "appfd/about.html", context)
def activate(request, key):
activation = Activation.objects.get(key=key)
print "activation is", activation
if activation.used:
print "activation has already been used"
else:
print "activation has not been used yet"
if activation.expired:
print "activation has expired"
else:
print "activation hadn't expired as of last check"
print "today is", datetime.now()
print "date of activation key is", activation.created
difference = (datetime.now() - activation.created.replace(tzinfo=None)).days
print "difference is", difference
if difference > 7:
print "activation has timed out and expired"
activation.expired
activation.save()
else:
print "you still got time, so we're gonna activate"
user = activation.user
user.is_active = True
user.save()
activation.used = True
activation.save()
return HttpResponseRedirect('/')
@csrf_protect
@user_passes_test(must_be_active)
def change_email(request):
user = request.user
print "user is", user
if request.method == 'POST':
print "request.method is", request.method
form = forms.ChangeEmailForm(data=request.POST)
if form.is_valid():
print "form is valid with cleaned_data", form.cleaned_data
password = form.cleaned_data['password']
if user.check_password(password):
new_email = form.cleaned_data['new_email']
new_email2 = form.cleaned_data['new_email2']
if new_email == new_email2:
if len(new_email) <= 30:
if User.objects.filter(email=email).exists():
form.add_error('new_email', 'Uh oh! A user with that email already exists.')
else:
user.is_active = False
user.username = new_email
user.email = new_email
user.save()
#create activation object/key
Activation.objects.create(expired=False, key=get_random_string(length=175), used=False, user=user)
print "send an activation email"
link = "http://" + host + "/activate/" + key
try: send_mail("[First Draft] Confirm Your New Email Address", "Please click the following link in order to re-activate the account under a new email address: " + link, "2347812637543875413287548123548213754@gmail.com", [user.email], fail_silently=False, html_message="<h3>Please click the following link in order to re-activate the account under a new email address:</h3></br></br><a href='" + link + "'>" + link + "</a>")
except Exception as e: print e
alerts = [{"type": "warning", "text": "Re-activate your account under the new email adress by going to the link we sent in an email to " + str(request.user.email)}]
return render(request, 'appfd/change_email.html', {'alerts': alerts})
else: #len(new_email) > 30
form.add_error('new_email', 'Your email is too long. We only accept emails that are less than 30 characters long.')
return render(request, 'appfd/change_email.html', {"form": form})
else: #new_email != new_email2
form.add_error('new_email', 'That two email addresses you entered do not match.')
return render(request, 'appfd/change_email.html', {"form": form})
else:
form.add_error('new_email', 'The password that you entered is incorrect.')
return render(request, 'appfd/change_email.html', {"form": form})
else: #form is not valid
print "form is not valid:", form.errors
return render(request, 'appfd/change_email.html', {"form": form})
elif request.method == "GET":
print "request.method == \"GET\'"
current_email = request.user.email
print "current_email is", current_email
return render(request, 'appfd/change_email.html', {"current_email": current_email})
@csrf_protect
@user_passes_test(must_be_active)
def change_password(request):
if request.method == 'POST':
print "request.method is", request.method
form = forms.ChangePasswordForm(data=request.POST)
if form.is_valid():
print "form is valid with cleaned_data", form.cleaned_data
old_password = form.cleaned_data['old_password']
user = request.user
if user.check_password(old_password):
new_password = form.cleaned_data['new_password']
user.set_password(new_password)
user.save()
alerts = [{"type": "danger", "text": "Activate your account by going to the link we sent in an email to " + str(request.user.email)}]
return render(request, 'appfd/change_password.html', {'alerts': alerts})
else:
form.add_error('old_password', 'You entered the wrong old password. Click forgot my password if you forgot it.')
return render(request, 'appfd/change_password.html', {'form': form})
else:
print "form is valid"
return render(request, 'appfd/change_password.html', {'form': form})
else:
print "request.method is probably get"
return render(request, 'appfd/change_password.html', {})
def contact(request):
    """Render the static Contact page."""
    context = {}
    return render(request, "appfd/contact.html", context)
def contributing(request):
    """Render the static Contributing page."""
    context = {}
    return render(request, "appfd/contributing.html", context)
# this method takes in data as text and returns a geojson of the map
def crunch(request):
    """Stub endpoint that is supposed to turn posted text into a geojson map.

    NOTE(review): ``log`` is only defined inside the commented-out block at
    the top of this module, so both the POST branch and the except handler
    raise NameError at runtime — confirm whether this view is dead code.
    """
    try:
        print "starting crunch"
        if request.method == 'POST':
            log("request.method is post")
        print "finish crunch"
    except Exception as e:
        log(str(e))
def disclaimers(request):
    """Render the static Disclaimers page."""
    context = {}
    return render(request, "appfd/disclaimers.html", context)
def help(request):
    """Render the static Help page.  (Name shadows the ``help`` builtin, but
    it is only referenced via URL routing, so it is kept for compatibility.)"""
    context = {}
    return render(request, "appfd/help.html", context)
def index(request):
    """Render the homepage with site-wide alerts plus activation alerts.

    Active users who have not yet been congratulated get a one-time success
    alert; inactive users are reminded to activate.  Any exception (e.g. no
    Activation row for the user) is printed and swallowed so the homepage
    still renders.
    """
    print "starting index with request"
    user = request.user
    # Alerts with user=None are site-wide announcements.
    alerts = list(Alert.objects.filter(user=None))
    print "type(alerts) is", type(alerts)
    try:
        if user.is_authenticated():
            if request.user.is_active:
                print "user is activate so no need to deal with activation"
                activation = Activation.objects.get(user=request.user)
                if not activation.notified_success:
                    # first visit since activating: congratulate once
                    alerts.append({"type": "success", "text": "You have successfully activated your account!"})
                    activation.notified_success = True
                    activation.save()
            else:
                print "user is not active, so haven't activated yet"
                alerts.append({"type": "danger", "text": "Activate your account by going to the link we sent in an email to " + str(request.user.email)})
        else:
            print "User hasn't logged in, so don't bother with account activation alerts"
    except Exception as e: print e
    print "about to finish index"
    return render(request, "appfd/index.html", {'alerts': alerts})
@csrf_protect
def password_recovery(request):
    """Reset a forgotten password to a random temporary string.

    NOTE(review): the authenticated branch only prints and then falls
    through without returning an HttpResponse; and despite the debug line
    "now send it in an email", no send_mail call exists — the temporary
    password is saved but never delivered.  Confirm intent before use.
    """
    user = request.user
    if user.is_authenticated():
        print "user is authenticated, so shouldn't be using forgot_password screen"
    else:
        if request.method == 'POST':
            print "request.method is post, so change the password and send that email!!"
            alerts = []
            form = forms.EmailForm(data=request.POST)
            if form.is_valid():
                print "cd is", form.cleaned_data
                email = form.cleaned_data['email']
                qs = User.objects.filter(email=email)
                count = qs.count()
                if count == 1:
                    print "looking good. there is 1 user with that email"
                    user_supposed = qs[0]
                    # same 175-char generator used for activation keys
                    new_password = get_random_string(length=175)
                    user_supposed.set_password(new_password)
                    user_supposed.save()
                    print "password changed to temp password"
                    print "now send it in an email"
                    alerts.append({"type": "success", "text": "We have sent you an email with a new temporary password."})
                    #print "create the alert that will show on the page"
                    #Alert.objects.create(permanent=False, text="We recently sent you an email to you with a new temporary password. Please make sure to change that password.", user=user_supposed)
                elif count == 0:
                    form.add_error('email', 'There are no users with that email address')
                elif count > 1:
                    form.add_error('email', 'Something is wrong. More than 1 user has that email address')
            return render(request, "appfd/password_recovery.html", {'alerts': alerts, 'form': form})
        else:
            return render(request, "appfd/password_recovery.html", {})
# #changes password to long ass string and then emails this to the user
#
# #change password
def mission(request):
    """Render the static Mission page."""
    context = {}
    return render(request, "appfd/mission.html", context)
@csrf_protect
def register(request):
host = request.get_host()
if request.method == 'POST':
print "request.method is", request.method
form = forms.RegistrationForm(data=request.POST)
if form.is_valid():
print "form is valid with cleaned_data", form.cleaned_data
email = form.cleaned_data['email']
if User.objects.filter(email=email).count() > 0:
print "count > 0"
form.add_error('email', "The email " + email + " is already being used.")
return render(request, 'appfd/register.html', {'form': form})
else:
print "that's a new email"
password = form.cleaned_data['password']
user = User.objects.create(email=email, username=email, is_active=False)
user.set_password(password)
user.save()
print "created user", user
#create activation key
#todo: make activation key of variable length
key = get_random_string(length=175)
Activation.objects.create(expired=False, key=key, used=False, user=user)
print "send an activation email"
link = "http://" + host + "/activate/" + key
try: send_mail("[First Draft] Confirm Your Email Address", "Please click the following link in order to activate the account: " + link, "123489123401238476123847123412341234l@gmail.com", [user.email], fail_silently=False, html_message="<h3>Please click the following link in order to activate the account:</h3></br></br><a href='" + link + "'>" + link + "</a>")
except Exception as e: print e
#login after registration
user.backend = 'django.contrib.auth.backends.ModelBackend'
auth.login(request, user)
print "logged in user"
print "return to the homepage after a succesful registration"
return render(request, 'appfd/register_success.html', {})
else:
print "form is valid"
return render(request, 'appfd/register.html', {'form': form})
else:
print "request.method is probably get"
return render(request, 'appfd/register.html', {})
def team(request):
    """Render the Team page listing every TeamMember record."""
    members = TeamMember.objects.all()
    context = {'team_members': members}
    return render(request, "appfd/team.html", context)
def create(job):
    """Worker: build a geojson map from pasted text (job['data']) under
    /home/usrfd/maps/<key>/ and finish the order.

    Runs of capitalized words are treated as candidate place names — the
    regex is a heuristic, as the inline comment says.
    """
    print "starting create with", job
    key = job['key']
    text = job['data']
    # basically this is a hack, so that if you paste in text
    # it assumes everything that is capitalized could be a place
    names = list(set(findall("(?:[A-Z][a-z]{1,15} )*(?:de )?[A-Z][a-z]{1,15}", text)))
    print "names are", names
    # extract_locations_with_context / resolve_locations come from the
    # star imports at the top of this module
    features = resolve_locations(extract_locations_with_context(text, names))
    featureCollection = FeatureCollection(features)
    serialized = geojson.dumps(featureCollection, sort_keys=True)
    # make directory to store files
    directory = "/home/usrfd/maps/" + key + "/"
    mkdir(directory)
    path_to_geojson = directory + key + ".geojson"
    with open(path_to_geojson, "wb") as f:
        print "writing"
        f.write(serialized)
        print "wrote"
    print "listdir is", isfile(path_to_geojson)
    finish_order(key)
def create_map_from_link(job):
    """Worker: fetch a web page, save its cleaned text, geocode the places
    mentioned, and write <key>.geojson, then finish the order."""
    print "starting create_map_from_link with", job['key']
    key = job['key']
    # make directory to store saved webpage and maps
    directory = "/home/usrfd/maps/" + key + "/"
    mkdir(directory)
    # get url
    link = job['link'].strip()
    # get web page text
    # sanitize the url into a flat filename (no separators/quotes)
    filename = link.replace("/","_").replace("\\","_").replace("'","_").replace('"',"_").replace(".","_").replace(":","_").replace("__","_")
    if not link.startswith("http"):
        print "we assume that the user didn't include the protocol"
        link = "http://" + link
    # randomize user agent to look less like a bot
    headers = {"User-Agent": getRandomUserAgentString()}
    text = bnlp_clean(get(link, headers=headers).text)
    # save text to file
    with open(directory + filename, "wb") as f:
        f.write(text.encode('utf-8'))
    features = resolve_locations(extract_locations_with_context(text))
    featureCollection = FeatureCollection(features)
    serialized = geojson.dumps(featureCollection, sort_keys=True)
    # make directory to store files
    path_to_geojson = directory + key + ".geojson"
    with open(path_to_geojson, "wb") as f:
        f.write(serialized)
    finish_order(key)
def create_csv_from_geojson(path_to_geojson):
try:
print "starting create_csv_from_geojson with ", path_to_geojson
cwd = '/'.join(path_to_geojson.split('/')[0:-1]) + '/'
filename_base = path_to_geojson.split("/")[-1].split(".")[0]
filename_csv = filename_base + '.csv'
path_to_csv = cwd + filename_csv
call(['ogr2ogr','-f','CSV', path_to_csv, path_to_geojson, '-lco', 'GEOMETRY=AS_XY'])
except Exception as e:
print '\nERROR in create_shapefile_from_geojson', e,'\n'
def create_shapefile_from_geojson(path_to_geojson):
    """Convert <key>.geojson to an ESRI shapefile, zip the four shapefile
    parts into <key>.zip, and delete the loose parts.  Errors are printed,
    never raised."""
    try:
        print "starting create_shapefile_from_geojson"
        cwd = '/'.join(path_to_geojson.split('/')[0:-1]) + '/'
        filename_base = path_to_geojson.split("/")[-1].split(".")[0]
        filename_zip = filename_base + '.zip'
        filename_dbf = filename_base + '.dbf'
        filename_prj = filename_base + '.prj'
        filename_shx = filename_base + '.shx'
        filename_shp = filename_base + '.shp'
        path_to_shp = cwd + filename_shp
        print "path_to_geojson is", path_to_geojson
        call(['ogr2ogr','-f','ESRI Shapefile', path_to_shp, path_to_geojson])
        try:
            # run zip from cwd so the archive stores bare filenames
            call(['zip', filename_zip, filename_dbf, filename_prj, filename_shx, filename_shp], cwd=cwd)
        except Exception as e:
            print "ERROR X", e
        # remove leftover shapefile parts
        remove(cwd+filename_dbf)
        remove(cwd+filename_prj)
        remove(cwd+filename_shx)
        remove(cwd+filename_shp)
    except Exception as e:
        print '\nERROR in create_shapefile_from_geojson', e,'\n'
def create_map_from_link_to_file(job):
print "starting create_from_file with", job
# make directory to store saved webpage and maps
directory = "/home/usrfd/maps/" + job['key'] + "/"
mkdir(directory)
# get url
link = job['link']
# get web page text
filename = link.replace("/","_").replace("\\","_").replace("'","_").replace('"',"_").replace(":","_").replace("__","_")
# create path to file
path_to_file = directory + filename
# save file to folder
urlretrieve(link, path_to_file)
mimeType = from_file(path_to_file, mime=True)
print "mimeType is", mimeType
job['filename'] = filename
job['filepath'] = path_to_file
if filename.endswith(('.xls','.xlsm','.xlsx')):
create_from_xl(job)
elif filename.endswith('.csv'):
create_map_from_csv(job)
finish_order(key)
def finish_order(key):
    """Post-process a finished job: derive the shapefile + csv from the
    geojson, then mark the Order row finished."""
    path_to_geojson = "/home/usrfd/maps/" + key + "/" + key + ".geojson"
    create_shapefile_from_geojson(path_to_geojson)
    create_csv_from_geojson(path_to_geojson)
    # this runs in a forked worker process: close the inherited DB
    # connection so the ORM opens a fresh one for this process
    from django.db import connection
    connection.close()
    order = Order.objects.get(token=key).finish()
    print "finished order", order
def create_from_file(job):
    """Worker: dispatch an uploaded file to the right converter by
    extension, then finish the order.  Unsupported extensions fall through
    and still finish the order with no map produced."""
    print "starting create_from_file with", job
    content_type = job['file'].content_type
    print "content_type is", content_type
    # .split("/")[-1] prevents would-be hackers from passing in paths as filenames
    job['filename'] = filename = job['file'].name.split("/")[-1]
    if filename.endswith(('.xls','.xlsx')):
        print "user uploaded an excel file!"
        create_from_xl(job)
    elif filename.endswith('.csv'):
        print "user uploaded a csv file!"
        create_map_from_csv(job)
    elif filename.endswith(".pdf"):
        print "user uploaded a pdf file!"
        create_map_from_pdf(job)
    elif filename.endswith(".docx"):
        print "user uploaded a docx file!"
        create_map_from_docx(job)
    finish_order(job['key'])
def create_map_from_pdf(job):
    """Worker: save an uploaded PDF, extract and geocode the places it
    mentions, and write <key>.geojson.

    NOTE(review): the extractor is handed the uploaded file object even
    after its chunks were consumed writing to disk — confirm the extractor
    rewinds or accepts this.
    """
    print "starting create_map_from_pdf with", job
    # unpack job dictionary key, file and maybe filepath
    key = job['key']
    filename = job['filename']
    directory = "/home/usrfd/maps/" + key + "/"
    file_obj = job['file']
    if 'filepath' not in job:
        # make directory to store excel file and maps
        mkdir(directory)
        filepath = directory + "/" + filename
        print "filepath = ", filepath
        # save file to disk
        with open(filepath, 'wb+') as destination:
            for chunk in file_obj.chunks():
                destination.write(chunk)
        print "wrote file"
    locations = extract_locations_with_context(file_obj)
    print "in views, locations are", len(locations)
    features = resolve_locations(locations)
    print "in views, features are", len(features)
    featureCollection = FeatureCollection(features)
    serialized = geojson.dumps(featureCollection, sort_keys=True)
    # make directory to store files
    path_to_geojson = directory + key + ".geojson"
    with open(path_to_geojson, "wb") as f:
        f.write(serialized)
def create_map_from_csv(job):
    """Worker: turn a CSV into geojson by geocoding its location column.

    The first 4 lines are buffered to sniff the header row and locate the
    location column; those buffered rows and the remaining reader rows are
    then processed with identical logic.
    NOTE(review): the "#first ten lines" comment disagrees with the
    ``index == 3`` cutoff (4 lines), and the trailing ``f.close()`` closes a
    handle already closed by the ``with`` that rebinds ``f`` — harmless but
    confusing.
    """
    print "starting create_map_from_csv with", job
    directory = "/home/usrfd/maps/" + job['key'] + "/"
    filename = job['filename']
    if 'filepath' not in job:
        file_obj = job['file']
        # make directory to store excel file and maps
        mkdir(directory)
        filepath = directory + "/" + filename
        # save file to disk
        with open(filepath, 'wb+') as destination:
            for chunk in file_obj.chunks():
                destination.write(chunk)
        print "wrote file"
    else:
        filepath = job['filepath']
    rows = []
    f = open(filepath, 'r')
    reader = csv.reader(f)
    #first ten lines
    top_lines = []
    for index, line in enumerate(reader):
        top_lines.append(line)
        if index == 3:
            break
    print "top_lines are", top_lines
    headerRow = tables.getHeaderRow(top_lines)
    print "headeRow is", headerRow
    location_column_index = tables.getLocationColumn(top_lines)
    print "location_column_index is", location_column_index
    features = []
    # pass 1: the buffered top lines (skipping the header row if present)
    for row_index, row in enumerate(top_lines):
        if headerRow and row_index == 0:
            pass
        else:
            geometry = None
            properties = {}
            for column_index, value in enumerate(row):
                value = tables.clean(value)
                if column_index == location_column_index:
                    place = resolve.resolve(value)
                    if place:
                        point = place.point
                        geometry = Point((point.x,point.y))
                if headerRow:
                    properties[headerRow[column_index]] = value
                else:
                    properties[column_index] = value
            feature = Feature(geometry=geometry, properties=properties)
            features.append(feature)
    # pass 2: the rest of the file, continuing from the same reader
    for row_index, row in enumerate(reader):
        geometry = None
        properties = {}
        for column_index, value in enumerate(row):
            value = tables.clean(value)
            if column_index == location_column_index:
                place = resolve.resolve(value)
                if place:
                    point = place.point
                    geometry = Point((point.x,point.y))
            if headerRow:
                properties[headerRow[column_index]] = value
            else:
                properties[column_index] = value
        feature = Feature(geometry=geometry, properties=properties)
        features.append(feature)
    featureCollection = FeatureCollection(features)
    serialized = geojson.dumps(featureCollection, sort_keys=True)
    with open(directory + filename.split(".")[0] + "." + "geojson", "wb") as f:
        f.write(serialized)
    print "finished creating geojson from csv file"
    f.close()
def create_map_from_docx(job):
    """Worker: extract paragraph text from a .docx, geocode the places it
    mentions, and write <key>.geojson.  Table columns are collected and
    printed but not yet used."""
    print "starting create_from_docx with", job
    directory = "/home/usrfd/maps/" + job['key'] + "/"
    filename = job['filename']
    if 'filepath' not in job:
        file_obj = job['file']
        # make directory to store excel file and maps
        mkdir(directory)
        filepath = directory + "/" + filename
        # save file to disk
        with open(filepath, 'wb+') as destination:
            for chunk in file_obj.chunks():
                destination.write(chunk)
        print "wrote file"
    else:
        filepath = job['filepath']
    document = Document(job['file'])
    print "documenbt = document"
    text = "\r\n\r\n".join([paragraph.text for paragraph in document.paragraphs])
    print "text is", text[:500]
    # convert to list of list of column values
    for table in document.tables:
        columns = []
        for column in table.columns:
            values = [cell.text for cell in column.cells]
            columns.append(values)
        print "columns are ", columns
    locations = extract_locations_with_context(text)
    print "in views, locations are", len(locations)
    features = resolve_locations(locations)
    print "in views, features are", len(features)
    featureCollection = FeatureCollection(features)
    serialized = geojson.dumps(featureCollection, sort_keys=True)
    # make directory to store files
    path_to_geojson = directory + job['key'] + ".geojson"
    with open(path_to_geojson, "wb") as f:
        f.write(serialized)
def create_from_xl(job):
    """Worker: turn an Excel workbook into geojson by geocoding the location
    column of every sheet.  With a header row, properties are keyed by
    header name and row 0 is skipped; otherwise by column index."""
    print "starting create_from_xl with", job
    directory = "/home/usrfd/maps/" + job['key'] + "/"
    filename = job['filename']
    if 'filepath' not in job:
        file_obj = job['file']
        # make directory to store excel file and maps
        mkdir(directory)
        filepath = directory + "/" + filename
        # save file to disk
        with open(filepath, 'wb+') as destination:
            for chunk in file_obj.chunks():
                destination.write(chunk)
        print "wrote file"
    else:
        filepath = job['filepath']
    wb = load_workbook(filepath)
    print "wb is", wb
    features = []
    for sheet in wb:
        rows = sheet.rows
        # getHeaderRow / getLocationColumn / cleanCellValue come from the
        # appfd.scripts.excel star import at the top of this module
        headerRow = getHeaderRow(rows[0], rows)
        location_column_index = getLocationColumn(sheet)
        print "location_column_index is", location_column_index
        for row_index, row in enumerate(rows):
            if headerRow:
                if row_index == 0:
                    pass
                else:
                    geometry = None
                    properties = {}
                    for cell_index, cell in enumerate(row):
                        value = cleanCellValue(cell.value)
                        if cell_index == location_column_index:
                            place = resolve.resolve(value)
                            if place:
                                point = place.point
                                geometry = Point((point.x,point.y))
                        properties[headerRow[cell_index]] = value
                    feature = Feature(geometry=geometry, properties=properties)
                    features.append(feature)
            else: #not headerRow
                geometry = None
                properties = {}
                for cell_index, cell in enumerate(row):
                    value = cleanCellValue(cell.value)
                    if cell_index == location_column_index:
                        #strip makes sure we remove any white space
                        place = resolve.resolve(value)
                        if place:
                            point = place.point
                            geometry = Point((point.x,point.y))
                    properties[cell_index] = value
                feature = Feature(geometry=geometry, properties=properties)
                features.append(feature)
    featureCollection = FeatureCollection(features)
    serialized = geojson.dumps(featureCollection, sort_keys=True)
    with open(directory + filename.split(".")[0] + "." + "geojson", "wb") as f:
        f.write(serialized)
    print "finished creating geojson from excel file"
def does_map_exist(request, job, extension):
    """Return "True"/"False" text: has job <key> produced <key>.<extension> yet?

    NOTE(review): the second debug line says "starting get_map" — copy-paste
    from get_map below.
    """
    print "starting does_map_exist"
    try:
        print "starting get_map with", job, extension
        return HttpResponse(isfile("/home/usrfd/maps/" + job + "/" + job + "." + extension))
    except Exception as e:
        print e
        return HttpResponse("")
#basically looks for the directory that corresponds to the job
# and returns whatever file in their that ends with geojson
def get_map(request, job, extension):
    """Serve a generated map file for job key ``job``.

    shp/zip requests download the zipped shapefile; any other extension
    returns the first matching file in the job directory as JSON.
    NOTE(review): on exception this prints and returns None (no
    HttpResponse) — confirm intended.
    """
    try:
        print "starting get_map with", job, extension
        path_to_directory = "/home/usrfd/maps/" + job + "/"
        # currently, loads zip file in memory and returns it
        # todo: use mod_xsendfile, so don't load into memory
        if extension in ("shp","zip"):
            filename = job + ".zip"
            abspath = path_to_directory + filename
            with open(abspath, "rb") as zip_file:
                response = HttpResponse(zip_file, content_type='application/force-download')
                response['Content-Disposition'] = 'attachment; filename="%s"' % filename
                return response
        else:
            data = ""
            for filename in listdir(path_to_directory):
                print "for filename"
                if filename.endswith("."+extension):
                    with open(path_to_directory + filename) as f:
                        data = f.read()
                    break
            return HttpResponse(data, content_type='application/json')
    except Exception as e:
        print e
# this method takes in data as text and returns a geojson of the map
def upload(request):
    """Accept pasted text (POST body JSON with a 'story' key), queue the map
    build in a background process, and immediately return the job key."""
    print "starting upload"
    if request.method == 'POST':
        print "request.method is post"
        key = get_random_string(25)
        Order.objects.create(token=key)
        # close the inherited DB connection so the forked worker opens its own
        from django.db import connection
        connection.close()
        job = {
            'data': loads(request.body)['story'],
            'key': key
        }
        Process(target=create, args=(job,)).start()
        return HttpResponse(job['key'])
    else:
        return HttpResponse("You have to post!")
def start_link(request):
    """Accept a link (POST body JSON with a 'link' key), queue a
    create_map_from_link build in a background process, and return the job
    key.  Exceptions are printed (returning None)."""
    try:
        print "starting start_link"
        if request.method == 'POST':
            print "request.method is post"
            key = get_random_string(25)
            Order.objects.create(token=key)
            # close the inherited DB connection so the forked worker opens its own
            from django.db import connection
            connection.close()
            job = {
                'link': loads(request.body)['link'],
                'key': key
            }
            Process(target=create_map_from_link, args=(job,)).start()
            return HttpResponse(job['key'])
        else:
            return HttpResponse("You have to post!")
    except Exception as e:
        print e
def start_link_to_file(request):
try:
print "starting start_link"
if request.method == 'POST':
print "request.method is post"
key = get_random_string(25)
Order.objects.create(token=key)
from django.db import connection
connection.close()
job = {
'link': loads(request.body)['link'],
'key': key
}
Process(target=create_map_from_link_to_file, args=(job,)).start()
return HttpResponse(job['key'])
else:
return HttpResponse("You have to post!")
except Exception as e:
print e
def thanks(request):
    """Render the Thanks page: a hard-coded credits list plus every package
    named in requirements.txt (split before any version parenthesis)."""
    with open("/home/usrfd/firstdraft/requirements.txt") as f:
        # (name, url) pairs; packages from requirements get no url
        list_to_thank = [("Clavin","https://github.com/Berico-Technologies/CLAVIN"),("Leaflet", "leafletjs.com"),("American Red Cross","https://github.com/americanredcross"),("OpenStreetMap","https://www.openstreetmap.org/")]
        list_to_thank += [(package.split("(")[0].strip(), None) for package in f.read().strip().split("\n")]
    return render(request, "appfd/thanks.html", {'list_to_thank': list_to_thank})
def upload_file(request):
    """Accept a multipart file upload, queue a create_from_file build in a
    background process, and return the job key.  Exceptions are printed
    (returning None)."""
    try:
        print "starting upload_file"
        if request.method == 'POST':
            print "request.method is post"
            form = UploadFileForm(request.POST, request.FILES)
            print "form is", form
            if form.is_valid():
                print "form is valid"
                key = get_random_string(25)
                Order.objects.create(token=key)
                # close the inherited DB connection so the forked worker opens its own
                from django.db import connection
                connection.close()
                job = {
                    'file': request.FILES['file'],
                    'key': key
                }
                print "job is", job
                Process(target=create_from_file, args=(job,)).start()
                return HttpResponse(job['key'])
            else:
                print form.errors
                return HttpResponse("post data was malformed")
        else:
            return HttpResponse("You have to post!")
    except Exception as e:
        print "e is", e
def view_map(request, job):
    """Render the map-viewing page for the given job key."""
    context = {'job': job}
    return render(request, "appfd/view_map.html", context)
| |
"""
textureatlas
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import itertools
from OpenGL import GL
import numpy
from mcedit2.util.load_png import loadPNGData
from mcedit2.rendering.lightmap import generateLightmap
from mcedit2.resourceloader import ResourceLoader, ResourceNotFound
from mcedit2.util import glutils
from mcedit2.util.resources import resourcePath
from mceditlib import util
log = logging.getLogger(__name__)
class TextureSlot(object):
    """A rectangular region of the atlas that textures are packed into.

    Each successful placement records (name, left, top, w, h, data) and
    advances the slot's origin along its longer remaining axis.
    """

    def __init__(self, left, top, right, bottom):
        self.left = left
        self.top = top
        self.right = right
        self.bottom = bottom
        # list of (name, left, top, width, height, data) placements
        self.textures = []

    @property
    def width(self):
        """Horizontal space remaining in this slot."""
        return self.right - self.left

    @property
    def height(self):
        """Vertical space remaining in this slot."""
        return self.bottom - self.top

    def addTexture(self, name, w, h, d):
        """Place a w*h texture at the slot origin.

        Returns True on success; False if the texture does not fit in the
        remaining space.
        """
        fits = w <= self.width and h <= self.height
        if not fits:
            return False
        self.textures.append((name, self.left, self.top, w, h, d))
        # consume space along the longer remaining axis
        if self.width > self.height:
            self.left += w
        else:
            self.top += h
        return True
def allTextureNames(blocktypes):
    """Yield the internal name of every block type, in order."""
    return (blocktype.internalName for blocktype in blocktypes)
class TextureAtlas(object):
    def __init__(self, world, resourceLoader, blockModels, maxLOD=0, overrideMaxSize=None):
        """
        Important members:

          textureData: RGBA Texture Data as a numpy array.

          texCoordsByName: Dictionary of texture coordinates. Usable for textures loaded using the extraTextures argument
          or from block definitions.

            Maps "texture_name" -> (left, top, right, bottom)

        :param world:
        :type world: mceditlib.worldeditor.WorldEditor
        :param resourceLoader:
        :type resourceLoader: mcedit2.resourceloader.ResourceLoader
        :param blockModels:
        :type blockModels: mcedit2.rendering.blockmodels.BlockModels
        :param maxLOD: Adds wrapped borders to each texture to allow mipmapping at this level of detail
        :type maxLOD: int
        :param overrideMaxSize: Override the maximum texture size - ONLY use for testing TextureAtlas without creating a GL context.
        :type overrideMaxSize: int or None
        :return:
        :rtype: TextureAtlas
        """
        self.overrideMaxSize = overrideMaxSize
        self.blockModels = blockModels
        self.blocktypes = world.blocktypes
        self._filename = world.filename
        self.resourceLoader = resourceLoader
        self._lightTexture = None
        self._terrainTexture = None
        self._maxLOD = maxLOD

        names = set()
        self._rawTextures = rawTextures = []

        # MCEDIT_UNKNOWN is the fallback tile; it must always be present
        assert "MCEDIT_UNKNOWN" in blockModels.getTextureNames()
        for filename in blockModels.getTextureNames():
            if filename in names:
                continue
            try:
                f = self._openImageStream(filename)
                rawTextures.append((filename,) + loadPNGData(f.read()))
                names.add(filename)
                log.debug("Loaded texture %s", filename)
            except ResourceNotFound as e:
                log.error("Could not load texture %s: %r", filename, e)
            except Exception as e:
                log.exception("%s while loading texture '%s', skipping...", e, filename)

        rawSize = sum(a.nbytes for (n, w, h, a) in rawTextures)
        log.info("Preloaded %d textures for world %s (%i kB)",
                 len(self._rawTextures), util.displayName(self._filename), rawSize/1024)

    def load(self):
        """Pack all preloaded textures into one RGBA atlas and upload it to GL.

        No-op if already loaded.  Fills texCoordsByName and cooks the block
        model quads against the final layout.
        """
        if self._terrainTexture:
            return
        if self.overrideMaxSize is None:
            maxSize = getGLMaximumTextureSize()
        else:
            maxSize = self.overrideMaxSize

        maxLOD = min(4, self._maxLOD)
        if not bool(GL.glGenerateMipmap):
            # mipmap generation unavailable on this GL implementation
            maxLOD = 0
        if maxLOD:
            borderSize = 1 << (maxLOD - 1)
        else:
            borderSize = 0

        slots = []
        atlasWidth = 0
        atlasHeight = 0
        # Largest textures first gives a tighter packing.
        # (Plain indexing instead of py2-only tuple-unpacking lambda.)
        self._rawTextures.sort(key=lambda entry: max(entry[1], entry[2]), reverse=True)
        for path, w, h, data in self._rawTextures:
            w += borderSize * 2
            h += borderSize * 2
            for slot in slots:
                if slot.addTexture(path, w, h, data):
                    log.debug("Slotting %s into an existing slot", path)
                    break
            else:
                if atlasHeight < 24 * atlasWidth and atlasHeight + h < maxSize:
                    # Prefer to lay out textures vertically, since animations are vertical strips
                    slots.append(TextureSlot(0, atlasHeight, max(atlasWidth, w), atlasHeight + h))
                    atlasWidth = max(atlasWidth, w)
                    atlasHeight = atlasHeight + h
                else:
                    slots.append(TextureSlot(atlasWidth, 0, atlasWidth + w, max(atlasHeight, h)))
                    atlasWidth = atlasWidth + w
                    atlasHeight = max(atlasHeight, h)
                if atlasWidth > maxSize or atlasHeight > maxSize:
                    # BUG FIX: the %s placeholders were never formatted -- the
                    # tuples were passed as extra ValueError args.
                    raise ValueError("Building texture atlas: Textures too large for maximum texture size. "
                                     "(Needed %s, only got %s)" % ((atlasWidth, atlasHeight), (maxSize, maxSize)))
                if not slots[-1].addTexture(path, w, h, data):
                    raise ValueError("Building texture atlas: Internal error.")
                log.debug("Slotting %s into a newly created slot", path)

        self.textureData = texData = numpy.zeros((atlasHeight, atlasWidth, 4), dtype='uint8')
        # magenta background makes unfilled atlas regions obvious
        self.textureData[:] = [0xff, 0x0, 0xff, 0xff]
        self.texCoordsByName = {}
        b = borderSize
        for slot in slots:
            for name, left, top, width, height, data in slot.textures:
                log.debug("Texture %s at (%d,%d,%d,%d)", name, left, top, width, height)
                texDataView = texData[top:top + height, left:left + width]
                if b:
                    texDataView[b:-b, b:-b] = data
                    # Wrap texture edges to avoid antialiasing bugs at edges of blocks
                    texDataView[-b:, b:-b] = data[:b]
                    texDataView[:b, b:-b] = data[-b:]
                    texDataView[:, -b:] = texDataView[:, b:2 * b]
                    texDataView[:, :b] = texDataView[:, -b * 2:-b]
                else:
                    texDataView[:] = data
                self.texCoordsByName[name] = left + b, top + b, width - 2 * b, height - 2 * b

        def _load():
            GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, atlasWidth, atlasHeight, 0, GL.GL_RGBA,
                            GL.GL_UNSIGNED_BYTE, self.textureData.ravel())

        if self.overrideMaxSize is None:
            if maxLOD:
                minFilter = GL.GL_NEAREST_MIPMAP_LINEAR
            else:
                minFilter = None
            self._terrainTexture = glutils.Texture(_load, minFilter=minFilter, maxLOD=maxLOD)
            self._terrainTexture.load()
        else:
            # testing mode: no GL context; just mark as loaded
            self._terrainTexture = object()
        self.width = atlasWidth
        self.height = atlasHeight
        totalSize = atlasWidth * atlasHeight * 4
        usedSize = sum(sum(width * height for _, _, _, width, height, _ in slot.textures) for slot in slots) * 4
        log.info("Terrain atlas created for world %s (%d/%d kB)", util.displayName(self._filename), usedSize / 1024,
                 totalSize / 1024)
        self.blockModels.cookQuads(self)

        #file("terrain-%sw-%sh.raw" % (atlasWidth, atlasHeight), "wb").write(texData.tostring())
        #raise SystemExit

    def _openImageStream(self, name):
        """Open a binary stream for the named texture (or the builtin fallback)."""
        if name == "MCEDIT_UNKNOWN":
            block_unknown = resourcePath("mcedit2/assets/mcedit2/block_unknown.png")
            # use open() rather than the py2-only file() builtin
            return open(block_unknown, "rb")
        return self.resourceLoader.openStream(name)

    def bindTerrain(self):
        self._terrainTexture.bind()

    _dayTime = 1.0

    @property
    def dayTime(self):
        return self._dayTime

    @dayTime.setter
    def dayTime(self, value):
        self._dayTime = value
        # BUG FIX: was ``del self._lightTexture``, which removed the instance
        # attribute entirely and made the next bindLight() raise
        # AttributeError; assign None so the lightmap is lazily rebuilt.
        self._lightTexture = None

    _minBrightness = 0.0

    @property
    def minBrightness(self):
        return self._minBrightness

    @minBrightness.setter
    def minBrightness(self, value):
        self._minBrightness = value
        # BUG FIX: same as dayTime.setter -- invalidate, don't delete.
        self._lightTexture = None

    def bindLight(self):
        if self._lightTexture is None:
            self._lightTexture = _makeLightTexture(self.dayTime, self.minBrightness)
        self._lightTexture.bind()

    def dispose(self):
        if self._terrainTexture:
            self._terrainTexture.dispose()
        if self._lightTexture:
            self._lightTexture.dispose()
def _makeLightTexture(dayTime=1.0, minBrightness=1.0):
def _loadLightTexture():
pixels = generateLightmap(dayTime)
pixels.clip(int(minBrightness * 255), 255, pixels)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, 16, 16, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, pixels.ravel())
return glutils.Texture(_loadLightTexture)
_maxSize = None
def getGLMaximumTextureSize():
global _maxSize
if _maxSize == None:
_maxSize = _getMaxSize()
return _maxSize
def _getMaxSize():
size = 16384
while size > 0:
size /= 2
GL.glTexImage2D(GL.GL_PROXY_TEXTURE_2D, 0, GL.GL_RGBA, size, size, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, None)
maxsize = GL.glGetTexLevelParameteriv(GL.GL_PROXY_TEXTURE_2D, 0, GL.GL_TEXTURE_WIDTH)
if maxsize:
return maxsize
return -1
def test_TextureAtlas():
rl = ResourceLoader()
| |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from operator import attrgetter
from flask import flash, jsonify, request, session
from sqlalchemy.orm import joinedload, subqueryload
from indico.core.db import db
from indico.modules.events.abstracts.controllers.base import RHManageAbstractsBase
from indico.modules.events.abstracts.controllers.common import (AbstractsDownloadAttachmentsMixin, AbstractsExportCSV,
AbstractsExportExcel, AbstractsExportPDFMixin,
CustomizeAbstractListMixin, DisplayAbstractListMixin)
from indico.modules.events.abstracts.forms import BulkAbstractJudgmentForm
from indico.modules.events.abstracts.lists import AbstractListGeneratorManagement
from indico.modules.events.abstracts.models.abstracts import Abstract, AbstractState
from indico.modules.events.abstracts.models.persons import AbstractPersonLink
from indico.modules.events.abstracts.operations import create_abstract, delete_abstract, judge_abstract
from indico.modules.events.abstracts.schemas import abstract_review_questions_schema, abstracts_schema
from indico.modules.events.abstracts.util import can_create_invited_abstracts, make_abstract_form
from indico.modules.events.abstracts.views import WPManageAbstracts
from indico.modules.events.contributions.models.persons import AuthorType
from indico.modules.events.util import get_field_values
from indico.modules.users.models.users import User
from indico.util.i18n import _, ngettext
from indico.web.util import jsonify_data, jsonify_form, jsonify_template
class RHAbstractListBase(RHManageAbstractsBase):
"""Base class for all RHs using the abstract list generator."""
def _process_args(self):
RHManageAbstractsBase._process_args(self)
self.list_generator = AbstractListGeneratorManagement(event=self.event)
class RHManageAbstractsActionsBase(RHAbstractListBase):
"""Base class for RHs performing actions on selected abstracts."""
_abstract_query_options = ()
@property
def _abstract_query(self):
query = Abstract.query.with_parent(self.event)
if self._abstract_query_options:
query = query.options(*self._abstract_query_options)
return query
def _process_args(self):
RHAbstractListBase._process_args(self)
ids = request.form.getlist('abstract_id', type=int)
self.abstracts = self._abstract_query.filter(Abstract.id.in_(ids)).all()
class RHBulkAbstractJudgment(RHManageAbstractsActionsBase):
"""Perform bulk judgment operations on selected abstracts."""
def _process(self):
form = BulkAbstractJudgmentForm(event=self.event, abstract_id=[a.id for a in self.abstracts],
judgment=request.form.get('judgment'))
if form.validate_on_submit():
judgment_data, abstract_data = form.split_data
submitted_abstracts = {abstract for abstract in self.abstracts if abstract.state == AbstractState.submitted}
for abstract in submitted_abstracts:
judge_abstract(abstract, abstract_data, judge=session.user, **judgment_data)
num_judged_abstracts = len(submitted_abstracts)
num_prejudged_abstracts = len(self.abstracts) - num_judged_abstracts
if num_judged_abstracts:
flash(ngettext("One abstract has been judged.",
"{num} abstracts have been judged.",
num_judged_abstracts).format(num=num_judged_abstracts), 'success')
if num_prejudged_abstracts:
flash(ngettext("One abstract has been skipped since it is already judged.",
"{num} abstracts have been skipped since they are already judged.",
num_prejudged_abstracts).format(num=num_prejudged_abstracts), 'warning')
return jsonify_data(**self.list_generator.render_list())
return jsonify_form(form=form, fields=form._order, submit=_('Judge'), disabled_until_change=False)
class RHAbstractList(DisplayAbstractListMixin, RHAbstractListBase):
template = 'management/abstract_list.html'
view_class = WPManageAbstracts
def _render_template(self, **kwargs):
kwargs['track_session_map'] = {track.id: track.default_session_id for track in self.event.tracks}
can_create = can_create_invited_abstracts(self.event)
return super()._render_template(can_create_invited_abstracts=can_create, **kwargs)
class RHAbstractListCustomize(CustomizeAbstractListMixin, RHAbstractListBase):
view_class = WPManageAbstracts
ALLOW_LOCKED = True
class RHAbstractListStaticURL(RHAbstractListBase):
"""Generate a static URL for the configuration of the abstract list."""
ALLOW_LOCKED = True
def _process(self):
return jsonify(url=self.list_generator.generate_static_url())
class RHCreateAbstract(RHAbstractListBase):
def _process(self):
is_invited = request.args.get('invited') == '1'
abstract_form_class = make_abstract_form(self.event, session.user, notification_option=True,
management=self.management, invited=is_invited)
form = abstract_form_class(event=self.event, management=self.management, invited=is_invited)
if is_invited:
del form.submitted_contrib_type
del form.attachments
del form.send_notifications
del form.person_links
if form.validate_on_submit():
data = form.data
submitter = None
if is_invited:
if form.users_with_no_account.data == 'existing':
submitter = data['submitter']
else:
submitter = User(first_name=data['first_name'], last_name=data['last_name'], email=data['email'],
is_pending=True)
db.session.add(submitter)
db.session.flush()
data.pop('first_name')
data.pop('last_name')
data.pop('email')
data.pop('users_with_no_account')
data.pop('submitter')
send_notifications = data.pop('send_notifications', is_invited)
abstract = create_abstract(self.event, *get_field_values(data), send_notifications=send_notifications,
submitter=submitter, is_invited=is_invited)
flash(_("Abstract '{}' created successfully").format(abstract.title), 'success')
tpl_components = self.list_generator.render_list(abstract)
if tpl_components.get('hide_abstract'):
self.list_generator.flash_info_message(abstract)
return jsonify_data(**tpl_components)
return jsonify_form(form, back=_("Cancel"), form_header_kwargs={'action': request.relative_url})
class RHDeleteAbstracts(RHManageAbstractsActionsBase):
def _process(self):
delete_contribs = request.values.get('delete_contribs') == '1'
deleted_contrib_count = 0
for abstract in self.abstracts:
if delete_contribs and abstract.contribution:
deleted_contrib_count += 1
delete_abstract(abstract, delete_contribs)
deleted_abstract_count = len(self.abstracts)
flash(ngettext("The abstract has been deleted.",
"{count} abstracts have been deleted.", deleted_abstract_count)
.format(count=deleted_abstract_count), 'success')
if deleted_contrib_count:
flash(ngettext("The linked contribution has been deleted.",
"{count} linked contributions have been deleted.", deleted_contrib_count)
.format(count=deleted_contrib_count), 'success')
return jsonify_data(**self.list_generator.render_list())
class RHAbstractPersonList(RHManageAbstractsActionsBase):
"""List of persons somehow related to abstracts (co-authors, speakers...)."""
ALLOW_LOCKED = True
@property
def _membership_filter(self):
abstract_ids = {abstract.id for abstract in self.abstracts}
return Abstract.id.in_(abstract_ids)
def _process(self):
submitters = {abstract.submitter for abstract in self.abstracts}
abstract_persons = (AbstractPersonLink.query
.filter(AbstractPersonLink.abstract.has(self._membership_filter))
.all())
abstract_persons_dict = defaultdict(lambda: {'speaker': False, 'submitter': False, 'primary_author': False,
'secondary_author': False})
for abstract_person in abstract_persons:
dict_key = abstract_person.person.user if abstract_person.person.user else abstract_person.person
person_roles = abstract_persons_dict[dict_key]
person_roles['speaker'] |= abstract_person.is_speaker
person_roles['primary_author'] |= abstract_person.author_type == AuthorType.primary
person_roles['secondary_author'] |= abstract_person.author_type == AuthorType.secondary
for submitter in submitters:
abstract_persons_dict[submitter]['submitter'] |= True
return jsonify_template('events/abstracts/management/abstract_person_list.html',
event_persons=abstract_persons_dict, event=self.event)
class RHManageAbstractsExportActionsBase(RHManageAbstractsActionsBase):
ALLOW_LOCKED = True
class RHAbstractsDownloadAttachments(AbstractsDownloadAttachmentsMixin, RHManageAbstractsExportActionsBase):
pass
class RHAbstractsExportPDF(AbstractsExportPDFMixin, RHManageAbstractsExportActionsBase):
pass
class RHAbstractsExportCSV(AbstractsExportCSV, RHManageAbstractsExportActionsBase):
pass
class RHAbstractsExportExcel(AbstractsExportExcel, RHManageAbstractsExportActionsBase):
pass
class RHAbstractsExportJSON(RHManageAbstractsExportActionsBase):
_abstract_query_options = (joinedload('submitter'),
joinedload('accepted_track'),
joinedload('accepted_contrib_type'),
joinedload('submitted_contrib_type'),
subqueryload('comments'),
subqueryload('field_values'),
subqueryload('submitted_for_tracks'),
subqueryload('reviewed_for_tracks'),
subqueryload('person_links'),
subqueryload('reviews').joinedload('ratings').joinedload('question'))
def _process(self):
abstracts = abstracts_schema.dump(sorted(self.abstracts, key=attrgetter('friendly_id')))
questions = abstract_review_questions_schema.dump(self.event.abstract_review_questions)
response = jsonify(version=1, abstracts=abstracts, questions=questions)
response.headers['Content-Disposition'] = 'attachment; filename="abstracts.json"'
return response
| |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Utility functions and for loading neurons'''
import glob
import logging
import os
import shutil
import tempfile
import uuid
from functools import partial
from io import IOBase, open
from neurom._compat import StringType, filter
from neurom.core.population import Population
from neurom.exceptions import NeuroMError, RawDataError
from neurom.fst._core import FstNeuron
from neurom.io import neurolucida, swc
from neurom.io.datawrapper import DataWrapper
L = logging.getLogger(__name__)
def _is_morphology_file(filepath):
""" Check if `filepath` is a file with one of morphology file extensions. """
return (
os.path.isfile(filepath) and
os.path.splitext(filepath)[1].lower() in ('.swc', '.h5', '.asc')
)
class NeuronLoader(object):
"""
Caching morphology loader.
Arguments:
directory: path to directory with morphology files
file_ext: file extension to look for (if not set, will pick any of .swc|.h5|.asc)
cache_size: size of LRU cache (if not set, no caching done)
"""
def __init__(self, directory, file_ext=None, cache_size=None):
self.directory = directory
self.file_ext = file_ext
if cache_size is not None:
from pylru import FunctionCacheManager
self.get = FunctionCacheManager(self.get, size=cache_size)
def _filepath(self, name):
""" File path to `name` morphology file. """
if self.file_ext is None:
candidates = glob.glob(os.path.join(self.directory, name + ".*"))
try:
return next(filter(_is_morphology_file, candidates))
except StopIteration:
raise NeuroMError("Can not find morphology file for '%s' " % name)
else:
return os.path.join(self.directory, name + self.file_ext)
# pylint:disable=method-hidden
def get(self, name):
""" Get `name` morphology data. """
return load_neuron(self._filepath(name))
def get_morph_files(directory):
'''Get a list of all morphology files in a directory
Returns:
list with all files with extensions '.swc' , 'h5' or '.asc' (case insensitive)
'''
lsdir = (os.path.join(directory, m) for m in os.listdir(directory))
return list(filter(_is_morphology_file, lsdir))
def get_files_by_path(path):
'''Get a file or set of files from a file path
Return list of files with path
'''
if os.path.isfile(path):
return [path]
if os.path.isdir(path):
return get_morph_files(path)
raise IOError('Invalid data path %s' % path)
def load_neuron(handle, reader=None):
'''Build section trees from an h5 or swc file'''
rdw = load_data(handle, reader)
if isinstance(handle, StringType):
name = os.path.splitext(os.path.basename(handle))[0]
else:
name = None
return FstNeuron(rdw, name)
def load_neurons(neurons,
neuron_loader=load_neuron,
name=None,
population_class=Population,
ignored_exceptions=()):
'''Create a population object from all morphologies in a directory\
of from morphologies in a list of file names
Parameters:
neurons: directory path or list of neuron file paths
neuron_loader: function taking a filename and returning a neuron
population_class: class representing populations
name (str): optional name of population. By default 'Population' or\
filepath basename depending on whether neurons is list or\
directory path respectively.
Returns:
neuron population object
'''
if isinstance(neurons, (list, tuple)):
files = neurons
name = name if name is not None else 'Population'
elif isinstance(neurons, StringType):
files = get_files_by_path(neurons)
name = name if name is not None else os.path.basename(neurons)
ignored_exceptions = tuple(ignored_exceptions)
pop = []
for f in files:
try:
pop.append(neuron_loader(f))
except NeuroMError as e:
if isinstance(e, ignored_exceptions):
L.info('Ignoring exception "%s" for file %s',
e, os.path.basename(f))
continue
raise
return population_class(pop, name=name)
def _get_file(handle):
'''Returns the filename of the file to read
If handle is a stream, a temp file is written on disk first
and its filename is returned'''
if not isinstance(handle, IOBase):
return handle
fd, temp_file = tempfile.mkstemp(str(uuid.uuid4()), prefix='neurom-')
os.close(fd)
with open(temp_file, 'w') as fd:
handle.seek(0)
shutil.copyfileobj(handle, fd)
return temp_file
def load_data(handle, reader=None):
'''Unpack data into a raw data wrapper'''
if not reader:
reader = os.path.splitext(handle)[1][1:].lower()
if reader not in _READERS:
raise NeuroMError('Do not have a loader for "%s" extension' % reader)
filename = _get_file(handle)
try:
return _READERS[reader](filename)
except Exception as e:
L.exception('Error reading file %s, using "%s" loader', filename, reader)
raise RawDataError('Error reading file %s:\n%s' % (filename, str(e)))
def _load_h5(filename):
'''Delay loading of h5py until it is needed'''
from neurom.io import hdf5
return hdf5.read(filename,
remove_duplicates=False,
data_wrapper=DataWrapper)
_READERS = {
'swc': partial(swc.read,
data_wrapper=DataWrapper),
'h5': _load_h5,
'asc': partial(neurolucida.read,
data_wrapper=DataWrapper)
}
| |
from __future__ import absolute_import, unicode_literals
import uuid
from collections import defaultdict
from copy import copy
from django.conf.urls import url
from django.db import connections
from django.utils.translation import ugettext_lazy as _, ungettext_lazy as __
from debug_toolbar.panels import Panel
from debug_toolbar.panels.sql import views
from debug_toolbar.panels.sql.forms import SQLSelectForm
from debug_toolbar.panels.sql.tracking import unwrap_cursor, wrap_cursor
from debug_toolbar.panels.sql.utils import (
contrasting_color_generator, reformat_sql,
)
from debug_toolbar.utils import render_stacktrace
def get_isolation_level_display(vendor, level):
if vendor == 'postgresql':
import psycopg2.extensions
choices = {
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT: _("Autocommit"),
psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED: _("Read uncommitted"),
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED: _("Read committed"),
psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ: _("Repeatable read"),
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE: _("Serializable"),
}
else:
raise ValueError(vendor)
return choices.get(level)
def get_transaction_status_display(vendor, level):
if vendor == 'postgresql':
import psycopg2.extensions
choices = {
psycopg2.extensions.TRANSACTION_STATUS_IDLE: _("Idle"),
psycopg2.extensions.TRANSACTION_STATUS_ACTIVE: _("Active"),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS: _("In transaction"),
psycopg2.extensions.TRANSACTION_STATUS_INERROR: _("In error"),
psycopg2.extensions.TRANSACTION_STATUS_UNKNOWN: _("Unknown"),
}
else:
raise ValueError(vendor)
return choices.get(level)
class SQLPanel(Panel):
"""
Panel that displays information about the SQL queries run while processing
the request.
"""
def __init__(self, *args, **kwargs):
super(SQLPanel, self).__init__(*args, **kwargs)
self._offset = dict((k, len(connections[k].queries)) for k in connections)
self._sql_time = 0
self._num_queries = 0
self._queries = []
self._databases = {}
self._transaction_status = {}
self._transaction_ids = {}
def get_transaction_id(self, alias):
if alias not in connections:
return
conn = connections[alias].connection
if not conn:
return
if conn.vendor == 'postgresql':
cur_status = conn.get_transaction_status()
else:
raise ValueError(conn.vendor)
last_status = self._transaction_status.get(alias)
self._transaction_status[alias] = cur_status
if not cur_status:
# No available state
return None
if cur_status != last_status:
if cur_status:
self._transaction_ids[alias] = uuid.uuid4().hex
else:
self._transaction_ids[alias] = None
return self._transaction_ids[alias]
def record(self, alias, **kwargs):
self._queries.append((alias, kwargs))
if alias not in self._databases:
self._databases[alias] = {
'time_spent': kwargs['duration'],
'num_queries': 1,
}
else:
self._databases[alias]['time_spent'] += kwargs['duration']
self._databases[alias]['num_queries'] += 1
self._sql_time += kwargs['duration']
self._num_queries += 1
# Implement the Panel API
nav_title = _("SQL")
@property
def nav_subtitle(self):
return __("%d query in %.2fms", "%d queries in %.2fms",
self._num_queries) % (self._num_queries, self._sql_time)
@property
def title(self):
count = len(self._databases)
return __('SQL queries from %(count)d connection',
'SQL queries from %(count)d connections',
count) % {'count': count}
template = 'debug_toolbar/panels/sql.html'
@classmethod
def get_urls(cls):
return [
url(r'^sql_select/$', views.sql_select, name='sql_select'),
url(r'^sql_explain/$', views.sql_explain, name='sql_explain'),
url(r'^sql_profile/$', views.sql_profile, name='sql_profile'),
]
def enable_instrumentation(self):
# This is thread-safe because database connections are thread-local.
for connection in connections.all():
wrap_cursor(connection, self)
def disable_instrumentation(self):
for connection in connections.all():
unwrap_cursor(connection)
def generate_stats(self, request, response):
colors = contrasting_color_generator()
trace_colors = defaultdict(lambda: next(colors))
query_duplicates = defaultdict(lambda: defaultdict(int))
if self._queries:
width_ratio_tally = 0
factor = int(256.0 / (len(self._databases) * 2.5))
for n, db in enumerate(self._databases.values()):
rgb = [0, 0, 0]
color = n % 3
rgb[color] = 256 - n // 3 * factor
nn = color
# XXX: pretty sure this is horrible after so many aliases
while rgb[color] < factor:
nc = min(256 - rgb[color], 256)
rgb[color] += nc
nn += 1
if nn > 2:
nn = 0
rgb[nn] = nc
db['rgb_color'] = rgb
trans_ids = {}
trans_id = None
i = 0
for alias, query in self._queries:
query_duplicates[alias][query["raw_sql"]] += 1
trans_id = query.get('trans_id')
last_trans_id = trans_ids.get(alias)
if trans_id != last_trans_id:
if last_trans_id:
self._queries[(i - 1)][1]['ends_trans'] = True
trans_ids[alias] = trans_id
if trans_id:
query['starts_trans'] = True
if trans_id:
query['in_trans'] = True
query['alias'] = alias
if 'iso_level' in query:
query['iso_level'] = get_isolation_level_display(query['vendor'],
query['iso_level'])
if 'trans_status' in query:
query['trans_status'] = get_transaction_status_display(query['vendor'],
query['trans_status'])
query['form'] = SQLSelectForm(auto_id=None, initial=copy(query))
if query['sql']:
query['sql'] = reformat_sql(query['sql'])
query['rgb_color'] = self._databases[alias]['rgb_color']
try:
query['width_ratio'] = (query['duration'] / self._sql_time) * 100
query['width_ratio_relative'] = (
100.0 * query['width_ratio'] / (100.0 - width_ratio_tally))
except ZeroDivisionError:
query['width_ratio'] = 0
query['width_ratio_relative'] = 0
query['start_offset'] = width_ratio_tally
query['end_offset'] = query['width_ratio'] + query['start_offset']
width_ratio_tally += query['width_ratio']
query['stacktrace'] = render_stacktrace(query['stacktrace'])
i += 1
query['trace_color'] = trace_colors[query['stacktrace']]
if trans_id:
self._queries[(i - 1)][1]['ends_trans'] = True
# Queries are duplicates only if there's as least 2 of them.
# Also, to hide queries, we need to give all the duplicate groups an id
query_colors = contrasting_color_generator()
query_duplicates = dict(
(alias, dict(
(query, (duplicate_count, next(query_colors)))
for query, duplicate_count in queries.items()
if duplicate_count >= 2
))
for alias, queries in query_duplicates.items()
)
for alias, query in self._queries:
try:
duplicates_count, color = query_duplicates[alias][query["raw_sql"]]
query["duplicate_count"] = duplicates_count
query["duplicate_color"] = color
except KeyError:
pass
for alias, alias_info in self._databases.items():
try:
alias_info["duplicate_count"] = sum(e[0] for e in query_duplicates[alias].values())
except KeyError:
pass
self.record_stats({
'databases': sorted(self._databases.items(), key=lambda x: -x[1]['time_spent']),
'queries': [q for a, q in self._queries],
'sql_time': self._sql_time,
})
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the input_lib library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor as ragged_tensor_lib
from tensorflow.python.util import nest
class DistributedIteratorTestBase(test.TestCase):
# The passed input_context is to create a sharded dataset in between-graph
# case.
# TODO(yuefengz): rewrite the following method to make it less DRY.
def _wrap_iterator(self,
input_type,
dataset_or_input_fn,
input_workers,
devices,
num_replicas_in_sync,
strategy,
input_context=None):
# The `input_context` passed in is to shard dataset for
# MultiWorkerMirroredStrategy. It doesn't apply to in-graph case where
# multiple InputContexts are needed.
if input_type == "input_fn":
self.assertIsNone(
input_context,
msg=("`The input_context` arg is only used to shard dataset in "
"`MultiWorkerMirroredStrategy` when the input type is dataset."))
input_contexts = []
for i in range(input_workers.num_workers):
input_contexts.append(
distribute_lib.InputContext(
# Note: `input_workers.num_workers` is always 1 in between-graph
# case.
num_input_pipelines=input_workers.num_workers,
input_pipeline_id=i,
num_replicas_in_sync=len(devices)))
iterator = input_lib.InputFunctionIterator(
dataset_or_input_fn,
input_workers,
input_contexts,
strategy)
else:
iterator = input_lib.DatasetIterator(
dataset_or_input_fn,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
return iterator
def _wrap_dataset(self,
input_type,
dataset,
input_workers,
num_replicas_in_sync,
strategy,
input_context=None):
if input_type == "dataset":
if tf2.enabled():
return input_lib.DistributedDataset(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
else:
return input_lib.DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
else:
return strategy.distribute_datasets_from_function(dataset)
def _assert_iterator_values(self,
iterator,
expected_values,
evaluate_fn,
devices,
enable_get_next_as_optional=False):
actual_values = []
for _ in range(len(expected_values)):
if enable_get_next_as_optional:
next_element = iterator.get_next_as_optional().get_value()
else:
next_element = iterator.get_next()
computed_value = evaluate_fn([
distribute_utils.select_replica(r, next_element)
for r in range(len(devices))
])
actual_values.append(computed_value)
for expected_value, actual_value in zip(expected_values, actual_values):
for expected, actual in zip(expected_value, actual_value):
self.assertAllEqual(expected, actual)
def _assert_dataset_values_for_loop(self, dataset, expected_values,
evaluate_fn, devices):
actual_values = []
for x in dataset:
computed_value = self.evaluate(
[distribute_utils.select_replica(r, x) for r in range(len(devices))])
actual_values.append(computed_value)
for expected_value, actual_value in zip(expected_values, actual_values):
for expected, actual in zip(expected_value, actual_value):
self.assertAllEqual(expected, actual)
def _test_input_iteration(self,
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
strategy,
sess=None,
num_replicas_in_sync=None,
input_context=None):
if iteration_type == "for_loop" and not context.executing_eagerly():
self.skipTest("unsupported test combination.")
if api_type == "wrap_into_iterator" and iteration_type == "for_loop":
self.skipTest("unsupported test combination.")
if api_type == "wrap_into_iterator" and input_type == "input_fn":
self.skipTest("unsupported test combination.")
devices = nest.flatten([ds for _, ds in worker_device_pairs])
input_workers = input_lib.InputWorkers(worker_device_pairs)
if api_type == "wrap_into_iterator":
iterator = self._wrap_iterator(
input_type,
dataset_or_input_fn,
input_workers,
devices,
num_replicas_in_sync,
strategy,
input_context=input_context)
else:
# wrapping into a dataset:
dataset = self._wrap_dataset(
input_type,
dataset_or_input_fn,
input_workers,
num_replicas_in_sync,
strategy,
input_context=input_context)
if ops.executing_eagerly_outside_functions():
iterator = iter(dataset)
else:
if isinstance(dataset, input_lib.DistributedDatasetV1):
iterator = dataset.make_initializable_iterator()
else:
self.skipTest("unsupported test combination")
if isinstance(iterator, composite_tensor.CompositeTensor):
nest.assert_same_structure(iterator, iterator._type_spec,
expand_composites=True)
if iteration_type == "get_next":
evaluate = lambda x: sess.run(x) if sess else self.evaluate(x)
if not ops.executing_eagerly_outside_functions():
evaluate(control_flow_ops.group(iterator.initializer))
def test_get_next(iterator):
self._assert_iterator_values(iterator, expected_values, evaluate,
devices)
with self.assertRaises(errors.OutOfRangeError):
self._assert_iterator_values(iterator, expected_values, evaluate,
devices)
# After re-initializing the iterator, should be able to iterate again.
if not ops.executing_eagerly_outside_functions():
evaluate(control_flow_ops.group(iterator.initializer))
else:
if api_type == "wrap_into_iterator":
self.skipTest("unsupported test combination")
else:
iterator = iter(dataset)
self._assert_iterator_values(iterator, expected_values, evaluate,
devices)
def test_get_next_as_optional(iterator):
self._assert_iterator_values(
iterator,
expected_values,
evaluate,
devices,
enable_get_next_as_optional=True)
next_element = iterator.get_next_as_optional()
self.assertFalse(self.evaluate(next_element.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self._assert_iterator_values(
iterator, [0],
evaluate,
devices,
enable_get_next_as_optional=True)
test_get_next(iterator)
# re-initializing the iterator
if not tf2.enabled():
# TODO(yuefengz): we should split this function.
return
else:
if api_type == "wrap_into_iterator":
return
else:
iterator = iter(dataset)
test_get_next_as_optional(iterator)
if iteration_type == "for_loop" and context.executing_eagerly():
self._assert_dataset_values_for_loop(dataset, expected_values,
self.evaluate, devices)
def _create_dataset_or_input_fn(self, input_type, input_fn):
  """Returns `input_fn` itself, or the dataset it builds, per `input_type`."""
  if input_type == "input_fn":
    return input_fn
  # "dataset" case: materialize the dataset with a default input context.
  return input_fn(distribute_lib.InputContext())
class DistributedIteratorTest(DistributedIteratorTestBase,
                              parameterized.TestCase):
  """Tests for distributed iterators over dense-tensor datasets."""

  @combinations.generate(
      combinations.combine(
          mode=["eager"],
          input_type=["input_fn", "dataset"],
          distribution=[
              strategy_combinations.one_device_strategy,
              strategy_combinations.mirrored_strategy_with_one_cpu,
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
              strategy_combinations.multi_worker_mirrored_2x1_cpu
          ]))
  def testDisablingOwnedIteratorsInTF2(self, distribution, input_type):
    """Verifies `_enable_legacy_iterators` switches off owned iterators."""
    if not tf2.enabled():
      self.skipTest("unsupported test combination")

    worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
    # Build the InputWorkers once (the original code constructed an
    # identical object twice; the duplicate was redundant and is removed).
    input_workers = input_lib.InputWorkers(worker_device_pairs)
    dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)
    dataset_or_input_fn = self._create_dataset_or_input_fn(
        input_type, dataset_fn)
    if input_type == "dataset":
      dist_dataset = input_lib.get_distributed_dataset(dataset_or_input_fn,
                                                       input_workers,
                                                       distribution)
    else:
      dist_dataset = input_lib.get_distributed_datasets_from_function(
          dataset_or_input_fn, input_workers, [distribute_lib.InputContext()],
          distribution)

    # Default Iterator types in TF2 are the "owned" variants.
    iterator = iter(dist_dataset)
    self.assertIsInstance(iterator, input_lib.DistributedIterator)
    self.assertIsInstance(iterator._iterators[0],
                          input_lib._SingleWorkerOwnedDatasetIterator)

    # Disable creating owned iterators by setting a property on the strategy.
    distribution._enable_legacy_iterators = True
    iterator = iter(dist_dataset)
    self.assertIsInstance(iterator, input_lib.DistributedIteratorV1)
    self.assertIsInstance(iterator._iterators[0],
                          input_lib._SingleWorkerDatasetIterator)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu
        ]))
def testMultiDeviceIterInitialize(self, distribution):
  """Checks a multi-device V1 iterator can be initialized in a tf.function."""
  if tf2.enabled():
    self.skipTest("Only V1 is supported.")
  worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                            "/device:CPU:0"])]
  dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)
  input_workers = input_lib.InputWorkers(worker_device_pairs)
  dist_dataset = input_lib.get_distributed_dataset(
      dataset_fn(distribute_lib.InputContext()), input_workers, distribution)
  iterator = dataset_ops.make_one_shot_iterator(dist_dataset)

  @def_function.function
  def init_func_for_iter():
    # Running the initializer inside a tf.function exercises graph-mode
    # initialization of the multi-device iterator.
    self.evaluate(iterator.initializer)

  init_func_for_iter()
@combinations.generate(
    combinations.combine(
        mode=["graph", "eager"],
        input_type=["input_fn", "dataset"],
        api_type=["wrap_into_iterator", "wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        distribution=[
            strategy_combinations.one_device_strategy,
            strategy_combinations.mirrored_strategy_with_one_cpu,
        ],
        enable_get_next_as_optional=[True, False]))
def testOneDeviceCPU(self, input_type, api_type, iteration_type, distribution,
                     enable_get_next_as_optional):
  """Iterates a range(10) dataset on a single CPU device."""
  device_map = [("/device:CPU:0", ["/device:CPU:0"])]
  make_dataset = lambda _: dataset_ops.Dataset.range(10)
  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, make_dataset)
  # Single replica: each step yields exactly one element.
  expected = [[value] for value in range(10)]
  distribution.extended.experimental_enable_get_next_as_optional = (
      enable_get_next_as_optional)
  self._test_input_iteration(input_type, api_type, iteration_type,
                             dataset_or_input_fn, device_map,
                             expected, distribution)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        input_type=["input_fn", "dataset"],
        api_type=["wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        distribution=[strategy_combinations.multi_worker_mirrored_2x1_cpu],
        enable_get_next_as_optional=[True, False]))
def testOneDeviceCPUMultiWorker(self, input_type, api_type, iteration_type,
                                distribution, enable_get_next_as_optional):
  """Iterates a V1 range dataset with one CPU replica per worker."""
  worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
  dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)
  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  # One local replica: each step yields one element.
  expected_values = [[i] for i in range(10)]
  distribution.extended.experimental_enable_get_next_as_optional = (
      enable_get_next_as_optional)
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution)
@combinations.generate(
    combinations.combine(
        mode=["graph", "eager"],
        input_type=["input_fn", "dataset"],
        api_type=["wrap_into_iterator", "wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.central_storage_strategy_with_gpu_and_cpu
        ],
        enable_get_next_as_optional=[True, False]))
def testTwoDevicesOneGPUOneCPU(self, input_type, api_type, iteration_type,
                               distribution, enable_get_next_as_optional):
  """Iterates a range dataset split across one GPU and one CPU replica."""
  worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                            "/device:CPU:0"])]
  dataset_fn = lambda _: dataset_ops.Dataset.range(10)
  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  # Two replicas: consecutive elements are distributed round-robin, so each
  # step yields a pair (i, i+1).
  expected_values = [[i, i+1] for i in range(0, 10, 2)]
  distribution.extended.experimental_enable_get_next_as_optional = (
      enable_get_next_as_optional)
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution)
@combinations.generate(
    combinations.combine(
        mode=["graph", "eager"],
        input_type=["input_fn", "dataset"],
        api_type=["wrap_into_iterator", "wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        distribution=[strategy_combinations.tpu_strategy],
        enable_get_next_as_optional=[True, False]))
def testTPU(self, input_type, api_type, iteration_type, distribution,
            enable_get_next_as_optional):
  """Iterates a range dataset on TPU devices, grouped by their host."""
  worker_device_pairs = collections.OrderedDict()
  # Group each TPU device under its host device so every host feeds only
  # its own attached TPU cores; OrderedDict preserves device order.
  for tpu_device in distribution.extended.worker_devices:
    host_device = device_util.get_host_for_device(tpu_device)
    worker_device_pairs.setdefault(host_device, [])
    worker_device_pairs[host_device].append(tpu_device)
  worker_device_pairs = worker_device_pairs.items()
  dataset_fn = lambda _: dataset_ops.Dataset.range(10)
  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  # Assumes two replicas; each step yields a pair (i, i+1).
  expected_values = [[i, i + 1] for i in range(0, 10, 2)]
  distribution.extended.experimental_enable_get_next_as_optional = (
      enable_get_next_as_optional)
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution)
@combinations.generate(
    combinations.combine(
        mode=["graph", "eager"],
        input_type=["input_fn", "dataset"],
        api_type=["wrap_into_iterator", "wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
        ],
        enable_get_next_as_optional=[True, False]))
def testTupleDataset(self, input_type, api_type, iteration_type, distribution,
                     enable_get_next_as_optional):
  """Iterates a zipped (x, x**2) tuple dataset over two replicas."""
  worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                            "/device:CPU:0"])]

  def dataset_fn(ctx):
    del ctx
    # Zip a range with its squares so each element is a 2-tuple.
    dataset1 = dataset_ops.Dataset.range(10)
    dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
    return dataset_ops.Dataset.zip((dataset1, dataset2))

  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  # Two replicas: tuples are distributed round-robin across devices.
  expected_values = [[(i, i**2), (i+1, (i+1)**2)] for i in range(0, 10, 2)]
  distribution.extended.experimental_enable_get_next_as_optional = (
      enable_get_next_as_optional)
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        input_type=["input_fn", "dataset"],
        api_type=["wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        distribution=[strategy_combinations.multi_worker_mirrored_2x2_gpu],
        enable_get_next_as_optional=[True, False]))
def testTupleDatasetMultiworker(self, input_type, api_type, iteration_type,
                                distribution, enable_get_next_as_optional):
  """Iterates a zipped (x, x**2) tuple dataset with two GPUs per worker."""
  worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                            "/device:GPU:1"])]

  def dataset_fn(ctx):
    del ctx
    dataset1 = dataset_ops.Dataset.range(10)
    dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
    return dataset_ops.Dataset.zip((dataset1, dataset2))

  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  expected_values = [
      [(i, i**2), (i + 1, (i + 1)**2)] for i in range(0, 10, 2)
  ]
  distribution.extended.experimental_enable_get_next_as_optional = (
      enable_get_next_as_optional)
  # Input_context is not passed in and thus no sharding.
  self._test_input_iteration(input_type, api_type, iteration_type,
                             dataset_or_input_fn, worker_device_pairs,
                             expected_values, distribution)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        distribution=[
            strategy_combinations.one_device_strategy,
            strategy_combinations.mirrored_strategy_with_one_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
        ]))
def testIterableIterator(self, distribution):
  """A distributed iterator supports the Python iteration protocol."""
  device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
  workers = input_lib.InputWorkers(device_pairs)
  dist_dataset = input_lib.get_distributed_dataset(
      dataset_ops.Dataset.range(10), workers, distribution)
  # Each step of the single-replica iterator yields element `index`.
  for index, element in enumerate(iter(dist_dataset)):
    self.assertAllEqual(
        distribution.experimental_local_results(element), [index])
@combinations.generate(
    combinations.combine(
        mode=["graph", "eager"],
        input_type=["input_fn", "dataset"],
        api_type=["wrap_into_iterator", "wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        drop_remainder=[True, False],
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.central_storage_strategy_with_gpu_and_cpu
        ]))
def testUnevenDatasetBatches(self, input_type, api_type, iteration_type,
                             drop_remainder, distribution):
  """Two replicas with 9 elements batched by 2: exercises partial batches."""
  worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                            "/device:CPU:0"])]
  dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(  # pylint: disable=g-long-lambda
      2, drop_remainder=drop_remainder)
  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  # The last global batch only contains data for one replica.
  if drop_remainder:
    expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
  else:
    expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]]
  distribution.extended.experimental_enable_get_next_as_optional = True
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        input_type=["input_fn", "dataset"],
        api_type=["wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        drop_remainder=[True, False],
        distribution=[
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_gpu,
        ]))
def testUnevenDatasetBatchesMultiWorker(self, input_type, api_type,
                                        iteration_type, drop_remainder,
                                        distribution):
  """Partial-batch handling across two workers with one replica each."""
  # Actual devices don't matter in this test as long as the number of global
  # replicas is 2.
  worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
  cr = distribution.cluster_resolver
  self.assertIsNotNone(cr)
  worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
                                                cr.task_type)
  id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
                                                  cr.task_type, cr.task_id)

  def dataset_fn(_):
    dataset = dataset_ops.Dataset.range(9)
    if input_type == "input_fn":
      # When input_fn is used, there is no automatic rebatching and sharding,
      # so we add them here.
      return dataset.shard(worker_count, id_in_cluster).batch(1)
    else:
      return dataset.batch(2, drop_remainder=drop_remainder)

  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  # With sharding, worker 0 sees the even elements and worker 1 the odd ones.
  if drop_remainder and input_type == "dataset":
    if id_in_cluster == 0:
      expected_values = [[[0]], [[2]], [[4]], [[6]]]
    else:
      expected_values = [[[1]], [[3]], [[5]], [[7]]]
  else:
    # The last global batch only contains data for one replica.
    if id_in_cluster == 0:
      expected_values = [[[0]], [[2]], [[4]], [[6]], [[8]]]
    else:
      expected_values = [[[1]], [[3]], [[5]], [[7]], [[]]]
  distribution.extended.experimental_enable_get_next_as_optional = True
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution,
      num_replicas_in_sync=distribution.num_replicas_in_sync,
      input_context=distribution.extended._make_input_context())
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        input_type=["input_fn", "dataset"],
        api_type=["wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        drop_remainder=[True, False],
        distribution=[
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
        ]))
def testUnevenDatasetBatchesMultiWorkerFourReplicas(self, input_type,
                                                    api_type, iteration_type,
                                                    drop_remainder,
                                                    distribution):
  """Partial-batch handling with 2 workers x 2 GPUs (4 global replicas)."""
  # Actual devices don't matter in this test as long as the number of global
  # replicas is 4 (2 workers with 2 local replicas each).
  worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                            "/device:GPU:1"])]
  cr = distribution.cluster_resolver
  self.assertIsNotNone(cr)
  worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
                                                cr.task_type)
  id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
                                                  cr.task_type, cr.task_id)

  def dataset_fn(_):
    dataset = dataset_ops.Dataset.range(15)
    if input_type == "input_fn":
      # When input_fn is used, there is no automatic rebatching and sharding,
      # so we add them here.
      return dataset.shard(worker_count, id_in_cluster).batch(1)
    else:
      return dataset.batch(4, drop_remainder=drop_remainder)

  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  # The last global batch only contains data for one replica.
  if drop_remainder and input_type == "dataset":
    if id_in_cluster == 0:
      expected_values = [[[0], [2]], [[4], [6]], [[8], [10]]]
    else:
      expected_values = [[[1], [3]], [[5], [7]], [[9], [11]]]
  else:
    if id_in_cluster == 0:
      expected_values = [[[0], [2]], [[4], [6]], [[8], [10]], [[12], [14]]]
    else:
      expected_values = [[[1], [3]], [[5], [7]], [[9], [11]], [[13], []]]
  distribution.extended.experimental_enable_get_next_as_optional = True
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution,
      num_replicas_in_sync=distribution.num_replicas_in_sync,
      input_context=distribution.extended._make_input_context())
@combinations.generate(
    combinations.combine(
        mode=["graph", "eager"],
        input_type=["dataset"],
        api_type=["wrap_into_iterator", "wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        num_replicas_in_sync=[None, 2],
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.central_storage_strategy_with_gpu_and_cpu
        ],
        enable_get_next_as_optional=[True, False]))
def testBatchSplitting(self, input_type, api_type, iteration_type,
                       num_replicas_in_sync, distribution,
                       enable_get_next_as_optional):
  """Global batches are split evenly when `num_replicas_in_sync` is set."""
  worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                            "/device:CPU:0"])]
  batch_size = 10
  dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size)
  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  # When splitting, each replica receives batch_size // num_replicas_in_sync
  # elements per step.
  updated_batch_size = (
      batch_size //
      num_replicas_in_sync if num_replicas_in_sync else batch_size)
  expected_values = [[range(i, i+updated_batch_size),
                      range(i+updated_batch_size, i+2*updated_batch_size)]
                     for i in range(0, 100, updated_batch_size*2)]
  distribution.extended.experimental_enable_get_next_as_optional = (
      enable_get_next_as_optional)
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution,
      sess=None,
      num_replicas_in_sync=num_replicas_in_sync)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        input_type=["dataset"],
        api_type=["wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        num_replicas_in_sync=[None, 2],
        distribution=[
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
        ],
        enable_get_next_as_optional=[True, False]))
def testBatchSplittingMultiWorker(self, input_type, api_type, iteration_type,
                                  num_replicas_in_sync, distribution,
                                  enable_get_next_as_optional):
  """Batch splitting with multiple workers (2 workers x 2 GPUs)."""
  worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
                                            "/device:GPU:1"])]
  batch_size = 10
  cr = distribution.cluster_resolver
  self.assertIsNotNone(cr)

  def dataset_fn(_):
    dataset = dataset_ops.Dataset.range(100).batch(batch_size)
    return dataset

  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)
  # Each replica receives batch_size // num_replicas_in_sync elements when
  # splitting is requested; otherwise the full batch.
  updated_batch_size = (
      batch_size //
      num_replicas_in_sync if num_replicas_in_sync else batch_size)
  expected_values = [
      [  # pylint: disable=g-complex-comprehension
          range(i, i + updated_batch_size),
          range(i + updated_batch_size, i + 2 * updated_batch_size)
      ] for i in range(0, 100, updated_batch_size * 2)
  ]
  distribution.extended.experimental_enable_get_next_as_optional = (
      enable_get_next_as_optional)
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution,
      sess=None,
      num_replicas_in_sync=num_replicas_in_sync)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        distribution=[
            strategy_combinations.one_device_strategy,
            strategy_combinations.mirrored_strategy_with_one_cpu,
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.tpu_strategy,
            strategy_combinations.central_storage_strategy_with_two_gpus,
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
        ],
    ))
def testCacheAcrossIteration(self, distribution):
  """A cached, shuffled dataset yields identical elements every epoch."""
  if not tf2.enabled():
    self.skipTest("Only V2 is supported.")

  dataset = dataset_ops.Dataset.range(16).shuffle(16).cache().batch(4)
  dist_dataset = distribution.experimental_distribute_dataset(dataset)

  def collect_epoch():
    # Materialize one full pass of per-replica results.
    return [distribution.experimental_local_results(batch)
            for batch in dist_dataset]

  first_epoch = collect_epoch()
  second_epoch = collect_epoch()
  # `cache()` pins the shuffled order, so the two epochs must agree.
  self.assertAllEqual(first_epoch, second_epoch)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        distribution=[
            strategy_combinations.one_device_strategy,
            strategy_combinations.mirrored_strategy_with_one_cpu,
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.tpu_strategy,
            strategy_combinations.central_storage_strategy_with_two_gpus,
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
        ],
        reshuffle=[True, False]))
def testShuffleAcrossIterations(self, distribution, reshuffle):
  """`reshuffle_each_iteration` controls whether epochs differ."""
  if not tf2.enabled():
    self.skipTest("Only V2 is supported.")
  if not reshuffle and not compat.forward_compatible(2020, 5, 22):
    self.skipTest("Functionality currently not supported.")

  dataset = dataset_ops.Dataset.range(12).shuffle(
      12, reshuffle_each_iteration=reshuffle).batch(4)
  dist_dataset = distribution.experimental_distribute_dataset(dataset)

  first_epoch = list(
      distribution.experimental_local_results(x) for x in dist_dataset)
  second_epoch = list(
      distribution.experimental_local_results(x) for x in dist_dataset)

  # Reshuffling should produce a different order; otherwise the shuffle
  # order is fixed across epochs.
  if reshuffle:
    self.assertNotAllEqual(first_epoch, second_epoch)
  else:
    self.assertAllEqual(first_epoch, second_epoch)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        distribution=[
            strategy_combinations.one_device_strategy,
            strategy_combinations.mirrored_strategy_with_one_cpu,
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.tpu_strategy,
            strategy_combinations.central_storage_strategy_with_two_gpus,
            strategy_combinations.multi_worker_mirrored_2x2_gpu,
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
        ]))
def testGetNextOptionalShape(self, distribution):
  """With drop_remainder=True, per-replica shapes stay fully static."""
  batch_size = 8
  dataset = dataset_ops.DatasetV2.from_tensor_slices({
      "feature": array_ops.ones([batch_size, 10]),
      "label": array_ops.ones([batch_size]),
  })
  # drop_remainder=True makes the batch dimension static.
  dataset = dataset.batch(batch_size, drop_remainder=True)
  dist_dataset = distribution.experimental_distribute_dataset(dataset)
  per_replica_batch_size = batch_size // distribution.num_replicas_in_sync

  @def_function.function
  def train_fn():
    for data in dist_dataset:
      data = nest.map_structure(distribution.experimental_local_results, data)
      feature = data["feature"]
      label = data["label"]

      # Assert the shapes are still static from all replicas.
      for replica_id in range(len(distribution.extended.worker_devices)):
        self.assertEqual([per_replica_batch_size, 10],
                         feature[replica_id].shape)
        self.assertEqual([per_replica_batch_size], label[replica_id].shape)

  train_fn()
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        distribution=[
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
        ],
        input_type=["dataset"],
        api_type=["wrap_into_iterator", "wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"],
        auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.OFF]))
def testAutoshardingOption(self, distribution, input_type, api_type,
                           iteration_type, auto_shard_policy):
  """AUTO sharding splits elements across workers; OFF replicates them."""
  cr = distribution.cluster_resolver
  self.assertIsNotNone(cr)
  id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
                                                  cr.task_type, cr.task_id)
  ds_option = dataset_ops.Options()
  ds_option.experimental_distribute.auto_shard_policy = auto_shard_policy
  dataset_fn = (
      lambda _: dataset_ops.Dataset.range(4).with_options(ds_option))
  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)

  worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
  # AUTO: worker 0 gets even elements, worker 1 odd; OFF: every worker
  # sees the full dataset.
  if auto_shard_policy == AutoShardPolicy.AUTO:
    if id_in_cluster == 0:
      expected_values = [[0], [2]]
    else:
      expected_values = [[1], [3]]
  else:
    expected_values = [[0], [1], [2], [3]]
  self._test_input_iteration(
      input_type,
      api_type,
      iteration_type,
      dataset_or_input_fn,
      worker_device_pairs,
      expected_values,
      distribution,
      input_context=distribution.extended._make_input_context())
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        distribution=[
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
        ],
        input_type=["input_fn"],
        api_type=["wrap_into_dataset"],
        iteration_type=["get_next", "for_loop"]))
def testDifferentDatasetsMultiWorker(self, distribution, input_type, api_type,
                                     iteration_type):
  """Workers may receive datasets of different lengths from an input_fn."""
  cr = distribution.cluster_resolver
  self.assertIsNotNone(cr)
  id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
                                                  cr.task_type, cr.task_id)

  def dataset_fn(ctx):
    # Pipeline 0 has 8 elements, pipeline 1 has 9: worker 0 runs out first.
    if ctx.input_pipeline_id == 0:
      return dataset_ops.Dataset.range(8).batch(2)
    else:
      return dataset_ops.Dataset.range(9).batch(2)

  dataset_or_input_fn = self._create_dataset_or_input_fn(
      input_type, dataset_fn)

  worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]

  # The shorter pipeline yields an empty final batch.
  if id_in_cluster == 0:
    expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[]]]
  else:
    expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[8]]]
  distribution.extended.experimental_enable_get_next_as_optional = True
  self._test_input_iteration(input_type, api_type, iteration_type,
                             dataset_or_input_fn, worker_device_pairs,
                             expected_values, distribution)
@combinations.generate(
    combinations.combine(
        strategy=[
            strategy_combinations.multi_worker_mirrored_2x1_cpu,
            strategy_combinations.multi_worker_mirrored_2x1_gpu,
        ],
        mode=["eager"]))
def testLoopOverDatasetInTFFunction(self, strategy):
  """Sums x**2 for x in range(10) via strategy.run inside a tf.function."""
  dataset = dataset_ops.Dataset.range(10).map(lambda x: {  # pylint: disable=g-long-lambda
      "y": math_ops.cast(x, dtypes.float32) ** 2,
  }).batch(4)
  dist_dataset = strategy.experimental_distribute_dataset(dataset)

  with strategy.scope():
    v = variables.Variable(0.0, aggregation=variables.VariableAggregation.SUM)

  @def_function.function
  def iterator_fn(dist_dataset):

    def assign_add_fn(data):
      v.assign_add(math_ops.reduce_sum(data["y"]))

    for data in dist_dataset:
      strategy.run(assign_add_fn, args=(data,))

  iterator_fn(dist_dataset)
  # sum(x**2 for x in range(10)) == 285.
  self.assertEqual(v.numpy(), 285.0)
class DistributedIteratorTensorTypeTest(DistributedIteratorTestBase,
                                        parameterized.TestCase):
  """Tests for DistributedDataset with non-dense tensors."""

  @combinations.generate(
      combinations.combine(
          mode=["eager"],
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
              strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
          ],
          input_type=["dataset", "input_fn"],
          drop_remainder=[False, True],
          defun_type=["lambda", "tf_function"],
      ))
  def testRaggedSparse(self, distribution, input_type, drop_remainder,
                       defun_type):
    """Test with `RaggedTensor`s and `SparseTensor`s."""
    if not tf2.enabled():
      self.skipTest("Only V2 is supported.")

    defun = {"lambda": lambda f: f,
             "tf_function": def_function.function}[defun_type]
    distribution.extended.experimental_enable_get_next_as_optional = True
    global_batch_size = 8

    def dataset_fn(ctx=None):
      ctx = ctx or distribute_lib.InputContext()
      batch_size = ctx.get_per_replica_batch_size(global_batch_size)
      # Use 20 which isn't divisible by 8 to test partial batch behavior.
      row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
      ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
          np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
      # The same underlying values, in dense, ragged and sparse form.
      dataset = dataset_ops.DatasetV2.from_tensor_slices({
          "dense": ragged_tensor.to_tensor(),
          "ragged": ragged_tensor,
          "sparse": ragged_tensor.to_sparse(),
      })
      dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
      return dataset.batch(batch_size, drop_remainder=drop_remainder)

    dataset_or_input_fn = self._create_dataset_or_input_fn(
        input_type, dataset_fn)
    dataset = self._wrap_dataset(input_type, dataset_or_input_fn,
                                 distribution.extended._input_workers,
                                 len(distribution.extended.worker_devices),
                                 distribution)
    # Assert that the tensors are rebatched and sparsity is preserved.
    per_replica_batch = defun(lambda x: next(iter(x)))(dataset)
    self.assertAllEqual(
        distribute_utils.select_replica(0, per_replica_batch["dense"]),
        [[0., 0., 0.], [1., 0., 0.], [2., 2., 0.], [3., 3., 3.]])
    self.assertAllEqual(
        distribute_utils.select_replica(1, per_replica_batch["dense"]),
        [[0., 0., 0.], [5., 0., 0.], [6., 6., 0.], [7., 7., 7.]])
    # Transitively check the ragged and sparse tensors by densification.
    for i in range(2):
      self.assertLen(
          distribute_utils.select_replica(i,
                                          per_replica_batch["ragged"]).values,
          6)
      self.assertAllEqual(
          distribute_utils.select_replica(
              i, per_replica_batch["ragged"]).to_tensor(),
          distribute_utils.select_replica(i, per_replica_batch["dense"]))
      self.assertLen(
          distribute_utils.select_replica(i,
                                          per_replica_batch["sparse"]).indices,
          6)
      self.assertAllEqual(
          sparse_ops.sparse_tensor_to_dense(
              distribute_utils.select_replica(i, per_replica_batch["sparse"])),
          distribute_utils.select_replica(i, per_replica_batch["dense"]))
    # Iterate through all the batches and sum them up.
    def sum_batch(per_replica_features):
      """Sums the `PerReplica` values in the `per_replica_features` map."""

      def map_fn(per_replica_values):
        # Sparse per-replica values are reduced over their `.values`;
        # dense/ragged values are reduced directly.
        per_replica_sums = distribution.run(
            (lambda x: math_ops.reduce_sum(x.values)) if all(
                map(sparse_tensor.is_sparse, per_replica_values.values)) else
            math_ops.reduce_sum, (per_replica_values,))
        return distribution.reduce(
            reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)

      return nest.map_structure(map_fn, per_replica_features)

    def _reduce(state, batch):
      sums = sum_batch(batch)
      return {name: value + sums[name] for name, value in state.items()}

    def sum_for_loop(dataset):
      sums = {"dense": 0., "ragged": 0., "sparse": 0.}
      for batch in dataset:
        sums = _reduce(sums, batch)
      return sums

    def sum_while_loop(iterator, reduce_fn):
      sums = {"dense": 0., "ragged": 0., "sparse": 0.}
      while True:
        try:
          sums = reduce_fn(sums, iterator)
        except (StopIteration, errors.OutOfRangeError):
          return sums

    while_sums = sum_while_loop(
        iter(dataset),
        defun(lambda state, iterator: _reduce(state, next(iterator))))
    self.assertAllEqual(
        nest.flatten(while_sums),
        # When there's no partial batch, the sum is smaller.
        [200. if drop_remainder else 310.] * 3)
    for_sums = defun(sum_for_loop)(dataset)
    # For loops always call get next as optional inside tf functions, so we
    # expect 310 here when using an input function (as there are 5 batches of
    # size 4 round robined over 2 replicas).
    expected_for_sum = 200.
    if (not drop_remainder or (
        defun_type == "tf_function" and input_type == "input_fn")):
      expected_for_sum = 310.
    self.assertAllEqual(nest.flatten(for_sums), [expected_for_sum] * 3)
@combinations.generate(
    combinations.combine(
        mode=["eager"],
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
            strategy_combinations.one_device_strategy,
            strategy_combinations.mirrored_strategy_with_one_cpu
        ],
        input_type=["dataset", "input_fn"],
        drop_remainder=[False, True],
        tensor_type=["sparse", "ragged"],
        enable_get_next_as_optional=[True, False]
    ))
def testRaggedSparseGetNextAsOptional(
    self, distribution, input_type, drop_remainder, tensor_type,
    enable_get_next_as_optional):
  """Test with `RaggedTensor`s and `SparseTensor`s."""
  if not tf2.enabled():
    self.skipTest("Only V2 is supported.")

  distribution.extended.experimental_enable_get_next_as_optional = (
      enable_get_next_as_optional)
  global_batch_size = 8

  def dataset_fn(ctx=None):
    ctx = ctx or distribute_lib.InputContext()
    batch_size = ctx.get_per_replica_batch_size(global_batch_size)
    # Use 20 which isn't divisible by 8 to test partial batch behavior.
    row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
    ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
        np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
    dataset = dataset_ops.DatasetV2.from_tensor_slices({
        tensor_type: (ragged_tensor if tensor_type == "ragged" else
                      ragged_tensor.to_sparse()),
    })
    dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
    return dataset.batch(batch_size, drop_remainder=drop_remainder)

  if input_type == "dataset":
    ds = distribution.experimental_distribute_dataset(
        dataset_fn(distribute_lib.InputContext()))
  else:
    ds = distribution.distribute_datasets_from_function(dataset_fn)
  iterator = iter(ds)

  # get_next_as_optional is only needed when the final batch may be partial
  # (i.e. drop_remainder is False) AND the feature flag is on.
  self.assertEqual(iterator._enable_get_next_as_optional,
                   (not drop_remainder) and enable_get_next_as_optional)
@combinations.generate(
    combinations.combine(
        tf_api_version=2,
        mode=["eager"],
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
            strategy_combinations.one_device_strategy,
            strategy_combinations.mirrored_strategy_with_one_cpu,
            # TODO(mdan): Add these?
            # strategy_combinations.multi_worker_mirrored_2x1_cpu,
            # strategy_combinations.multi_worker_mirrored_2x1_gpu,
            # strategy_combinations.multi_worker_mirrored_2x2_gpu,
        ],
        input_type=["dataset", "input_fn"],
        drop_remainder=[False, True],
    ))
def testRaggedSparseGetNextAsOptionalInLoop(
    self, distribution, input_type, drop_remainder):
  """Test with `RaggedTensor`s and `SparseTensor`s."""
  self.skipTest("b/323359921")

  global_batch_size = 8

  def dataset_fn(ctx=None):
    ctx = ctx or distribute_lib.InputContext()
    batch_size = ctx.get_per_replica_batch_size(global_batch_size)
    # Use 20 which isn't divisible by 8 to test partial batch behavior.
    row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
    ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
        np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
    dataset = dataset_ops.DatasetV2.from_tensor_slices({
        "dense": ragged_tensor.to_tensor(),
        "ragged": ragged_tensor,
        "sparse": ragged_tensor.to_sparse(),
    })
    dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
    return dataset.batch(batch_size, drop_remainder=drop_remainder)

  if input_type == "dataset":
    ds = distribution.experimental_distribute_dataset(
        dataset_fn(distribute_lib.InputContext()))
  else:
    ds = distribution.distribute_datasets_from_function(dataset_fn)

  # Iterate through all the batches and sum them up.
  def sum_batch(per_replica_features):
    """Sums the `PerReplica` values in the `per_replica_features` map."""

    def map_fn(per_replica_values):
      per_replica_sums = distribution.run(
          (lambda x: math_ops.reduce_sum(x.values)) if all(
              map(sparse_tensor.is_sparse, per_replica_values.values)) else
          math_ops.reduce_sum, (per_replica_values,))
      return distribution.reduce(
          reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)

    return nest.map_structure(map_fn, per_replica_features)

  def _reduce(state, batch):
    sums = sum_batch(batch)
    return {name: value + sums[name] for name, value in state.items()}

  def sum_while_loop(ds):
    # Drain the iterator via get_next_as_optional, stopping when the
    # optional is empty instead of relying on OutOfRangeError.
    iterator = iter(ds)
    sums = {"dense": 0., "ragged": 0., "sparse": 0.}
    try_next = constant_op.constant(True)

    while try_next:
      opt_iterate = iterator.get_next_as_optional()
      if opt_iterate.has_value():
        sums = _reduce(sums, opt_iterate.get_value())
      else:
        try_next = False
    return sums

  sums = def_function.function(sum_while_loop)(ds)
  # get_next_as_optional returns the partial final batch too, so we expect
  # 310 when using an input function (as there are 5 batches of size 4
  # round robined over 2 replicas).
  expected_for_sum = 200.
  if not drop_remainder or input_type == "input_fn":
    expected_for_sum = 310.
  self.assertAllEqual(nest.flatten(sums), [expected_for_sum] * 3)
  @combinations.generate(
      combinations.combine(
          mode=["eager"],
          input_type=["dataset"],
          api_type=["wrap_into_iterator", "wrap_into_dataset"],
          iteration_type=["get_next", "for_loop"],
          distribution=[
              strategy_combinations.multi_worker_mirrored_2x1_cpu,
              strategy_combinations.multi_worker_mirrored_2x1_gpu,
          ]))
  def testMWMSPartialBatch(self, input_type, api_type, iteration_type,
                           distribution):
    """Checks MultiWorkerMirroredStrategy rebatching of a partial batch."""
    # Test case: 2 workers, 1 replica each.
    # This test simulates the sharded behavior when we have two files each with
    # 12 elements and a global batch size of 8. When we consider the dataset in
    # aggregate (non-distributed), there are 24 elements divided into 3 batches
    # of size 8. Hence, the correct distributed behavior is for each replica to
    # see sub-batches of size 4, over three steps.
    def dataset_fn(ctx):
      del ctx
      dataset = dataset_ops.Dataset.range(12).batch(8)
      # Set the sharding behavior to OFF for simplicity of test setup; namely,
      # `dataset` defines the per-worker dataset and will not be further
      # sharded. Each worker will see a dataset that is
      # tf.data.Dataset.range(12).batch(8).rebatch(...).
      options = dataset_ops.Options()
      options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
      dataset = dataset.with_options(options)
      return dataset
    dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
    # Actual devices don't matter in this test as long as there is 1 local
    # replica.
    worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
    # Each test runs individually on each worker, so we compare the
    # values on each worker. Each worker should rebatch its dataset into
    # smaller batches of size 4.
    expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9, 10, 11]]]
    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset,
        worker_device_pairs,
        expected_values,
        distribution,
        num_replicas_in_sync=distribution.num_replicas_in_sync,
        input_context=distribution.extended._make_input_context())
  @combinations.generate(
      combinations.combine(
          mode=["eager"],
          input_type=["dataset"],
          api_type=["wrap_into_iterator", "wrap_into_dataset"],
          iteration_type=["get_next", "for_loop"],
          distribution=[
              strategy_combinations.multi_worker_mirrored_2x1_cpu,
              strategy_combinations.multi_worker_mirrored_2x1_gpu,
          ]))
  def testMWMSPartialBatchWithLegacyRebatch(self, input_type, api_type,
                                            iteration_type, distribution):
    """Checks partial-batch handling when LegacyRebatch is used."""
    # Test case: 2 workers, 1 replica each.
    # This test simulates the sharded behavior when we have two files each with
    # 12 elements and a global batch size of 8. When we consider the dataset in
    # aggregate (non-distributed), there are 24 elements divided into 3 batches
    # of size 8. Hence, the correct distributed behavior is for each replica to
    # see sub-batches of size 4, over three steps. However, when we create a
    # DistributedDataset and cannot statically infer the intended global batch
    # size (e.g. if the user does not use a batching dataset), each worker will
    # rebatch based on the dynamic batch size of the data encountered, even when
    # it encounters partial batches. The last per-worker partial batch (size 4)
    # ends up being split into two replicas, resulting in 4 steps in total, of
    # (global) batch sizes 8, 8, 4, 4.
    def dataset_fn(ctx):
      del ctx
      # The following dataset is equivalent to
      # tf.data.Dataset.range(12).batch(8), but does not use a batching dataset.
      # This causes DistributedDataset to use LegacyRebatch instead.
      batch_sizes = dataset_ops.Dataset.from_tensor_slices([8, 4])
      offsets = dataset_ops.Dataset.from_tensor_slices([0, 8])
      dataset = dataset_ops.Dataset.zip((offsets, batch_sizes))
      def map_fn(offset, batch_size):
        # Emits a contiguous range acting as one "batch" of `batch_size`.
        return math_ops.range(offset, offset + batch_size)
      dataset = dataset.map(map_fn)
      # Set the sharding behavior to OFF for simplicity of test setup; namely,
      # `dataset` defines the per-worker dataset and will not be further
      # sharded. Each worker will see a dataset that is equivalent to
      # tf.data.Dataset.range(12).batch(8).rebatch(...).
      options = dataset_ops.Options()
      options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
      dataset = dataset.with_options(options)
      return dataset
    dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
    # Actual devices don't matter in this test as long as the number of global
    # replicas is 2.
    worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
    # Each test runs individually on each worker, so we compare the
    # values on each worker. Each worker should rebatch its dataset into
    # smaller batches of size 4.
    expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9]], [[10, 11]]]
    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset,
        worker_device_pairs,
        expected_values,
        distribution,
        num_replicas_in_sync=distribution.num_replicas_in_sync,
        input_context=distribution.extended._make_input_context())
  @combinations.generate(
      combinations.combine(
          mode=["eager"],
          input_type=["dataset"],
          api_type=["wrap_into_iterator", "wrap_into_dataset"],
          iteration_type=["get_next", "for_loop"],
          distribution=[
              strategy_combinations.multi_worker_mirrored_2x1_cpu,
              strategy_combinations.multi_worker_mirrored_2x1_gpu,
          ],
          auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.DATA]))
  def testMWMSWithDataSharding(self, input_type, api_type, iteration_type,
                               distribution, auto_shard_policy):
    """Checks element values when autosharding by data across 2 workers."""
    # Test case: 2 workers, 1 replica each.
    # This test simulates the sharded behavior the dataset is sharded by data
    # and the batch size is indivisible by the number of replicas. This checks
    # that the elements are as expected and the batch size across all workers
    # adds up to 3. This test will only pass if the autoshard rewrite rewrites
    # RebatchDatasetV2 to legacy RebatchDataset when sharding by data.
    def dataset_fn(ctx):
      del ctx
      dataset = dataset_ops.Dataset.range(8).batch(3)
      # Set the sharding behavior to OFF for simplicity of test setup; namely,
      # `dataset` defines the per-worker dataset and will not be further
      # sharded. Each worker will see a dataset that is
      # tf.data.Dataset.range(12).batch(8).rebatch(...).
      options = dataset_ops.Options()
      options.experimental_distribute.auto_shard_policy = auto_shard_policy
      dataset = dataset.with_options(options)
      return dataset
    dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
    # Actual devices don't matter in this test as long as there is 1 local
    # replica.
    worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
    # Each test runs individually on each worker, so we compare the
    # values on each worker. We expect each worker to see different shards of
    # data.
    cr = distribution.cluster_resolver
    worker_id = multi_worker_util.id_in_cluster(cr.cluster_spec(), cr.task_type,
                                                cr.task_id)
    # NOTE(review): only worker ids 0 and 1 are handled; with more than two
    # workers `expected_values` would be unbound. The 2x1 combinations above
    # guarantee exactly two workers.
    if worker_id == 0:
      expected_values = [[[0, 1]], [[3, 4]], [[6]]]
    elif worker_id == 1:
      expected_values = [[[2]], [[5]], [[7]]]
    self._test_input_iteration(
        input_type,
        api_type,
        iteration_type,
        dataset,
        worker_device_pairs,
        expected_values,
        distribution,
        num_replicas_in_sync=distribution.num_replicas_in_sync,
        input_context=distribution.extended._make_input_context())
class DistributedIteratorPerDeviceTest(DistributedIteratorTestBase,
                                       parameterized.TestCase):
  """Tests for PER_WORKER and PER_REPLICA's InputOptions variants."""
  def setUp(self):
    """Resets the eager context and ensures enough virtual CPUs exist."""
    # A fresh context is needed so virtual-device configuration below takes
    # effect for each test; 3 virtual CPUs back the CPU-based strategies.
    context._reset_context()
    strategy_combinations.set_virtual_cpus_to_at_least(3)
    super(DistributedIteratorPerDeviceTest, self).setUp()
  @combinations.generate(
      combinations.combine(
          input_options=[
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=False,
                  experimental_prefetch_to_device=True,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_WORKER),
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=False,
                  experimental_prefetch_to_device=True,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_REPLICA),
          ],
          mode=["eager"],
          distribution=[
              strategy_combinations.mirrored_strategy_with_two_gpus,
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ]))
  def testDevicePlacementForPerWorkerValuesWithPrefetch(self, distribution,
                                                        input_options):
    """With prefetching, each replica's value lands on its worker device."""
    def dataset_fn(input_context):  # pylint: disable=unused-argument
      return dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4])
    ds = distribution.experimental_distribute_datasets_from_function(
        dataset_fn, input_options)
    # Both `device` and `backing_device` of each per-replica value must match
    # the corresponding worker device when prefetch_to_device is enabled.
    for x in ds:
      assert x.values[0].device == distribution.extended.worker_devices[0]
      assert x.values[0].backing_device == distribution.extended.worker_devices[
          0]
      assert x.values[1].device == distribution.extended.worker_devices[1]
      assert x.values[1].backing_device == distribution.extended.worker_devices[
          1]
  @combinations.generate(
      combinations.combine(
          distribution=[
              strategy_combinations.mirrored_strategy_with_two_gpus,
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ],
          input_options=[
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=False,
                  experimental_prefetch_to_device=False,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_WORKER)
          ],
          mode=["eager"],
      ))
  def testDevicePlacementForPerWorkerValuesWithoutPrefetch(
      self, distribution, input_options):
    """Without prefetching, per-worker values stay on the host CPU device."""
    def dataset_fn(input_context):
      return dataset_ops.Dataset.from_tensor_slices(
          np.full(4, input_context.input_pipeline_id))
    ds = distribution.experimental_distribute_datasets_from_function(
        dataset_fn, input_options)
    for x in ds:
      # Run a pass-through step so placement reflects what a replica fn sees.
      x = distribution.run(lambda inputs: inputs, args=(x,))
      assert x.values[
          0].device == "/job:localhost/replica:0/task:0/device:CPU:0"
      assert x.values[
          0].backing_device == "/job:localhost/replica:0/task:0/device:CPU:0"
      assert x.values[
          1].device == "/job:localhost/replica:0/task:0/device:CPU:0"
      assert x.values[
          1].backing_device == "/job:localhost/replica:0/task:0/device:CPU:0"
  @combinations.generate(
      combinations.combine(
          input_options=[
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=True,
                  experimental_prefetch_to_device=False,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_WORKER),
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=True,
                  experimental_prefetch_to_device=True,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_REPLICA)
          ],
          mode=["eager"],
          distribution=[
              strategy_combinations.mirrored_strategy_with_two_gpus,
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ]))
  def testDevicePlacementForInvalidCombinations(self, distribution,
                                                input_options):
    """Invalid InputOptions combinations must raise ValueError."""
    # place_dataset_on_device=True is incompatible with PER_WORKER mode and
    # with prefetch_to_device=True, per the options combined above.
    def dataset_fn(input_context):
      return dataset_ops.Dataset.from_tensor_slices(
          np.full(4, input_context.input_pipeline_id))
    with self.assertRaises(ValueError):
      distribution.experimental_distribute_datasets_from_function(
          dataset_fn, input_options)
  @combinations.generate(
      combinations.combine(
          input_options=[
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=False,
                  experimental_prefetch_to_device=False,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_WORKER),
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=False,
                  experimental_prefetch_to_device=True,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_WORKER),
          ],
          mode=["eager"],
          distribution=[
              strategy_combinations.mirrored_strategy_with_two_gpus,
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ]))
  def testOutputValuesForPerWorkerInputOptions(self, distribution,
                                               input_options):
    """In PER_WORKER mode, each replica gets one row of the worker dataset."""
    def dataset_fn(input_context):
      # A 2x5 matrix scaled by (pipeline id + 1); rows are split across the
      # two replicas of the single worker.
      return dataset_ops.Dataset.from_tensor_slices(
          np.arange(1, 11).reshape(
              (2, 5)) * (input_context.input_pipeline_id + 1))
    ds = distribution.experimental_distribute_datasets_from_function(
        dataset_fn, input_options)
    # validating the values
    x = next(iter(ds))
    assert np.array_equal(x.values[0].numpy(), np.array([1, 2, 3, 4, 5]))
    assert np.array_equal(x.values[1].numpy(), np.array([6, 7, 8, 9, 10]))
  @combinations.generate(
      combinations.combine(
          input_options=[
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=True,
                  experimental_prefetch_to_device=False,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_REPLICA),
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=False,
                  experimental_prefetch_to_device=False,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_REPLICA),
              distribute_lib.InputOptions(
                  experimental_place_dataset_on_device=False,
                  experimental_prefetch_to_device=True,
                  experimental_replication_mode=distribute_lib
                  .InputReplicationMode.PER_REPLICA),
          ],
          mode=["eager"],
          distribution=[
              strategy_combinations.mirrored_strategy_with_two_gpus,
              strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
          ]))
  def testOutputValuesForPerReplicaInputOptions(self, distribution,
                                                input_options):
    """In PER_REPLICA mode, each replica runs dataset_fn independently."""
    def dataset_fn(input_context):
      # Replica i sees the base sequence scaled by (pipeline id + 1), so the
      # second replica's elements are exactly double the first's.
      return dataset_ops.Dataset.from_tensor_slices(
          np.arange(1, 10) * (input_context.input_pipeline_id + 1))
    ds = distribution.experimental_distribute_datasets_from_function(
        dataset_fn, input_options)
    expected = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    for i, x in enumerate(ds):
      # validating the values
      assert x.values[0].numpy() == expected[i]
      assert x.values[1].numpy() == expected[i] * 2
      loop_num = i
    # Ensure the loop consumed every element (loop_num is bound because the
    # dataset above is non-empty).
    assert loop_num == len(expected) - 1
# Standard test entry point: defers to the distribute test_util runner so the
# multi-worker combinations above are set up correctly.
if __name__ == "__main__":
  test_util.main()
| |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'IanaInterfaceTypeIdentity' : {
'meta_info' : _MetaInfoClass('IanaInterfaceTypeIdentity',
False,
[
],
'iana-if-type',
'iana-interface-type',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoicefxoIdentity' : {
'meta_info' : _MetaInfoClass('VoicefxoIdentity',
False,
[
],
'iana-if-type',
'voiceFXO',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmvciendptIdentity' : {
'meta_info' : _MetaInfoClass('AtmvciendptIdentity',
False,
[
],
'iana-if-type',
'atmVciEndPt',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Propbwap2MpIdentity' : {
'meta_info' : _MetaInfoClass('Propbwap2MpIdentity',
False,
[
],
'iana-if-type',
'propBWAp2Mp',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PropdocswirelessdownstreamIdentity' : {
'meta_info' : _MetaInfoClass('PropdocswirelessdownstreamIdentity',
False,
[
],
'iana-if-type',
'propDocsWirelessDownstream',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'V11Identity' : {
'meta_info' : _MetaInfoClass('V11Identity',
False,
[
],
'iana-if-type',
'v11',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SoftwareloopbackIdentity' : {
'meta_info' : _MetaInfoClass('SoftwareloopbackIdentity',
False,
[
],
'iana-if-type',
'softwareLoopback',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'HdlcIdentity' : {
'meta_info' : _MetaInfoClass('HdlcIdentity',
False,
[
],
'iana-if-type',
'hdlc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoicefgdosIdentity' : {
'meta_info' : _MetaInfoClass('VoicefgdosIdentity',
False,
[
],
'iana-if-type',
'voiceFGDOS',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FastetherfxIdentity' : {
'meta_info' : _MetaInfoClass('FastetherfxIdentity',
False,
[
],
'iana-if-type',
'fastEtherFX',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DvbtdmIdentity' : {
'meta_info' : _MetaInfoClass('DvbtdmIdentity',
False,
[
],
'iana-if-type',
'dvbTdm',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'NfasIdentity' : {
'meta_info' : _MetaInfoClass('NfasIdentity',
False,
[
],
'iana-if-type',
'nfas',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IfpwtypeIdentity' : {
'meta_info' : _MetaInfoClass('IfpwtypeIdentity',
False,
[
],
'iana-if-type',
'ifPwType',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'L2VlanIdentity' : {
'meta_info' : _MetaInfoClass('L2VlanIdentity',
False,
[
],
'iana-if-type',
'l2vlan',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Adsl2PlusIdentity' : {
'meta_info' : _MetaInfoClass('Adsl2PlusIdentity',
False,
[
],
'iana-if-type',
'adsl2plus',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ieee802154Identity' : {
'meta_info' : _MetaInfoClass('Ieee802154Identity',
False,
[
],
'iana-if-type',
'ieee802154',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoicefxsIdentity' : {
'meta_info' : _MetaInfoClass('VoicefxsIdentity',
False,
[
],
'iana-if-type',
'voiceFXS',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DvbrcsmaclayerIdentity' : {
'meta_info' : _MetaInfoClass('DvbrcsmaclayerIdentity',
False,
[
],
'iana-if-type',
'dvbRcsMacLayer',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IdslIdentity' : {
'meta_info' : _MetaInfoClass('IdslIdentity',
False,
[
],
'iana-if-type',
'idsl',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'InfinibandIdentity' : {
'meta_info' : _MetaInfoClass('InfinibandIdentity',
False,
[
],
'iana-if-type',
'infiniband',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ddnx25Identity' : {
'meta_info' : _MetaInfoClass('Ddnx25Identity',
False,
[
],
'iana-if-type',
'ddnX25',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Wwanpp2Identity' : {
'meta_info' : _MetaInfoClass('Wwanpp2Identity',
False,
[
],
'iana-if-type',
'wwanPP2',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DocscableupstreamIdentity' : {
'meta_info' : _MetaInfoClass('DocscableupstreamIdentity',
False,
[
],
'iana-if-type',
'docsCableUpstream',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ethernet3MbitIdentity' : {
'meta_info' : _MetaInfoClass('Ethernet3MbitIdentity',
False,
[
],
'iana-if-type',
'ethernet3Mbit',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DigitalpowerlineIdentity' : {
'meta_info' : _MetaInfoClass('DigitalpowerlineIdentity',
False,
[
],
'iana-if-type',
'digitalPowerline',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'H323ProxyIdentity' : {
'meta_info' : _MetaInfoClass('H323ProxyIdentity',
False,
[
],
'iana-if-type',
'h323Proxy',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'GtpIdentity' : {
'meta_info' : _MetaInfoClass('GtpIdentity',
False,
[
],
'iana-if-type',
'gtp',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IpoveratmIdentity' : {
'meta_info' : _MetaInfoClass('IpoveratmIdentity',
False,
[
],
'iana-if-type',
'ipOverAtm',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AlueponIdentity' : {
'meta_info' : _MetaInfoClass('AlueponIdentity',
False,
[
],
'iana-if-type',
'aluEpon',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ImtIdentity' : {
'meta_info' : _MetaInfoClass('ImtIdentity',
False,
[
],
'iana-if-type',
'imt',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IpswitchIdentity' : {
'meta_info' : _MetaInfoClass('IpswitchIdentity',
False,
[
],
'iana-if-type',
'ipSwitch',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MsdslIdentity' : {
'meta_info' : _MetaInfoClass('MsdslIdentity',
False,
[
],
'iana-if-type',
'msdsl',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DvbrccmaclayerIdentity' : {
'meta_info' : _MetaInfoClass('DvbrccmaclayerIdentity',
False,
[
],
'iana-if-type',
'dvbRccMacLayer',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SmdsdxiIdentity' : {
'meta_info' : _MetaInfoClass('SmdsdxiIdentity',
False,
[
],
'iana-if-type',
'smdsDxi',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoiceoveratmIdentity' : {
'meta_info' : _MetaInfoClass('VoiceoveratmIdentity',
False,
[
],
'iana-if-type',
'voiceOverAtm',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ArapIdentity' : {
'meta_info' : _MetaInfoClass('ArapIdentity',
False,
[
],
'iana-if-type',
'arap',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FastetherIdentity' : {
'meta_info' : _MetaInfoClass('FastetherIdentity',
False,
[
],
'iana-if-type',
'fastEther',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MpcIdentity' : {
'meta_info' : _MetaInfoClass('MpcIdentity',
False,
[
],
'iana-if-type',
'mpc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'LinegroupIdentity' : {
'meta_info' : _MetaInfoClass('LinegroupIdentity',
False,
[
],
'iana-if-type',
'linegroup',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'HippiIdentity' : {
'meta_info' : _MetaInfoClass('HippiIdentity',
False,
[
],
'iana-if-type',
'hippi',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'RprIdentity' : {
'meta_info' : _MetaInfoClass('RprIdentity',
False,
[
],
'iana-if-type',
'rpr',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ds1FdlIdentity' : {
'meta_info' : _MetaInfoClass('Ds1FdlIdentity',
False,
[
],
'iana-if-type',
'ds1FDL',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SonetvtIdentity' : {
'meta_info' : _MetaInfoClass('SonetvtIdentity',
False,
[
],
'iana-if-type',
'sonetVT',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoiceencapIdentity' : {
'meta_info' : _MetaInfoClass('VoiceencapIdentity',
False,
[
],
'iana-if-type',
'voiceEncap',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ss7SiglinkIdentity' : {
'meta_info' : _MetaInfoClass('Ss7SiglinkIdentity',
False,
[
],
'iana-if-type',
'ss7SigLink',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ArcnetIdentity' : {
'meta_info' : _MetaInfoClass('ArcnetIdentity',
False,
[
],
'iana-if-type',
'arcnet',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ActelismetaloopIdentity' : {
'meta_info' : _MetaInfoClass('ActelismetaloopIdentity',
False,
[
],
'iana-if-type',
'actelisMetaLOOP',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'QllcIdentity' : {
'meta_info' : _MetaInfoClass('QllcIdentity',
False,
[
],
'iana-if-type',
'qllc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Rfc877X25Identity' : {
'meta_info' : _MetaInfoClass('Rfc877X25Identity',
False,
[
],
'iana-if-type',
'rfc877x25',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MpegtransportIdentity' : {
'meta_info' : _MetaInfoClass('MpegtransportIdentity',
False,
[
],
'iana-if-type',
'mpegTransport',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'X25MlpIdentity' : {
'meta_info' : _MetaInfoClass('X25MlpIdentity',
False,
[
],
'iana-if-type',
'x25mlp',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VirtualtgIdentity' : {
'meta_info' : _MetaInfoClass('VirtualtgIdentity',
False,
[
],
'iana-if-type',
'virtualTg',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'HostpadIdentity' : {
'meta_info' : _MetaInfoClass('HostpadIdentity',
False,
[
],
'iana-if-type',
'hostPad',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'StarlanIdentity' : {
'meta_info' : _MetaInfoClass('StarlanIdentity',
False,
[
],
'iana-if-type',
'starLan',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Iso88025DtrIdentity' : {
'meta_info' : _MetaInfoClass('Iso88025DtrIdentity',
False,
[
],
'iana-if-type',
'iso88025Dtr',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ibm370ParchanIdentity' : {
'meta_info' : _MetaInfoClass('Ibm370ParchanIdentity',
False,
[
],
'iana-if-type',
'ibm370parChan',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Adsl2Identity' : {
'meta_info' : _MetaInfoClass('Adsl2Identity',
False,
[
],
'iana-if-type',
'adsl2',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'OtnotuIdentity' : {
'meta_info' : _MetaInfoClass('OtnotuIdentity',
False,
[
],
'iana-if-type',
'otnOtu',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Propwirelessp2PIdentity' : {
'meta_info' : _MetaInfoClass('Propwirelessp2PIdentity',
False,
[
],
'iana-if-type',
'propWirelessP2P',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'InterleaveIdentity' : {
'meta_info' : _MetaInfoClass('InterleaveIdentity',
False,
[
],
'iana-if-type',
'interleave',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IsupIdentity' : {
'meta_info' : _MetaInfoClass('IsupIdentity',
False,
[
],
'iana-if-type',
'isup',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Regular1822Identity' : {
'meta_info' : _MetaInfoClass('Regular1822Identity',
False,
[
],
'iana-if-type',
'regular1822',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Gr303RdtIdentity' : {
'meta_info' : _MetaInfoClass('Gr303RdtIdentity',
False,
[
],
'iana-if-type',
'gr303RDT',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PropdocswirelessmaclayerIdentity' : {
'meta_info' : _MetaInfoClass('PropdocswirelessmaclayerIdentity',
False,
[
],
'iana-if-type',
'propDocsWirelessMaclayer',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AsyncIdentity' : {
'meta_info' : _MetaInfoClass('AsyncIdentity',
False,
[
],
'iana-if-type',
'async',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'RadiomacIdentity' : {
'meta_info' : _MetaInfoClass('RadiomacIdentity',
False,
[
],
'iana-if-type',
'radioMAC',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'OpticalchannelgroupIdentity' : {
'meta_info' : _MetaInfoClass('OpticalchannelgroupIdentity',
False,
[
],
'iana-if-type',
'opticalChannelGroup',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SixtofourIdentity' : {
'meta_info' : _MetaInfoClass('SixtofourIdentity',
False,
[
],
'iana-if-type',
'sixToFour',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PropdocswirelessupstreamIdentity' : {
'meta_info' : _MetaInfoClass('PropdocswirelessupstreamIdentity',
False,
[
],
'iana-if-type',
'propDocsWirelessUpstream',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Q2931Identity' : {
'meta_info' : _MetaInfoClass('Q2931Identity',
False,
[
],
'iana-if-type',
'q2931',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FddiIdentity' : {
'meta_info' : _MetaInfoClass('FddiIdentity',
False,
[
],
'iana-if-type',
'fddi',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PropcnlsIdentity' : {
'meta_info' : _MetaInfoClass('PropcnlsIdentity',
False,
[
],
'iana-if-type',
'propCnls',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Aal2Identity' : {
'meta_info' : _MetaInfoClass('Aal2Identity',
False,
[
],
'iana-if-type',
'aal2',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DvbasioutIdentity' : {
'meta_info' : _MetaInfoClass('DvbasioutIdentity',
False,
[
],
'iana-if-type',
'dvbAsiOut',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AluelpIdentity' : {
'meta_info' : _MetaInfoClass('AluelpIdentity',
False,
[
],
'iana-if-type',
'aluELP',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'CiscoislvlanIdentity' : {
'meta_info' : _MetaInfoClass('CiscoislvlanIdentity',
False,
[
],
'iana-if-type',
'ciscoISLvlan',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DocscableupstreamrfportIdentity' : {
'meta_info' : _MetaInfoClass('DocscableupstreamrfportIdentity',
False,
[
],
'iana-if-type',
'docsCableUpstreamRfPort',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Aal5Identity' : {
'meta_info' : _MetaInfoClass('Aal5Identity',
False,
[
],
'iana-if-type',
'aal5',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FrdlciendptIdentity' : {
'meta_info' : _MetaInfoClass('FrdlciendptIdentity',
False,
[
],
'iana-if-type',
'frDlciEndPt',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'HippiinterfaceIdentity' : {
'meta_info' : _MetaInfoClass('HippiinterfaceIdentity',
False,
[
],
'iana-if-type',
'hippiInterface',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'L3IpvlanIdentity' : {
'meta_info' : _MetaInfoClass('L3IpvlanIdentity',
False,
[
],
'iana-if-type',
'l3ipvlan',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Miox25Identity' : {
'meta_info' : _MetaInfoClass('Miox25Identity',
False,
[
],
'iana-if-type',
'miox25',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'HssiIdentity' : {
'meta_info' : _MetaInfoClass('HssiIdentity',
False,
[
],
'iana-if-type',
'hssi',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmvirtualIdentity' : {
'meta_info' : _MetaInfoClass('AtmvirtualIdentity',
False,
[
],
'iana-if-type',
'atmVirtual',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AlugpononuIdentity' : {
'meta_info' : _MetaInfoClass('AlugpononuIdentity',
False,
[
],
'iana-if-type',
'aluGponOnu',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Rfc1483Identity' : {
'meta_info' : _MetaInfoClass('Rfc1483Identity',
False,
[
],
'iana-if-type',
'rfc1483',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'CnrIdentity' : {
'meta_info' : _MetaInfoClass('CnrIdentity',
False,
[
],
'iana-if-type',
'cnr',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SipsigIdentity' : {
'meta_info' : _MetaInfoClass('SipsigIdentity',
False,
[
],
'iana-if-type',
'sipSig',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MyrinetIdentity' : {
'meta_info' : _MetaInfoClass('MyrinetIdentity',
False,
[
],
'iana-if-type',
'myrinet',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DlswIdentity' : {
'meta_info' : _MetaInfoClass('DlswIdentity',
False,
[
],
'iana-if-type',
'dlsw',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'GigabitethernetIdentity' : {
'meta_info' : _MetaInfoClass('GigabitethernetIdentity',
False,
[
],
'iana-if-type',
'gigabitEthernet',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'X25PleIdentity' : {
'meta_info' : _MetaInfoClass('X25PleIdentity',
False,
[
],
'iana-if-type',
'x25ple',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'LmpIdentity' : {
'meta_info' : _MetaInfoClass('LmpIdentity',
False,
[
],
'iana-if-type',
'lmp',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'OpticaltransportIdentity' : {
'meta_info' : _MetaInfoClass('OpticaltransportIdentity',
False,
[
],
'iana-if-type',
'opticalTransport',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SdlcIdentity' : {
'meta_info' : _MetaInfoClass('SdlcIdentity',
False,
[
],
'iana-if-type',
'sdlc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoiceemIdentity' : {
'meta_info' : _MetaInfoClass('VoiceemIdentity',
False,
[
],
'iana-if-type',
'voiceEM',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'X86LapsIdentity' : {
'meta_info' : _MetaInfoClass('X86LapsIdentity',
False,
[
],
'iana-if-type',
'x86Laps',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'G9982Identity' : {
'meta_info' : _MetaInfoClass('G9982Identity',
False,
[
],
'iana-if-type',
'g9982',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Iso88022LlcIdentity' : {
'meta_info' : _MetaInfoClass('Iso88022LlcIdentity',
False,
[
],
'iana-if-type',
'iso88022llc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DvbasiinIdentity' : {
'meta_info' : _MetaInfoClass('DvbasiinIdentity',
False,
[
],
'iana-if-type',
'dvbAsiIn',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'BgppolicyaccountingIdentity' : {
'meta_info' : _MetaInfoClass('BgppolicyaccountingIdentity',
False,
[
],
'iana-if-type',
'bgppolicyaccounting',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AluepononuIdentity' : {
'meta_info' : _MetaInfoClass('AluepononuIdentity',
False,
[
],
'iana-if-type',
'aluEponOnu',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MfsiglinkIdentity' : {
'meta_info' : _MetaInfoClass('MfsiglinkIdentity',
False,
[
],
'iana-if-type',
'mfSigLink',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DcnIdentity' : {
'meta_info' : _MetaInfoClass('DcnIdentity',
False,
[
],
'iana-if-type',
'dcn',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmdxiIdentity' : {
'meta_info' : _MetaInfoClass('AtmdxiIdentity',
False,
[
],
'iana-if-type',
'atmDxi',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoiceoverframerelayIdentity' : {
'meta_info' : _MetaInfoClass('VoiceoverframerelayIdentity',
False,
[
],
'iana-if-type',
'voiceOverFrameRelay',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'GfpIdentity' : {
'meta_info' : _MetaInfoClass('GfpIdentity',
False,
[
],
'iana-if-type',
'gfp',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SonetoverheadchannelIdentity' : {
'meta_info' : _MetaInfoClass('SonetoverheadchannelIdentity',
False,
[
],
'iana-if-type',
'sonetOverheadChannel',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VmwarevirtualnicIdentity' : {
'meta_info' : _MetaInfoClass('VmwarevirtualnicIdentity',
False,
[
],
'iana-if-type',
'vmwareVirtualNic',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FciplinkIdentity' : {
'meta_info' : _MetaInfoClass('FciplinkIdentity',
False,
[
],
'iana-if-type',
'fcipLink',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IpoverclawIdentity' : {
'meta_info' : _MetaInfoClass('IpoverclawIdentity',
False,
[
],
'iana-if-type',
'ipOverClaw',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'CoffeeIdentity' : {
'meta_info' : _MetaInfoClass('CoffeeIdentity',
False,
[
],
'iana-if-type',
'coffee',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'RadslIdentity' : {
'meta_info' : _MetaInfoClass('RadslIdentity',
False,
[
],
'iana-if-type',
'radsl',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Vdsl2Identity' : {
'meta_info' : _MetaInfoClass('Vdsl2Identity',
False,
[
],
'iana-if-type',
'vdsl2',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Rs232Identity' : {
'meta_info' : _MetaInfoClass('Rs232Identity',
False,
[
],
'iana-if-type',
'rs232',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'E1Identity' : {
'meta_info' : _MetaInfoClass('E1Identity',
False,
[
],
'iana-if-type',
'e1',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ReachdslIdentity' : {
'meta_info' : _MetaInfoClass('ReachdslIdentity',
False,
[
],
'iana-if-type',
'reachDSL',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoiceovercableIdentity' : {
'meta_info' : _MetaInfoClass('VoiceovercableIdentity',
False,
[
],
'iana-if-type',
'voiceOverCable',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Tr008Identity' : {
'meta_info' : _MetaInfoClass('Tr008Identity',
False,
[
],
'iana-if-type',
'tr008',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoiceoveripIdentity' : {
'meta_info' : _MetaInfoClass('VoiceoveripIdentity',
False,
[
],
'iana-if-type',
'voiceOverIp',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmIdentity' : {
'meta_info' : _MetaInfoClass('AtmIdentity',
False,
[
],
'iana-if-type',
'atm',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ds3Identity' : {
'meta_info' : _MetaInfoClass('Ds3Identity',
False,
[
],
'iana-if-type',
'ds3',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ds0Identity' : {
'meta_info' : _MetaInfoClass('Ds0Identity',
False,
[
],
'iana-if-type',
'ds0',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ds1Identity' : {
'meta_info' : _MetaInfoClass('Ds1Identity',
False,
[
],
'iana-if-type',
'ds1',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SrpIdentity' : {
'meta_info' : _MetaInfoClass('SrpIdentity',
False,
[
],
'iana-if-type',
'srp',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DocscabledownstreamIdentity' : {
'meta_info' : _MetaInfoClass('DocscabledownstreamIdentity',
False,
[
],
'iana-if-type',
'docsCableDownstream',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DvbrcstdmaIdentity' : {
'meta_info' : _MetaInfoClass('DvbrcstdmaIdentity',
False,
[
],
'iana-if-type',
'dvbRcsTdma',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'G9983Identity' : {
'meta_info' : _MetaInfoClass('G9983Identity',
False,
[
],
'iana-if-type',
'g9983',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PlcIdentity' : {
'meta_info' : _MetaInfoClass('PlcIdentity',
False,
[
],
'iana-if-type',
'plc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FramerelaympiIdentity' : {
'meta_info' : _MetaInfoClass('FramerelaympiIdentity',
False,
[
],
'iana-if-type',
'frameRelayMPI',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MvlIdentity' : {
'meta_info' : _MetaInfoClass('MvlIdentity',
False,
[
],
'iana-if-type',
'mvl',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PropmultiplexorIdentity' : {
'meta_info' : _MetaInfoClass('PropmultiplexorIdentity',
False,
[
],
'iana-if-type',
'propMultiplexor',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoicedidIdentity' : {
'meta_info' : _MetaInfoClass('VoicedidIdentity',
False,
[
],
'iana-if-type',
'voiceDID',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'CompositelinkIdentity' : {
'meta_info' : _MetaInfoClass('CompositelinkIdentity',
False,
[
],
'iana-if-type',
'compositeLink',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Proteon10MbitIdentity' : {
'meta_info' : _MetaInfoClass('Proteon10MbitIdentity',
False,
[
],
'iana-if-type',
'proteon10Mbit',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmbondIdentity' : {
'meta_info' : _MetaInfoClass('AtmbondIdentity',
False,
[
],
'iana-if-type',
'atmbond',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Frf16MfrbundleIdentity' : {
'meta_info' : _MetaInfoClass('Frf16MfrbundleIdentity',
False,
[
],
'iana-if-type',
'frf16MfrBundle',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'CctemulIdentity' : {
'meta_info' : _MetaInfoClass('CctemulIdentity',
False,
[
],
'iana-if-type',
'cctEmul',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MplstunnelIdentity' : {
'meta_info' : _MetaInfoClass('MplstunnelIdentity',
False,
[
],
'iana-if-type',
'mplsTunnel',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'GponIdentity' : {
'meta_info' : _MetaInfoClass('GponIdentity',
False,
[
],
'iana-if-type',
'gpon',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VdslIdentity' : {
'meta_info' : _MetaInfoClass('VdslIdentity',
False,
[
],
'iana-if-type',
'vdsl',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PosIdentity' : {
'meta_info' : _MetaInfoClass('PosIdentity',
False,
[
],
'iana-if-type',
'pos',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ieee8023AdlagIdentity' : {
'meta_info' : _MetaInfoClass('Ieee8023AdlagIdentity',
False,
[
],
'iana-if-type',
'ieee8023adLag',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DocscablemaclayerIdentity' : {
'meta_info' : _MetaInfoClass('DocscablemaclayerIdentity',
False,
[
],
'iana-if-type',
'docsCableMaclayer',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DocscablemcmtsdownstreamIdentity' : {
'meta_info' : _MetaInfoClass('DocscablemcmtsdownstreamIdentity',
False,
[
],
'iana-if-type',
'docsCableMCmtsDownstream',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PppIdentity' : {
'meta_info' : _MetaInfoClass('PppIdentity',
False,
[
],
'iana-if-type',
'ppp',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FramerelayIdentity' : {
'meta_info' : _MetaInfoClass('FramerelayIdentity',
False,
[
],
'iana-if-type',
'frameRelay',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'EplrsIdentity' : {
'meta_info' : _MetaInfoClass('EplrsIdentity',
False,
[
],
'iana-if-type',
'eplrs',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VmwarenicteamIdentity' : {
'meta_info' : _MetaInfoClass('VmwarenicteamIdentity',
False,
[
],
'iana-if-type',
'vmwareNicTeam',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'CabledownstreamrfportIdentity' : {
'meta_info' : _MetaInfoClass('CabledownstreamrfportIdentity',
False,
[
],
'iana-if-type',
'cableDownstreamRfPort',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MacsecuncontrolledifIdentity' : {
'meta_info' : _MetaInfoClass('MacsecuncontrolledifIdentity',
False,
[
],
'iana-if-type',
'macSecUncontrolledIF',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Iso88023CsmacdIdentity' : {
'meta_info' : _MetaInfoClass('Iso88023CsmacdIdentity',
False,
[
],
'iana-if-type',
'iso88023Csmacd',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'UsbIdentity' : {
'meta_info' : _MetaInfoClass('UsbIdentity',
False,
[
],
'iana-if-type',
'usb',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmfuniIdentity' : {
'meta_info' : _MetaInfoClass('AtmfuniIdentity',
False,
[
],
'iana-if-type',
'atmFuni',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'TelinkIdentity' : {
'meta_info' : _MetaInfoClass('TelinkIdentity',
False,
[
],
'iana-if-type',
'teLink',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Pon622Identity' : {
'meta_info' : _MetaInfoClass('Pon622Identity',
False,
[
],
'iana-if-type',
'pon622',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'EconetIdentity' : {
'meta_info' : _MetaInfoClass('EconetIdentity',
False,
[
],
'iana-if-type',
'econet',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'TdlcIdentity' : {
'meta_info' : _MetaInfoClass('TdlcIdentity',
False,
[
],
'iana-if-type',
'tdlc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ds0BundleIdentity' : {
'meta_info' : _MetaInfoClass('Ds0BundleIdentity',
False,
[
],
'iana-if-type',
'ds0Bundle',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FastIdentity' : {
'meta_info' : _MetaInfoClass('FastIdentity',
False,
[
],
'iana-if-type',
'fast',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ieee1394Identity' : {
'meta_info' : _MetaInfoClass('Ieee1394Identity',
False,
[
],
'iana-if-type',
'ieee1394',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'CblvectastarIdentity' : {
'meta_info' : _MetaInfoClass('CblvectastarIdentity',
False,
[
],
'iana-if-type',
'cblVectaStar',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'RsrbIdentity' : {
'meta_info' : _MetaInfoClass('RsrbIdentity',
False,
[
],
'iana-if-type',
'rsrb',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FramerelayinterconnectIdentity' : {
'meta_info' : _MetaInfoClass('FramerelayinterconnectIdentity',
False,
[
],
'iana-if-type',
'frameRelayInterconnect',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IsdnsIdentity' : {
'meta_info' : _MetaInfoClass('IsdnsIdentity',
False,
[
],
'iana-if-type',
'isdns',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PppmultilinkbundleIdentity' : {
'meta_info' : _MetaInfoClass('PppmultilinkbundleIdentity',
False,
[
],
'iana-if-type',
'pppMultilinkBundle',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Aflane8025Identity' : {
'meta_info' : _MetaInfoClass('Aflane8025Identity',
False,
[
],
'iana-if-type',
'aflane8025',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'LapbIdentity' : {
'meta_info' : _MetaInfoClass('LapbIdentity',
False,
[
],
'iana-if-type',
'lapb',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Aflane8023Identity' : {
'meta_info' : _MetaInfoClass('Aflane8023Identity',
False,
[
],
'iana-if-type',
'aflane8023',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'LapdIdentity' : {
'meta_info' : _MetaInfoClass('LapdIdentity',
False,
[
],
'iana-if-type',
'lapd',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IsdnuIdentity' : {
'meta_info' : _MetaInfoClass('IsdnuIdentity',
False,
[
],
'iana-if-type',
'isdnu',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'LapfIdentity' : {
'meta_info' : _MetaInfoClass('LapfIdentity',
False,
[
],
'iana-if-type',
'lapf',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'CapwapwtpvirtualradioIdentity' : {
'meta_info' : _MetaInfoClass('CapwapwtpvirtualradioIdentity',
False,
[
],
'iana-if-type',
'capwapWtpVirtualRadio',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IfvfitypeIdentity' : {
'meta_info' : _MetaInfoClass('IfvfitypeIdentity',
False,
[
],
'iana-if-type',
'ifVfiType',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'X25HuntgroupIdentity' : {
'meta_info' : _MetaInfoClass('X25HuntgroupIdentity',
False,
[
],
'iana-if-type',
'x25huntGroup',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ParaIdentity' : {
'meta_info' : _MetaInfoClass('ParaIdentity',
False,
[
],
'iana-if-type',
'para',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MacseccontrolledifIdentity' : {
'meta_info' : _MetaInfoClass('MacseccontrolledifIdentity',
False,
[
],
'iana-if-type',
'macSecControlledIF',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Iso88024TokenbusIdentity' : {
'meta_info' : _MetaInfoClass('Iso88024TokenbusIdentity',
False,
[
],
'iana-if-type',
'iso88024TokenBus',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'LocaltalkIdentity' : {
'meta_info' : _MetaInfoClass('LocaltalkIdentity',
False,
[
],
'iana-if-type',
'localTalk',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'HyperchannelIdentity' : {
'meta_info' : _MetaInfoClass('HyperchannelIdentity',
False,
[
],
'iana-if-type',
'hyperchannel',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MediamailoveripIdentity' : {
'meta_info' : _MetaInfoClass('MediamailoveripIdentity',
False,
[
],
'iana-if-type',
'mediaMailOverIp',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IfGsnIdentity' : {
'meta_info' : _MetaInfoClass('IfGsnIdentity',
False,
[
],
'iana-if-type',
'if-gsn',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Capwapdot11ProfileIdentity' : {
'meta_info' : _MetaInfoClass('Capwapdot11ProfileIdentity',
False,
[
],
'iana-if-type',
'capwapDot11Profile',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'L3IpxvlanIdentity' : {
'meta_info' : _MetaInfoClass('L3IpxvlanIdentity',
False,
[
],
'iana-if-type',
'l3ipxvlan',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmsubinterfaceIdentity' : {
'meta_info' : _MetaInfoClass('AtmsubinterfaceIdentity',
False,
[
],
'iana-if-type',
'atmSubInterface',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PrimaryisdnIdentity' : {
'meta_info' : _MetaInfoClass('PrimaryisdnIdentity',
False,
[
],
'iana-if-type',
'primaryISDN',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Proteon80MbitIdentity' : {
'meta_info' : _MetaInfoClass('Proteon80MbitIdentity',
False,
[
],
'iana-if-type',
'proteon80Mbit',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Iso88026ManIdentity' : {
'meta_info' : _MetaInfoClass('Iso88026ManIdentity',
False,
[
],
'iana-if-type',
'iso88026Man',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DigitalwrapperoverheadchannelIdentity' : {
'meta_info' : _MetaInfoClass('DigitalwrapperoverheadchannelIdentity',
False,
[
],
'iana-if-type',
'digitalWrapperOverheadChannel',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DocscableupstreamchannelIdentity' : {
'meta_info' : _MetaInfoClass('DocscableupstreamchannelIdentity',
False,
[
],
'iana-if-type',
'docsCableUpstreamChannel',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'OpticalchannelIdentity' : {
'meta_info' : _MetaInfoClass('OpticalchannelIdentity',
False,
[
],
'iana-if-type',
'opticalChannel',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'EthernetcsmacdIdentity' : {
'meta_info' : _MetaInfoClass('EthernetcsmacdIdentity',
False,
[
],
'iana-if-type',
'ethernetCsmacd',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'BitsIdentity' : {
'meta_info' : _MetaInfoClass('BitsIdentity',
False,
[
],
'iana-if-type',
'bits',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'TunnelIdentity' : {
'meta_info' : _MetaInfoClass('TunnelIdentity',
False,
[
],
'iana-if-type',
'tunnel',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Hdsl2Identity' : {
'meta_info' : _MetaInfoClass('Hdsl2Identity',
False,
[
],
'iana-if-type',
'hdsl2',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FramerelayserviceIdentity' : {
'meta_info' : _MetaInfoClass('FramerelayserviceIdentity',
False,
[
],
'iana-if-type',
'frameRelayService',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'MplsIdentity' : {
'meta_info' : _MetaInfoClass('MplsIdentity',
False,
[
],
'iana-if-type',
'mpls',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ieee80211Identity' : {
'meta_info' : _MetaInfoClass('Ieee80211Identity',
False,
[
],
'iana-if-type',
'ieee80211',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ieee80212Identity' : {
'meta_info' : _MetaInfoClass('Ieee80212Identity',
False,
[
],
'iana-if-type',
'ieee80212',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Mocaversion1Identity' : {
'meta_info' : _MetaInfoClass('Mocaversion1Identity',
False,
[
],
'iana-if-type',
'mocaVersion1',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SonetIdentity' : {
'meta_info' : _MetaInfoClass('SonetIdentity',
False,
[
],
'iana-if-type',
'sonet',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'EsconIdentity' : {
'meta_info' : _MetaInfoClass('EsconIdentity',
False,
[
],
'iana-if-type',
'escon',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AlueponlogicallinkIdentity' : {
'meta_info' : _MetaInfoClass('AlueponlogicallinkIdentity',
False,
[
],
'iana-if-type',
'aluEponLogicalLink',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'G703At2MbIdentity' : {
'meta_info' : _MetaInfoClass('G703At2MbIdentity',
False,
[
],
'iana-if-type',
'g703at2mb',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'UltraIdentity' : {
'meta_info' : _MetaInfoClass('UltraIdentity',
False,
[
],
'iana-if-type',
'ultra',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DvbrccdownstreamIdentity' : {
'meta_info' : _MetaInfoClass('DvbrccdownstreamIdentity',
False,
[
],
'iana-if-type',
'dvbRccDownstream',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SiptgIdentity' : {
'meta_info' : _MetaInfoClass('SiptgIdentity',
False,
[
],
'iana-if-type',
'sipTg',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SmdsicipIdentity' : {
'meta_info' : _MetaInfoClass('SmdsicipIdentity',
False,
[
],
'iana-if-type',
'smdsIcip',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'BridgeIdentity' : {
'meta_info' : _MetaInfoClass('BridgeIdentity',
False,
[
],
'iana-if-type',
'bridge',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmlogicalIdentity' : {
'meta_info' : _MetaInfoClass('AtmlogicalIdentity',
False,
[
],
'iana-if-type',
'atmLogical',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ProppointtopointserialIdentity' : {
'meta_info' : _MetaInfoClass('ProppointtopointserialIdentity',
False,
[
],
'iana-if-type',
'propPointToPointSerial',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'V35Identity' : {
'meta_info' : _MetaInfoClass('V35Identity',
False,
[
],
'iana-if-type',
'v35',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'V36Identity' : {
'meta_info' : _MetaInfoClass('V36Identity',
False,
[
],
'iana-if-type',
'v36',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'V37Identity' : {
'meta_info' : _MetaInfoClass('V37Identity',
False,
[
],
'iana-if-type',
'v37',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IpIdentity' : {
'meta_info' : _MetaInfoClass('IpIdentity',
False,
[
],
'iana-if-type',
'ip',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Gr303IdtIdentity' : {
'meta_info' : _MetaInfoClass('Gr303IdtIdentity',
False,
[
],
'iana-if-type',
'gr303IDT',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'BasicisdnIdentity' : {
'meta_info' : _MetaInfoClass('BasicisdnIdentity',
False,
[
],
'iana-if-type',
'basicISDN',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'G703At64KIdentity' : {
'meta_info' : _MetaInfoClass('G703At64KIdentity',
False,
[
],
'iana-if-type',
'g703at64k',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ArcnetplusIdentity' : {
'meta_info' : _MetaInfoClass('ArcnetplusIdentity',
False,
[
],
'iana-if-type',
'arcnetPlus',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PipIdentity' : {
'meta_info' : _MetaInfoClass('PipIdentity',
False,
[
],
'iana-if-type',
'pip',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DtmIdentity' : {
'meta_info' : _MetaInfoClass('DtmIdentity',
False,
[
],
'iana-if-type',
'dtm',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SlipIdentity' : {
'meta_info' : _MetaInfoClass('SlipIdentity',
False,
[
],
'iana-if-type',
'slip',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Hiperlan2Identity' : {
'meta_info' : _MetaInfoClass('Hiperlan2Identity',
False,
[
],
'iana-if-type',
'hiperlan2',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AdslIdentity' : {
'meta_info' : _MetaInfoClass('AdslIdentity',
False,
[
],
'iana-if-type',
'adsl',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Ieee80216WmanIdentity' : {
'meta_info' : _MetaInfoClass('Ieee80216WmanIdentity',
False,
[
],
'iana-if-type',
'ieee80216WMAN',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmimaIdentity' : {
'meta_info' : _MetaInfoClass('AtmimaIdentity',
False,
[
],
'iana-if-type',
'atmIma',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IsdnIdentity' : {
'meta_info' : _MetaInfoClass('IsdnIdentity',
False,
[
],
'iana-if-type',
'isdn',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Capwapdot11BssIdentity' : {
'meta_info' : _MetaInfoClass('Capwapdot11BssIdentity',
False,
[
],
'iana-if-type',
'capwapDot11Bss',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SipIdentity' : {
'meta_info' : _MetaInfoClass('SipIdentity',
False,
[
],
'iana-if-type',
'sip',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Pdnetherloop2Identity' : {
'meta_info' : _MetaInfoClass('Pdnetherloop2Identity',
False,
[
],
'iana-if-type',
'pdnEtherLoop2',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoiceebsIdentity' : {
'meta_info' : _MetaInfoClass('VoiceebsIdentity',
False,
[
],
'iana-if-type',
'voiceEBS',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IpforwardIdentity' : {
'meta_info' : _MetaInfoClass('IpforwardIdentity',
False,
[
],
'iana-if-type',
'ipForward',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Iso88025CrfpintIdentity' : {
'meta_info' : _MetaInfoClass('Iso88025CrfpintIdentity',
False,
[
],
'iana-if-type',
'iso88025CRFPInt',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PropvirtualIdentity' : {
'meta_info' : _MetaInfoClass('PropvirtualIdentity',
False,
[
],
'iana-if-type',
'propVirtual',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'WwanppIdentity' : {
'meta_info' : _MetaInfoClass('WwanppIdentity',
False,
[
],
'iana-if-type',
'wwanPP',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'OtherIdentity' : {
'meta_info' : _MetaInfoClass('OtherIdentity',
False,
[
],
'iana-if-type',
'other',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Pon155Identity' : {
'meta_info' : _MetaInfoClass('Pon155Identity',
False,
[
],
'iana-if-type',
'pon155',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'QamIdentity' : {
'meta_info' : _MetaInfoClass('QamIdentity',
False,
[
],
'iana-if-type',
'qam',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'OtnoduIdentity' : {
'meta_info' : _MetaInfoClass('OtnoduIdentity',
False,
[
],
'iana-if-type',
'otnOdu',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Iso88025FiberIdentity' : {
'meta_info' : _MetaInfoClass('Iso88025FiberIdentity',
False,
[
],
'iana-if-type',
'iso88025Fiber',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ChannelIdentity' : {
'meta_info' : _MetaInfoClass('ChannelIdentity',
False,
[
],
'iana-if-type',
'channel',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoiceemfgdIdentity' : {
'meta_info' : _MetaInfoClass('VoiceemfgdIdentity',
False,
[
],
'iana-if-type',
'voiceEMFGD',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AlugponphysicaluniIdentity' : {
'meta_info' : _MetaInfoClass('AlugponphysicaluniIdentity',
False,
[
],
'iana-if-type',
'aluGponPhysicalUni',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'A12MppswitchIdentity' : {
'meta_info' : _MetaInfoClass('A12MppswitchIdentity',
False,
[
],
'iana-if-type',
'a12MppSwitch',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IlanIdentity' : {
'meta_info' : _MetaInfoClass('IlanIdentity',
False,
[
],
'iana-if-type',
'ilan',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Pdnetherloop1Identity' : {
'meta_info' : _MetaInfoClass('Pdnetherloop1Identity',
False,
[
],
'iana-if-type',
'pdnEtherLoop1',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'X213Identity' : {
'meta_info' : _MetaInfoClass('X213Identity',
False,
[
],
'iana-if-type',
'x213',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SonetpathIdentity' : {
'meta_info' : _MetaInfoClass('SonetpathIdentity',
False,
[
],
'iana-if-type',
'sonetPath',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VoicefgdeanaIdentity' : {
'meta_info' : _MetaInfoClass('VoicefgdeanaIdentity',
False,
[
],
'iana-if-type',
'voiceFGDEANA',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Iso88025TokenringIdentity' : {
'meta_info' : _MetaInfoClass('Iso88025TokenringIdentity',
False,
[
],
'iana-if-type',
'iso88025TokenRing',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'PropatmIdentity' : {
'meta_info' : _MetaInfoClass('PropatmIdentity',
False,
[
],
'iana-if-type',
'propAtm',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AlueponphysicaluniIdentity' : {
'meta_info' : _MetaInfoClass('AlueponphysicaluniIdentity',
False,
[
],
'iana-if-type',
'aluEponPhysicalUni',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'StacktostackIdentity' : {
'meta_info' : _MetaInfoClass('StacktostackIdentity',
False,
[
],
'iana-if-type',
'stackToStack',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FrforwardIdentity' : {
'meta_info' : _MetaInfoClass('FrforwardIdentity',
False,
[
],
'iana-if-type',
'frForward',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'HomepnaIdentity' : {
'meta_info' : _MetaInfoClass('HomepnaIdentity',
False,
[
],
'iana-if-type',
'homepna',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'SdslIdentity' : {
'meta_info' : _MetaInfoClass('SdslIdentity',
False,
[
],
'iana-if-type',
'sdsl',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'VirtualipaddressIdentity' : {
'meta_info' : _MetaInfoClass('VirtualipaddressIdentity',
False,
[
],
'iana-if-type',
'virtualIpAddress',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'BscIdentity' : {
'meta_info' : _MetaInfoClass('BscIdentity',
False,
[
],
'iana-if-type',
'bsc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AtmradioIdentity' : {
'meta_info' : _MetaInfoClass('AtmradioIdentity',
False,
[
],
'iana-if-type',
'atmRadio',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'AviciopticaletherIdentity' : {
'meta_info' : _MetaInfoClass('AviciopticaletherIdentity',
False,
[
],
'iana-if-type',
'aviciOpticalEther',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'G9981Identity' : {
'meta_info' : _MetaInfoClass('G9981Identity',
False,
[
],
'iana-if-type',
'g9981',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'FibrechannelIdentity' : {
'meta_info' : _MetaInfoClass('FibrechannelIdentity',
False,
[
],
'iana-if-type',
'fibreChannel',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ShdslIdentity' : {
'meta_info' : _MetaInfoClass('ShdslIdentity',
False,
[
],
'iana-if-type',
'shdsl',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'EonIdentity' : {
'meta_info' : _MetaInfoClass('EonIdentity',
False,
[
],
'iana-if-type',
'eon',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'H323GatekeeperIdentity' : {
'meta_info' : _MetaInfoClass('H323GatekeeperIdentity',
False,
[
],
'iana-if-type',
'h323Gatekeeper',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'Hdh1822Identity' : {
'meta_info' : _MetaInfoClass('Hdh1822Identity',
False,
[
],
'iana-if-type',
'hdh1822',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'DvbrccupstreamIdentity' : {
'meta_info' : _MetaInfoClass('DvbrccupstreamIdentity',
False,
[
],
'iana-if-type',
'dvbRccUpstream',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'NsipIdentity' : {
'meta_info' : _MetaInfoClass('NsipIdentity',
False,
[
],
'iana-if-type',
'nsip',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'TransphdlcIdentity' : {
'meta_info' : _MetaInfoClass('TransphdlcIdentity',
False,
[
],
'iana-if-type',
'transpHdlc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'TermpadIdentity' : {
'meta_info' : _MetaInfoClass('TermpadIdentity',
False,
[
],
'iana-if-type',
'termPad',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'IpovercdlcIdentity' : {
'meta_info' : _MetaInfoClass('IpovercdlcIdentity',
False,
[
],
'iana-if-type',
'ipOverCdlc',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'CesIdentity' : {
'meta_info' : _MetaInfoClass('CesIdentity',
False,
[
],
'iana-if-type',
'ces',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
'ModemIdentity' : {
'meta_info' : _MetaInfoClass('ModemIdentity',
False,
[
],
'iana-if-type',
'modem',
_yang_ns._namespaces['iana-if-type'],
'ydk.models.ietf.iana_if_type'
),
},
}
| |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import os
import re
import inspect
import pytest
from atomic_reactor import constants as dconstants
from atomic_reactor.core import DockerTasker
from atomic_reactor.outer import PrivilegedBuildManager, DockerhostBuildManager
from atomic_reactor.util import ImageName
from tests.constants import LOCALHOST_REGISTRY, DOCKERFILE_GIT, DOCKERFILE_SUBDIR_PATH,\
DOCKERFILE_ERROR_BUILD_PATH, TEST_IMAGE, MOCK
if MOCK:
from tests.docker_mock import mock_docker
# Shared parametrize decorator: each decorated test runs twice, once with a
# git-provider source and once with a local-path source, both pointing at the
# same 'ssh/' Dockerfile subdirectory.
with_all_sources = pytest.mark.parametrize('source_params', [
    {'provider': 'git', 'uri': 'https://github.com/fedora-cloud/Fedora-Dockerfiles.git',
     'dockerfile_path': 'ssh/'},
    {'provider': 'path', 'uri': 'file://' + DOCKERFILE_SUBDIR_PATH,
     'dockerfile_path': 'ssh/'},
])
def assert_source_from_path_mounted_ok(caplog, tmpdir):
    """Assert, from captured log records, that a path-provider source was
    mounted into the build container correctly.

    :param caplog: pytest log-capture fixture forwarded by the calling test
                   (NOTE(review): ``caplog.records()`` is called as a method,
                   which matches older capture-plugin APIs — confirm against
                   the pinned pytest plugin version)
    :param tmpdir: the build manager's temp dir (tests pass ``m.temp_dir``)
    """
    # assert that build json has properly modified source uri
    container_uri = 'file://' + os.path.join(dconstants.CONTAINER_SHARE_PATH,
                                             dconstants.CONTAINER_SHARE_SOURCE_SUBDIR)
    container_uri_re = re.compile(r'build json mounted in container.*"uri": "%s"' % container_uri)
    # verify that source code was copied in - actually only verifies
    # that source dir has been created
    source_exists = "source path is '%s'" %\
                    os.path.join(tmpdir, dconstants.CONTAINER_SHARE_SOURCE_SUBDIR)
    assert any([container_uri_re.search(l.getMessage()) for l in caplog.records()])
    assert source_exists in [l.getMessage() for l in caplog.records()]
    # make sure that double source (i.e. source/source) is not created
    source_path_is_re = re.compile(r"source path is '.*/source/source'")
    assert not any([source_path_is_re.search(l.getMessage()) for l in caplog.records()])
@with_all_sources
def test_hostdocker_build(caplog, source_params):
    """End-to-end build through the host docker socket (DockerhostBuildManager).

    Pushes/pulls against LOCALHOST_REGISTRY unless MOCK replaces the docker
    client. Runs once per source provider via ``with_all_sources``.
    """
    if MOCK:
        mock_docker()
    image_name = ImageName(repo="atomic-reactor-test-ssh-image")
    remote_image = image_name.copy()
    remote_image.registry = LOCALHOST_REGISTRY
    m = DockerhostBuildManager("buildroot-dh-fedora", {
        "source": source_params,
        "image": remote_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    })
    results = m.build()
    dt = DockerTasker()
    dt.pull_image(remote_image, insecure=True)
    # The log-based source-mount check only applies to the path provider.
    if source_params['provider'] == 'path':
        assert_source_from_path_mounted_ok(caplog, m.temp_dir)
    assert len(results.build_logs) > 0
    #assert re.search(r'build json mounted in container .+"uri": %s' %
    #        os.path.join(dconstants.CONTAINER_SHARE_PATH, 'source'))
    # assert isinstance(results.built_img_inspect, dict)
    # assert len(results.built_img_inspect.items()) > 0
    # assert isinstance(results.built_img_info, dict)
    # assert len(results.built_img_info.items()) > 0
    # assert isinstance(results.base_img_info, dict)
    # assert len(results.base_img_info.items()) > 0
    # assert len(results.base_plugins_output) > 0
    # assert len(results.built_img_plugins_output) > 0
    # Clean up the build container and the pushed image.
    dt.remove_container(results.container_id)
    dt.remove_image(remote_image)
@pytest.mark.parametrize('source_params', [
    {'provider': 'git', 'uri': DOCKERFILE_GIT, 'provider_params': {'git_commit': 'error-build'}},
    {'provider': 'path', 'uri': 'file://' + DOCKERFILE_ERROR_BUILD_PATH},
])
def test_hostdocker_error_build(source_params):
    """Build a Dockerfile that is known to fail and assert a non-zero exit.

    BUG FIX: the parametrized ``source_params`` was previously ignored — the
    body hard-coded the git source, so the 'path' variant silently retested
    the git provider instead of the path provider.
    """
    if MOCK:
        mock_docker(wait_should_fail=True)
    image_name = TEST_IMAGE
    m = DockerhostBuildManager("buildroot-dh-fedora", {
        "source": source_params,
        "image": image_name,
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    })
    results = m.build()
    dt = DockerTasker()
    # The build must have produced logs and failed.
    assert len(results.build_logs) > 0
    assert results.return_code != 0
    # Clean up the (failed) build container.
    dt.remove_container(results.container_id)
@with_all_sources
def test_privileged_gitrepo_build(caplog, source_params):
    """End-to-end build inside a privileged build container
    (PrivilegedBuildManager), once per source provider.

    NOTE(review): apart from the repo name this duplicates
    ``test_privileged_build`` below — candidates for consolidation.
    """
    if MOCK:
        mock_docker()
    image_name = ImageName(repo="atomic-reactor-test-ssh-image")
    remote_image = image_name.copy()
    remote_image.registry = LOCALHOST_REGISTRY
    m = PrivilegedBuildManager("buildroot-fedora", {
        "source": source_params,
        "image": remote_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    })
    results = m.build()
    dt = DockerTasker()
    dt.pull_image(remote_image, insecure=True)
    # The log-based source-mount check only applies to the path provider.
    if source_params['provider'] == 'path':
        assert_source_from_path_mounted_ok(caplog, m.temp_dir)
    assert len(results.build_logs) > 0
    # assert isinstance(results.built_img_inspect, dict)
    # assert len(results.built_img_inspect.items()) > 0
    # assert isinstance(results.built_img_info, dict)
    # assert len(results.built_img_info.items()) > 0
    # assert isinstance(results.base_img_info, dict)
    # assert len(results.base_img_info.items()) > 0
    # assert len(results.base_plugins_output) > 0
    # assert len(results.built_img_plugins_output) > 0
    # Clean up the build container and the pushed image.
    dt.remove_container(results.container_id)
    dt.remove_image(remote_image)
@with_all_sources
def test_privileged_build(caplog, source_params):
    """End-to-end privileged build targeting TEST_IMAGE, once per source
    provider (see ``with_all_sources``)."""
    if MOCK:
        mock_docker()
    image_name = ImageName(repo=TEST_IMAGE)
    remote_image = image_name.copy()
    remote_image.registry = LOCALHOST_REGISTRY
    m = PrivilegedBuildManager("buildroot-fedora", {
        "source": source_params,
        "image": remote_image.to_str(),
        "parent_registry": LOCALHOST_REGISTRY,  # faster
        "target_registries_insecure": True,
        "parent_registry_insecure": True,
    })
    results = m.build()
    dt = DockerTasker()
    dt.pull_image(remote_image, insecure=True)
    # The log-based source-mount check only applies to the path provider.
    if source_params['provider'] == 'path':
        assert_source_from_path_mounted_ok(caplog, m.temp_dir)
    assert len(results.build_logs) > 0
    # assert isinstance(results.built_img_inspect, dict)
    # assert len(results.built_img_inspect.items()) > 0
    # assert isinstance(results.built_img_info, dict)
    # assert len(results.built_img_info.items()) > 0
    # assert isinstance(results.base_img_info, dict)
    # assert len(results.base_img_info.items()) > 0
    # assert len(results.base_plugins_output) > 0
    # assert len(results.built_img_plugins_output) > 0
    # Clean up the build container and the pushed image.
    dt.remove_container(results.container_id)
    dt.remove_image(remote_image)
def test_if_all_versions_match():
    """Check that the RPM spec, setup.py and package __version__ agree."""

    def read_version(fp, regex):
        # Extract exactly one regex capture from the file; anything else
        # (zero or multiple matches) means the file layout changed.
        with open(fp, "r") as fd:
            matches = re.findall(regex, fd.read())
        if len(matches) != 1:
            raise Exception("Version not found!")
        return matches[0]

    import atomic_reactor
    from atomic_reactor import __version__

    # Project root is two levels up from the package's __init__ module.
    package_file = inspect.getfile(atomic_reactor)
    project_dir = os.path.dirname(os.path.dirname(package_file))
    specfile = os.path.join(project_dir, "atomic-reactor.spec")
    setup_py = os.path.join(project_dir, "setup.py")

    spec_version = read_version(specfile, r"\nVersion:\s*(.+?)\s*\n")
    setup_py_version = read_version(setup_py, r"version=['\"](.+)['\"]")
    assert spec_version == __version__
    assert setup_py_version == __version__
| |
import abc
import time
import boto
from boto.emr.connection import EmrConnection
from boto.regioninfo import RegionInfo
from boto.emr.step import PigStep
import luigi
from luigi.s3 import S3Target, S3PathTask
from luigi.contrib.pig import PigJobTask
from amazon_web_service.luigi import target_factory
import logging
logger = logging.getLogger('luigi-interface')
class EmrClient(object):
    """Thin synchronous wrapper around boto's EmrConnection.

    Launches a keep-alive EMR cluster, submits Pig steps, and terminates the
    cluster, polling the jobflow state until each operation settles.
    """

    # The Hadoop version to use
    HADOOP_VERSION = '1.0.3'

    # The AMI version to use
    AMI_VERSION = '2.4.7'

    # Interval to wait between polls to EMR cluster in seconds
    CLUSTER_OPERATION_RESULTS_POLLING_SECONDS = 10

    # Timeout for EMR creation and ramp up in seconds
    CLUSTER_OPERATION_RESULTS_TIMEOUT_SECONDS = 60 * 30

    def __init__(self, region_name='us-east-1', aws_access_key_id=None, aws_secret_access_key=None):
        """Open an EMR connection in *region_name*.

        Credentials not given explicitly are read from the ``[aws]`` section
        of the luigi configuration file.
        """
        # If the access key is not specified, get it from the luigi config.cfg file
        if not aws_access_key_id:
            aws_access_key_id = luigi.configuration.get_config().get('aws', 'aws_access_key_id')
        if not aws_secret_access_key:
            aws_secret_access_key = luigi.configuration.get_config().get('aws', 'aws_secret_access_key')
        # Create the region in which to run
        region_endpoint = u'elasticmapreduce.%s.amazonaws.com' % (region_name)
        region = RegionInfo(name=region_name, endpoint=region_endpoint)
        self.emr_connection = EmrConnection(aws_access_key_id=aws_access_key_id,
                                            aws_secret_access_key=aws_secret_access_key,
                                            region=region)

    def launch_emr_cluster(self, cluster_name, log_uri, ec2_keyname=None, master_type='m1.small', core_type='m1.small', num_instances=2, hadoop_version='1.0.3', ami_version='2.4.7', ):
        """Start a new keep-alive, debugging-enabled cluster and block until
        it reaches WAITING.

        :returns: the jobflow id of the new cluster
        :raises RuntimeError: if the cluster fails to start or times out
        """
        # BUG FIX: hadoop_version/ami_version were previously ignored and the
        # class constants always used. Pass the arguments through; their
        # defaults equal the constants, so default calls behave identically.
        jobflow_id = self.emr_connection.run_jobflow(name=cluster_name,
                                                     log_uri=log_uri,
                                                     ec2_keyname=ec2_keyname,
                                                     master_instance_type=master_type,
                                                     slave_instance_type=core_type,
                                                     num_instances=num_instances,
                                                     keep_alive=True,
                                                     enable_debugging=True,
                                                     hadoop_version=hadoop_version,
                                                     steps=[],
                                                     ami_version=ami_version)
        # Log important information
        status = self.emr_connection.describe_jobflow(jobflow_id)
        logger.info('Creating new cluster %s with following details' % status.name)
        logger.info('jobflow ID:\t%s' % status.jobflowid)
        logger.info('Log URI:\t%s' % status.loguri)
        logger.info('Master Instance Type:\t%s' % status.masterinstancetype)
        # A cluster of size 1 does not have any slave instances
        if hasattr(status, 'slaveinstancetype'):
            logger.info('Slave Instance Type:\t%s' % status.slaveinstancetype)
        logger.info('Number of Instances:\t%s' % status.instancecount)
        logger.info('Hadoop Version:\t%s' % status.hadoopversion)
        logger.info('AMI Version:\t%s' % status.amiversion)
        logger.info('Keep Alive:\t%s' % status.keepjobflowalivewhennosteps)
        return self._poll_until_cluster_ready(jobflow_id)

    def add_pig_step(self, jobflow_id, pig_file, name='Pig Script', pig_versions='latest', pig_args=None):
        """Submit a Pig script step to *jobflow_id* and block until the
        cluster is WAITING again.

        :param pig_args: optional list of arguments for the Pig script
        """
        # BUG FIX: the default was a mutable list ([]) shared across calls;
        # use the None-sentinel idiom instead (same effective default).
        if pig_args is None:
            pig_args = []
        pig_step = PigStep(name=name,
                           pig_file=pig_file,
                           pig_versions=pig_versions,
                           pig_args=pig_args,
                           # action_on_failure='CONTINUE',
                           )
        self.emr_connection.add_jobflow_steps(jobflow_id, [pig_step])
        # Poll until the cluster is done working
        return self._poll_until_cluster_ready(jobflow_id)

    def shutdown_emr_cluster(self, jobflow_id):
        """Terminate *jobflow_id* and wait until it has shut down."""
        self.emr_connection.terminate_jobflow(jobflow_id)
        return self._poll_until_cluster_shutdown(jobflow_id)

    def get_jobflow_id(self):
        """Return the id of the first cluster that is WAITING for work."""
        return self.emr_connection.list_clusters(cluster_states=['WAITING']).clusters[0].id

    def get_master_dns(self):
        """
        Get the master node's public address
        """
        # BUG FIX: this method previously called self.get_master_dns(),
        # recursing forever. It must first look up the WAITING jobflow id.
        jobflow_id = self.get_jobflow_id()
        # Use the jobflow ID to get the status
        status = self.emr_connection.describe_jobflow(jobflow_id)
        # Return the master's public dns
        return status.masterpublicdnsname

    def _poll_until_cluster_ready(self, jobflow_id):
        """Poll *jobflow_id* until WAITING; raise on failure or timeout.

        :returns: the jobflow id, for chaining
        """
        start_time = time.time()
        is_cluster_ready = False
        while (not is_cluster_ready) and (time.time() - start_time < EmrClient.CLUSTER_OPERATION_RESULTS_TIMEOUT_SECONDS):
            # Get the state
            state = self.emr_connection.describe_jobflow(jobflow_id).state
            if state == u'WAITING':
                logger.info('Cluster intialized and is WAITING for work')
                is_cluster_ready = True
            elif (state == u'COMPLETED') or \
                 (state == u'SHUTTING_DOWN') or \
                 (state == u'FAILED') or \
                 (state == u'TERMINATED'):
                logger.error('Error starting cluster; status: %s' % state)
                # Poll until cluster shutdown
                self._poll_until_cluster_shutdown(jobflow_id)
                raise RuntimeError('Error, cluster failed to start')
            else:
                logger.debug('Cluster state: %s' % state)
                time.sleep(EmrClient.CLUSTER_OPERATION_RESULTS_POLLING_SECONDS)
        if not is_cluster_ready:
            # TODO shutdown cluster
            raise RuntimeError('Timed out waiting for EMR cluster to be active')
        return jobflow_id

    def _poll_until_cluster_shutdown(self, jobflow_id):
        """Poll *jobflow_id* until it is terminated; raise on timeout.

        NOTE(review): behavior preserved as-is — this returns False when
        shutdown completes (and the trailing ``return True`` is unreachable
        because a timeout raises first). Callers ignore the return value.
        """
        start_time = time.time()
        is_cluster_shutdown = False
        while (not is_cluster_shutdown) and (time.time() - start_time < EmrClient.CLUSTER_OPERATION_RESULTS_TIMEOUT_SECONDS):
            # Get the state
            state = self.emr_connection.describe_jobflow(jobflow_id).state
            if (state == u'TERMINATED') or (state == u'COMPLETED'):
                logger.info('Cluster successfully shutdown with status: %s' % state)
                return False
            elif state == u'FAILED':
                logger.error('Cluster shutdown with FAILED status')
                return False
            else:
                logger.debug('Cluster state: %s' % state)
                time.sleep(EmrClient.CLUSTER_OPERATION_RESULTS_POLLING_SECONDS)
        if not is_cluster_shutdown:
            # TODO shutdown cluster
            raise RuntimeError('Timed out waiting for EMR cluster to shut down')
        return True
class EmrTask(luigi.Task):
    """Common base class for the EMR-related luigi tasks.

    Subclasses signal completion by writing a token file; luigi then treats
    the task as done whenever that token already exists.
    """

    @abc.abstractmethod
    def output_token(self):
        """Return the luigi Target for this task's completion token.

        :rtype: Target:
        :returns: Target for Task completion token
        """
        # luigi.Task does not use ABCMeta, so @abc.abstractmethod alone is
        # not enforced at instantiation — raise explicitly as a backstop.
        raise RuntimeError("Please implement the output_token method")

    def output(self):
        """Default output: the completion token itself.

        Because luigi skips tasks whose output exists, the task only runs
        when the token file is absent.

        :rtype: Target:
        :returns: Target for Task completion token
        """
        return self.output_token()
class InitializeEmrCluster(EmrTask):
    """
    Luigi Task to initialize a new EMR cluster.
    This Task writes an output token to the location designated by the `output_token` method to
    indicate that the cluster has been successfully created. The Task will fail if the cluster
    cannot be initialized.
    Cluster creation in EMR takes between several seconds and several minutes; this Task will
    block until creation has finished.
    """
    # The s3 URI to write logs to, ex: s3://my.bucket/logs
    log_uri = luigi.Parameter()
    # The Key pair name of the key to connect
    ec2_keyname = luigi.Parameter(None)
    # The friendly name for the cluster
    cluster_name = luigi.Parameter(default='EMR Cluster')
    # The EC2 type to use for the master
    master_type = luigi.Parameter(default='m1.small')
    # The EC2 type to use for the slaves
    core_type = luigi.Parameter(default='m1.small')
    # The number of instances in the cluster
    num_instances = luigi.IntParameter(default=1)
    # The version of hadoop to use
    hadoop_version = luigi.Parameter(default='1.0.3')
    # The AMI version to use
    ami_version = luigi.Parameter(default='2.4.7')
    def run(self):
        """
        Create the EMR cluster
        """
        # NOTE(review): EmrClient() is built with defaults (region
        # 'us-east-1', credentials from luigi config) — confirm that is
        # intended for all deployments.
        emr_client = EmrClient()
        emr_client.launch_emr_cluster(ec2_keyname=self.ec2_keyname,
                                      log_uri=self.log_uri,
                                      cluster_name=self.cluster_name,
                                      master_type=self.master_type,
                                      core_type=self.core_type,
                                      num_instances=self.num_instances,
                                      hadoop_version=self.hadoop_version,
                                      ami_version=self.ami_version)
        # Mark the task complete by writing the completion token.
        target_factory.write_file(self.output_token())
class TerminateEmrCluster(EmrTask):
    """Luigi Task that shuts down the currently WAITING EMR cluster."""

    def run(self):
        """Terminate the WAITING cluster, then write the completion token."""
        client = EmrClient()
        flow_id = client.get_jobflow_id()
        client.shutdown_emr_cluster(flow_id)
        target_factory.write_file(self.output_token())
class EmrPigTask(EmrTask):
    """Luigi Task that submits a Pig script as a step to the WAITING EMR
    cluster and blocks until the step finishes."""

    # The absolute path to the root of the pigscript directory
    pig_path = luigi.Parameter()

    def run(self):
        """Add the Pig step, wait for it, and mark the task complete."""
        emr_client = EmrClient()
        jobflow_id = emr_client.get_jobflow_id()
        logger.debug('Adding task to jobflow: %s' % jobflow_id)
        pig_args = self.pig_args()
        emr_client.add_pig_step(jobflow_id=jobflow_id,
                                pig_file=self.pig_path,
                                pig_args=pig_args)
        # BUG FIX: write the completion token like the sibling EmrTasks do;
        # without it luigi never sees the task as complete and re-runs it.
        target_factory.write_file(self.output_token())

    @abc.abstractmethod
    def pig_args(self):
        """
        List of args to tell the pig task how to run
        :rtype: List:
        :returns: list of args for the pig task
        """
        # BUG FIX: the old message named a non-existent 'build_args' method.
        raise RuntimeError("Please implement the pig_args method")
| |
"""Support for OwnTracks."""
from collections import defaultdict
import json
import logging
import re
from aiohttp.web import json_response
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import mqtt
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_when_setup
from .config_flow import CONF_SECRET
from .messages import async_handle_message
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'owntracks'
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_WAYPOINT_IMPORT = 'waypoints'
CONF_WAYPOINT_WHITELIST = 'waypoint_whitelist'
CONF_MQTT_TOPIC = 'mqtt_topic'
CONF_REGION_MAPPING = 'region_mapping'
CONF_EVENTS_ONLY = 'events_only'
BEACON_DEV_ID = 'beacon'
DEFAULT_OWNTRACKS_TOPIC = 'owntracks/#'
CONFIG_SCHEMA = vol.Schema({
vol.Optional(DOMAIN, default={}): {
vol.Optional(CONF_MAX_GPS_ACCURACY): vol.Coerce(float),
vol.Optional(CONF_WAYPOINT_IMPORT, default=True): cv.boolean,
vol.Optional(CONF_EVENTS_ONLY, default=False): cv.boolean,
vol.Optional(CONF_MQTT_TOPIC, default=DEFAULT_OWNTRACKS_TOPIC):
mqtt.valid_subscribe_topic,
vol.Optional(CONF_WAYPOINT_WHITELIST): vol.All(
cv.ensure_list, [cv.string]),
vol.Optional(CONF_SECRET): vol.Any(
vol.Schema({vol.Optional(cv.string): cv.string}),
cv.string),
vol.Optional(CONF_REGION_MAPPING, default={}): dict,
vol.Optional(CONF_WEBHOOK_ID): cv.string,
}
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Initialize OwnTracks component."""
    # Seed shared state: YAML config, per-device trackers, and a slot for
    # the dispatcher-unsubscribe callback created at entry setup.
    domain_data = {
        'config': config[DOMAIN],
        'devices': {},
        'unsub': None,
    }
    hass.data[DOMAIN] = domain_data

    # An existing config entry means the import flow already ran.
    if hass.config_entries.async_entries(DOMAIN):
        return True

    # Kick off the import flow so YAML users get a config entry.
    hass.async_create_task(hass.config_entries.flow.async_init(
        DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
        data={}
    ))
    return True
async def async_setup_entry(hass, entry):
    """Set up OwnTracks entry.

    Builds the shared OwnTracksContext from YAML config (falling back to
    entry data for secret/webhook id), then wires up the three inbound
    paths: MQTT subscription, HTTP webhook, and the dispatcher that feeds
    the device_tracker platform.
    """
    config = hass.data[DOMAIN]['config']
    max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)
    waypoint_import = config.get(CONF_WAYPOINT_IMPORT)
    waypoint_whitelist = config.get(CONF_WAYPOINT_WHITELIST)
    # YAML config wins; the config entry supplies the value otherwise.
    secret = config.get(CONF_SECRET) or entry.data[CONF_SECRET]
    region_mapping = config.get(CONF_REGION_MAPPING)
    events_only = config.get(CONF_EVENTS_ONLY)
    mqtt_topic = config.get(CONF_MQTT_TOPIC)
    context = OwnTracksContext(hass, secret, max_gps_accuracy,
                               waypoint_import, waypoint_whitelist,
                               region_mapping, events_only, mqtt_topic)
    webhook_id = config.get(CONF_WEBHOOK_ID) or entry.data[CONF_WEBHOOK_ID]
    # Context must be stored before MQTT/webhook handlers can run.
    hass.data[DOMAIN]['context'] = context
    # Subscribe to MQTT only once the mqtt component is ready.
    async_when_setup(hass, 'mqtt', async_connect_mqtt)
    hass.components.webhook.async_register(
        DOMAIN, 'OwnTracks', webhook_id, handle_webhook)
    hass.async_create_task(hass.config_entries.async_forward_entry_setup(
        entry, 'device_tracker'))
    # Keep the unsubscribe handle so async_unload_entry can disconnect.
    hass.data[DOMAIN]['unsub'] = \
        hass.helpers.dispatcher.async_dispatcher_connect(
            DOMAIN, async_handle_message)
    return True
async def async_unload_entry(hass, entry):
    """Unload an OwnTracks config entry."""
    # Stop accepting webhook posts for this entry.
    hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
    # Unload the device_tracker platform that was forwarded at setup.
    await hass.config_entries.async_forward_entry_unload(entry, 'device_tracker')
    # Disconnect the dispatcher subscription created at setup.
    unsubscribe = hass.data[DOMAIN]['unsub']
    unsubscribe()
    return True
async def async_remove_entry(hass, entry):
    """Remove an OwnTracks config entry."""
    # Nothing to clean up unless a cloudhook was created.
    if not entry.data.get('cloudhook'):
        return
    # The cloud component must be loaded to delete the cloudhook.
    if 'cloud' not in hass.config.components:
        return
    await hass.components.cloud.async_delete_cloudhook(
        entry.data[CONF_WEBHOOK_ID])
async def async_connect_mqtt(hass, component):
    """Subscribe to MQTT topic.

    Invoked via ``async_when_setup`` once the mqtt component is ready.
    Decoded messages are forwarded to the dispatcher, where
    ``async_handle_message`` picks them up.
    """
    context = hass.data[DOMAIN]['context']
    async def async_handle_mqtt_message(msg):
        """Handle incoming OwnTracks message."""
        try:
            message = json.loads(msg.payload)
        except ValueError:
            # If invalid JSON
            _LOGGER.error("Unable to parse payload as JSON: %s", msg.payload)
            return
        # Record the originating topic so downstream handlers can derive
        # user/device, mirroring what the webhook path does.
        message['topic'] = msg.topic
        hass.helpers.dispatcher.async_dispatcher_send(
            DOMAIN, hass, context, message)
    # QoS 1: at-least-once delivery for location updates.
    await hass.components.mqtt.async_subscribe(
        context.mqtt_topic, async_handle_mqtt_message, 1)
    return True
async def handle_webhook(hass, webhook_id, request):
    """Handle webhook callback.

    iOS sets the "topic" as part of the payload.
    Android does not set a topic but adds headers to the request.

    Always answers HTTP 200 with an empty JSON list so that malformed
    packets are discarded by the client rather than retried.
    """
    context = hass.data[DOMAIN]['context']
    try:
        message = await request.json()
    except ValueError:
        _LOGGER.warning('Received invalid JSON from OwnTracks')
        return json_response([])
    # Android doesn't populate topic
    if 'topic' not in message:
        headers = request.headers
        user = headers.get('X-Limit-U')
        device = headers.get('X-Limit-D', user)
        if user:
            # Synthesize the topic the MQTT path would have used.
            topic_base = re.sub('/#$', '', context.mqtt_topic)
            message['topic'] = '{}/{}/{}'.format(topic_base, user, device)
        elif message.get('_type') != 'encrypted':
            # ROBUSTNESS FIX: use .get() — this is untrusted input, and a
            # payload missing '_type' previously raised KeyError (HTTP 500).
            _LOGGER.warning('No topic or user found in message. If on Android,'
                            ' set a username in Connection -> Identification')
            # Keep it as a 200 response so the incorrect packet is discarded
            return json_response([])
    hass.helpers.dispatcher.async_dispatcher_send(
        DOMAIN, hass, context, message)
    return json_response([])
class OwnTracksContext:
    """Hold the current OwnTracks context.

    Shared by the MQTT and webhook inbound paths; also buffers 'see'
    messages until the device_tracker platform installs the real
    async_see callback via set_async_see().
    """
    def __init__(self, hass, secret, max_gps_accuracy, import_waypoints,
                 waypoint_whitelist, region_mapping, events_only, mqtt_topic):
        """Initialize an OwnTracks context."""
        self.hass = hass
        self.secret = secret
        self.max_gps_accuracy = max_gps_accuracy
        # beacon names currently attached to each tracking device
        self.mobile_beacons_active = defaultdict(set)
        # region names each device is currently inside
        self.regions_entered = defaultdict(list)
        self.import_waypoints = import_waypoints
        self.waypoint_whitelist = waypoint_whitelist
        self.region_mapping = region_mapping
        self.events_only = events_only
        self.mqtt_topic = mqtt_topic
        # messages received before the device_tracker platform is ready;
        # flushed by set_async_see()
        self._pending_msg = []
    @callback
    def async_valid_accuracy(self, message):
        """Check if we should ignore this message.

        Returns False for missing/non-numeric/zero accuracy, or accuracy
        worse than the configured max_gps_accuracy.
        """
        acc = message.get('acc')
        if acc is None:
            return False
        try:
            acc = float(acc)
        except ValueError:
            return False
        if acc == 0:
            _LOGGER.warning(
                "Ignoring %s update because GPS accuracy is zero: %s",
                message['_type'], message)
            return False
        if self.max_gps_accuracy is not None and \
                acc > self.max_gps_accuracy:
            _LOGGER.info("Ignoring %s update because expected GPS "
                         "accuracy %s is not met: %s",
                         message['_type'], self.max_gps_accuracy,
                         message)
            return False
        return True
    @callback
    def set_async_see(self, func):
        """Set a new async_see function.

        Replaces the buffering placeholder below on this instance and
        replays any messages queued before the platform was ready.
        """
        self.async_see = func
        for msg in self._pending_msg:
            func(**msg)
        self._pending_msg.clear()
    # pylint: disable=method-hidden
    @callback
    def async_see(self, **data):
        """Send a see message to the device tracker.

        Placeholder used only until set_async_see() shadows it with the
        platform callback (hence the method-hidden pylint disable); until
        then messages are queued.
        """
        self._pending_msg.append(data)
    @callback
    def async_see_beacons(self, hass, dev_id, kwargs_param):
        """Set active beacons to the current location."""
        kwargs = kwargs_param.copy()
        # Mobile beacons should always be set to the location of the
        # tracking device. I get the device state and make the necessary
        # changes to kwargs.
        device_tracker_state = hass.states.get(
            "device_tracker.{}".format(dev_id))
        if device_tracker_state is not None:
            acc = device_tracker_state.attributes.get("gps_accuracy")
            lat = device_tracker_state.attributes.get("latitude")
            lon = device_tracker_state.attributes.get("longitude")
            if lat is not None and lon is not None:
                kwargs['gps'] = (lat, lon)
                kwargs['gps_accuracy'] = acc
            else:
                kwargs['gps'] = None
                kwargs['gps_accuracy'] = None
        # the battery state applies to the tracking device, not the beacon
        # kwargs location is the beacon's configured lat/lon
        kwargs.pop('battery', None)
        for beacon in self.mobile_beacons_active[dev_id]:
            kwargs['dev_id'] = "{}_{}".format(BEACON_DEV_ID, beacon)
            kwargs['host_name'] = beacon
            self.async_see(**kwargs)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
import numpy as np
from monty.serialization import loadfn
from pymatgen.electronic_structure.core import OrbitalType, Spin
from pymatgen.io.vasp import Vasprun
from pymatgen.util.testing import PymatgenTest
try:
from pymatgen.electronic_structure.boltztrap2 import (
BandstructureLoader,
BztInterpolator,
BztPlotter,
BztTransportProperties,
VasprunBSLoader,
VasprunLoader,
)
BOLTZTRAP2_PRESENT = True
except Exception:
BOLTZTRAP2_PRESENT = False
# BOLTZTRAP2_PRESENT = False
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "boltztrap2")
vrunfile = os.path.join(test_dir, "vasprun.xml")
vrun = Vasprun(vrunfile, parse_projected_eigen=True)
vrunfile_sp = os.path.join(test_dir, "vasprun_spin.xml")
vrun_sp = Vasprun(vrunfile_sp, parse_projected_eigen=True)
bs = loadfn(os.path.join(test_dir, "PbTe_bandstructure.json"))
bs_sp = loadfn(os.path.join(test_dir, "N2_bandstructure.json"))
bztinterp_fn = os.path.join(test_dir, "bztInterp.json.gz")
bzttransp_fn = os.path.join(test_dir, "bztTranspProps.json.gz")
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class VasprunBSLoaderTest(unittest.TestCase):
    """Regression tests for VasprunBSLoader on non-spin (PbTe) and
    spin-polarized (N2) fixtures loaded at module import."""

    def setUp(self):
        # Exercise all three construction paths; the last assignment is the
        # loader the test methods below actually use.
        self.loader = VasprunBSLoader(vrun)
        self.assertIsNotNone(self.loader)
        self.loader = VasprunBSLoader(bs, vrun.final_structure)
        self.assertIsNotNone(self.loader)
        self.loader = VasprunBSLoader.from_file(vrunfile)
        self.assertIsNotNone(self.loader)
        warnings.simplefilter("ignore")
        # Same three paths for the spin-polarized fixture.
        self.loader_sp = VasprunBSLoader(vrun_sp)
        self.assertIsNotNone(self.loader_sp)
        self.loader_sp = VasprunBSLoader(bs_sp, vrun_sp.final_structure)
        self.assertIsNotNone(self.loader_sp)
        self.loader_sp = VasprunBSLoader.from_file(vrunfile_sp)
        self.assertIsNotNone(self.loader_sp)
        warnings.simplefilter("ignore")
    def tearDown(self):
        # Restore default warning filtering changed in setUp.
        warnings.simplefilter("default")
    def test_properties(self):
        # Expected values are regression constants pinned to the fixtures.
        self.assertEqual(self.loader.is_spin_polarized, False)
        self.assertAlmostEqual(self.loader.fermi, 0.185266535678, 5)
        self.assertAlmostEqual(self.loader.structure.lattice.a, 4.64303565932548, 5)
        self.assertEqual(self.loader.nelect_all, 20.0)
        self.assertEqual(self.loader_sp.nelect_all, 10.0)
        self.assertTupleEqual(self.loader.ebands_all.shape, (20, 120))
        self.assertAlmostEqual(self.loader.ebands_all[10, 100], 0.2708057, 5)
        self.assertEqual(len(self.loader.proj_all), 1)
        self.assertTupleEqual(self.loader.proj_all[Spin.up].shape, (120, 20, 2, 9))
        self.assertEqual(self.loader_sp.is_spin_polarized, True)
        self.assertTupleEqual(self.loader_sp.ebands_all.shape, (24, 198))
        self.assertAlmostEqual(self.loader_sp.ebands_all[10, 100], 0.2543788, 4)
        self.assertAlmostEqual(self.loader_sp.ebands_all[22, 100], 0.2494617, 4)
        self.assertEqual(len(self.loader_sp.proj_all), 2)
        self.assertTupleEqual(self.loader_sp.proj_all[Spin.down].shape, (198, 12, 2, 9))
    def test_get_volume(self):
        self.assertAlmostEqual(self.loader.get_volume(), 477.6256714925874, 5)
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class BandstructureLoaderTest(unittest.TestCase):
    """Regression tests for BandstructureLoader built from band-structure
    JSON fixtures plus the final vasprun structure."""

    def setUp(self):
        self.loader = BandstructureLoader(bs, vrun.structures[-1])
        self.assertIsNotNone(self.loader)
        self.loader_sp = BandstructureLoader(bs_sp, vrun_sp.structures[-1])
        self.assertIsNotNone(self.loader_sp)
        self.assertTupleEqual(self.loader_sp.ebands_all.shape, (24, 198))
        warnings.simplefilter("ignore")
    def tearDown(self):
        # Restore default warning filtering changed in setUp.
        warnings.simplefilter("default")
    def test_properties(self):
        # Expected values are regression constants pinned to the fixtures.
        self.assertTupleEqual(self.loader.ebands_all.shape, (20, 120))
        self.assertAlmostEqual(self.loader.fermi, 0.185266535678, 5)
        self.assertAlmostEqual(self.loader.structure.lattice.a, 4.64303565932548, 5)
    def test_get_volume(self):
        self.assertAlmostEqual(self.loader.get_volume(), 477.6256714925874, 5)
    # def test_set_upper_lower_bands(self):
    #     min_bnd = min(self.loader_sp_up.ebands.min(),
    #                   self.loader_sp_dn.ebands.min())
    #     max_bnd = max(self.loader_sp_up.ebands.max(),
    #                   self.loader_sp_dn.ebands.max())
    #     self.loader_sp_up.set_upper_lower_bands(min_bnd, max_bnd)
    #     self.loader_sp_dn.set_upper_lower_bands(min_bnd, max_bnd)
    #     self.assertTupleEqual(self.loader_sp_up.ebands.shape, (14, 198))
    #     self.assertTupleEqual(self.loader_sp_dn.ebands.shape, (14, 198))
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class VasprunLoaderTest(unittest.TestCase):
    """Regression tests for the (older) VasprunLoader interface."""

    def setUp(self):
        self.loader = VasprunLoader(vrun)
        self.assertTupleEqual(self.loader.proj.shape, (120, 20, 2, 9))
        self.assertIsNotNone(self.loader)
        warnings.simplefilter("ignore")
    def tearDown(self):
        # Restore default warning filtering changed in setUp.
        warnings.simplefilter("default")
    def test_properties(self):
        # Expected values are regression constants pinned to the fixture.
        self.assertTupleEqual(self.loader.ebands.shape, (20, 120))
        self.assertAlmostEqual(self.loader.fermi, 0.185266535678, 5)
        self.assertAlmostEqual(self.loader.structure.lattice.a, 4.64303565932548, 5)
    def test_get_volume(self):
        self.assertAlmostEqual(self.loader.get_volume(), 477.6256714925874, 5)
    def test_from_file(self):
        # Alternate constructor: read directly from the vasprun.xml path.
        self.loader = VasprunLoader().from_file(vrunfile)
        self.assertIsNotNone(self.loader)
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class BztInterpolatorTest(unittest.TestCase):
    """Regression tests for BztInterpolator: construction, save/load
    round-trip, interpolated band structures, and DOS."""

    def setUp(self):
        # Build, save-to-file, then load-from-file; the loaded interpolator
        # is what the test methods use.
        self.loader = VasprunBSLoader(vrun)
        self.bztInterp = BztInterpolator(self.loader, lpfac=2)
        self.assertIsNotNone(self.bztInterp)
        self.bztInterp = BztInterpolator(self.loader, lpfac=2, save_bztInterp=True, fname=bztinterp_fn)
        self.assertIsNotNone(self.bztInterp)
        self.bztInterp = BztInterpolator(self.loader, load_bztInterp=True, fname=bztinterp_fn)
        self.assertIsNotNone(self.bztInterp)
        warnings.simplefilter("ignore")
        # Same build/save/load cycle for the spin-polarized fixture.
        self.loader_sp = VasprunBSLoader(vrun_sp)
        self.bztInterp_sp = BztInterpolator(self.loader_sp, lpfac=2)
        self.assertIsNotNone(self.bztInterp_sp)
        self.bztInterp_sp = BztInterpolator(self.loader_sp, lpfac=2, save_bztInterp=True, fname=bztinterp_fn)
        self.assertIsNotNone(self.bztInterp_sp)
        self.bztInterp_sp = BztInterpolator(self.loader_sp, lpfac=2, load_bztInterp=True, fname=bztinterp_fn)
        self.assertIsNotNone(self.bztInterp_sp)
        warnings.simplefilter("ignore")
    def tearDown(self):
        # Restore default warning filtering changed in setUp.
        warnings.simplefilter("default")
    def test_properties(self):
        # Expected shapes/values are regression constants for lpfac=2.
        self.assertTupleEqual(self.bztInterp.cband.shape, (6, 3, 3, 3, 29791))
        self.assertTupleEqual(self.bztInterp.eband.shape, (6, 29791))
        self.assertTupleEqual(self.bztInterp.coeffs.shape, (6, 322))
        self.assertEqual(self.bztInterp.data.nelect, 6.0)
        self.assertEqual(self.bztInterp.data.nelect_all, 20.0)
        self.assertTupleEqual(self.bztInterp.data.ebands.shape, (6, 120))
        self.assertTupleEqual(self.bztInterp_sp.cband.shape, (10, 3, 3, 3, 23275))
        self.assertTupleEqual(self.bztInterp_sp.eband.shape, (10, 23275))
        self.assertTupleEqual(self.bztInterp_sp.coeffs.shape, (10, 519))
        self.assertEqual(self.bztInterp_sp.data.nelect, 6.0)
        self.assertEqual(self.bztInterp_sp.data.nelect_all, 10.0)
        self.assertTupleEqual(self.bztInterp_sp.data.ebands.shape, (10, 198))
    def test_get_band_structure(self):
        # Default path, then an explicit L->K path with custom labels.
        sbs = self.bztInterp.get_band_structure()
        self.assertIsNotNone(sbs)
        self.assertTupleEqual(sbs.bands[Spin.up].shape, (6, 137))
        kpaths = [["L", "K"]]
        kp_lbl = {"L": np.array([0.5, 0.5, 0.5]), "K": np.array([0.375, 0.375, 0.75])}
        sbs = self.bztInterp.get_band_structure(kpaths, kp_lbl)
        self.assertIsNotNone(sbs)
        self.assertTupleEqual(sbs.bands[Spin.up].shape, (6, 20))
        sbs = self.bztInterp_sp.get_band_structure()
        self.assertIsNotNone(sbs)
        self.assertTupleEqual(sbs.bands[Spin.up].shape, (6, 143))
        self.assertTupleEqual(sbs.bands[Spin.down].shape, (4, 143))
    def test_tot_dos(self):
        # Total DOS at T=200 K on a 100-point chemical-potential grid.
        tot_dos = self.bztInterp.get_dos(T=200, npts_mu=100)
        self.assertIsNotNone(tot_dos)
        self.assertEqual(len(tot_dos.energies), 100)
        self.assertAlmostEqual(tot_dos.densities[Spin.up][0], 1.35371715, 5)
        tot_dos = self.bztInterp_sp.get_dos(T=200, npts_mu=100)
        self.assertIsNotNone(tot_dos)
        self.assertEqual(len(tot_dos.energies), 100)
        self.assertAlmostEqual(tot_dos.densities[Spin.up][75], 88.034456, 5)
        self.assertAlmostEqual(tot_dos.densities[Spin.down][75], 41.421367, 5)
    def test_tot_proj_dos(self):
        # Orbital-projected DOS; s/p/d gives three spd entries.
        tot_proj_dos = self.bztInterp.get_dos(partial_dos=True, T=200, npts_mu=100)
        self.assertIsNotNone(tot_proj_dos)
        self.assertEqual(len(tot_proj_dos.get_spd_dos().values()), 3)
        pdos = tot_proj_dos.get_spd_dos()[OrbitalType.s].densities[Spin.up][75]
        self.assertAlmostEqual(pdos, 2490.169396, 5)
        tot_proj_dos = self.bztInterp_sp.get_dos(partial_dos=True, T=200, npts_mu=100)
        self.assertIsNotNone(tot_proj_dos)
        self.assertEqual(len(tot_proj_dos.get_spd_dos().values()), 3)
        pdos = tot_proj_dos.get_spd_dos()[OrbitalType.s].densities[Spin.up][75]
        self.assertAlmostEqual(pdos, 166.4933305, 5)
        pdos = tot_proj_dos.get_spd_dos()[OrbitalType.s].densities[Spin.down][75]
        self.assertAlmostEqual(pdos, 272.194174, 5)
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class BztTransportPropertiesTest(unittest.TestCase):
    # Builds BztTransportProperties several ways (plain, with doping, saved to
    # and reloaded from file, spin-polarized) and checks the resulting arrays.
    def setUp(self):
        # Plain construction from a freshly interpolated band structure.
        loader = VasprunBSLoader(vrun)
        bztInterp = BztInterpolator(loader, lpfac=2)
        self.bztTransp = BztTransportProperties(bztInterp, temp_r=np.arange(300, 600, 100))
        self.assertIsNotNone(self.bztTransp)
        warnings.simplefilter("ignore")
        # Construction with explicit doping levels should also fill the
        # *_doping properties (checked via contain_props_doping below).
        self.bztTransp = BztTransportProperties(
            bztInterp, doping=10.0 ** np.arange(20, 22), temp_r=np.arange(300, 600, 100)
        )
        self.assertIsNotNone(self.bztTransp)
        self.assertEqual(self.bztTransp.contain_props_doping, True)
        warnings.simplefilter("ignore")
        # Save the computed transport properties to file ...
        bztInterp = BztInterpolator(loader, lpfac=2)
        self.bztTransp = BztTransportProperties(
            bztInterp,
            temp_r=np.arange(300, 600, 100),
            save_bztTranspProps=True,
            fname=bzttransp_fn,
        )
        self.assertIsNotNone(self.bztTransp)
        warnings.simplefilter("ignore")
        # ... then reload them from the same file (must run after the save above).
        bztInterp = BztInterpolator(loader, lpfac=2)
        self.bztTransp = BztTransportProperties(bztInterp, load_bztTranspProps=True, fname=bzttransp_fn)
        self.assertIsNotNone(self.bztTransp)
        warnings.simplefilter("ignore")
        # Repeat the same sequence for the spin-polarized calculation.
        loader_sp = VasprunBSLoader(vrun_sp)
        bztInterp_sp = BztInterpolator(loader_sp, lpfac=2)
        self.bztTransp_sp = BztTransportProperties(bztInterp_sp, temp_r=np.arange(300, 600, 100))
        self.assertIsNotNone(self.bztTransp_sp)
        warnings.simplefilter("ignore")
        bztInterp_sp = BztInterpolator(loader_sp, lpfac=2)
        self.bztTransp_sp = BztTransportProperties(
            bztInterp_sp,
            temp_r=np.arange(300, 600, 100),
            save_bztTranspProps=True,
            fname=bzttransp_fn,
        )
        self.assertIsNotNone(self.bztTransp_sp)
        warnings.simplefilter("ignore")
        bztInterp_sp = BztInterpolator(loader_sp, lpfac=2)
        self.bztTransp_sp = BztTransportProperties(bztInterp_sp, load_bztTranspProps=True, fname=bzttransp_fn)
        self.assertIsNotNone(self.bztTransp_sp)
        warnings.simplefilter("ignore")
    def tearDown(self):
        # Restore the warning filter changed repeatedly in setUp.
        warnings.simplefilter("default")
    def test_properties(self):
        # Tensor properties on the chemical-potential grid; first axis matches
        # the 3 temperatures in temp_r, trailing 3x3 is the tensor part.
        for p in [
            self.bztTransp.Conductivity_mu,
            self.bztTransp.Seebeck_mu,
            self.bztTransp.Kappa_mu,
            self.bztTransp.Effective_mass_mu,
            self.bztTransp.Power_Factor_mu,
        ]:
            self.assertTupleEqual(p.shape, (3, 3686, 3, 3))
        # Scalar carrier concentrations per mu point.
        for p in [
            self.bztTransp.Carrier_conc_mu,
            self.bztTransp.Hall_carrier_conc_trace_mu,
        ]:
            self.assertTupleEqual(p.shape, (3, 3686))
        # Same checks for the spin-polarized case (different mu-grid size).
        for p in [
            self.bztTransp_sp.Conductivity_mu,
            self.bztTransp_sp.Seebeck_mu,
            self.bztTransp_sp.Kappa_mu,
            self.bztTransp_sp.Effective_mass_mu,
            self.bztTransp_sp.Power_Factor_mu,
        ]:
            self.assertTupleEqual(p.shape, (3, 3252, 3, 3))
        for p in [
            self.bztTransp_sp.Carrier_conc_mu,
            self.bztTransp_sp.Hall_carrier_conc_trace_mu,
        ]:
            self.assertTupleEqual(p.shape, (3, 3252))
    def test_compute_properties_doping(self):
        # Doping-resolved properties, keyed by carrier type ("n" here):
        # shape (temperatures, doping levels, 3, 3).
        self.bztTransp.compute_properties_doping(doping=10.0 ** np.arange(20, 22))
        for p in [
            self.bztTransp.Conductivity_doping,
            self.bztTransp.Seebeck_doping,
            self.bztTransp.Kappa_doping,
            self.bztTransp.Effective_mass_doping,
            self.bztTransp.Power_Factor_doping,
        ]:
            self.assertTupleEqual(p["n"].shape, (3, 2, 3, 3))
        self.assertEqual(self.bztTransp.contain_props_doping, True)
        self.bztTransp_sp.compute_properties_doping(doping=10.0 ** np.arange(20, 22))
        for p in [
            self.bztTransp_sp.Conductivity_doping,
            self.bztTransp_sp.Seebeck_doping,
            self.bztTransp_sp.Kappa_doping,
            self.bztTransp_sp.Effective_mass_doping,
            self.bztTransp_sp.Power_Factor_doping,
        ]:
            self.assertTupleEqual(p["n"].shape, (3, 2, 3, 3))
        self.assertEqual(self.bztTransp_sp.contain_props_doping, True)
@unittest.skipIf(not BOLTZTRAP2_PRESENT, "No boltztrap2, skipping tests...")
class BztPlotterTest(unittest.TestCase):
    """Smoke-test the three figure producers of BztPlotter."""

    def test_plot(self):
        # Build interpolator and transport properties fresh for the plotter.
        loader = VasprunBSLoader(vrun)
        interp = BztInterpolator(loader, lpfac=2)
        transport = BztTransportProperties(interp, temp_r=np.arange(300, 600, 100))
        self.bztPlotter = BztPlotter(transport, interp)
        self.assertIsNotNone(self.bztPlotter)
        # Each plotting entry point must return a figure object.
        for fig in (
            self.bztPlotter.plot_props("S", "mu", "temp", temps=[300, 500]),
            self.bztPlotter.plot_bands(),
            self.bztPlotter.plot_dos(),
        ):
            self.assertIsNotNone(fig)
# Run the full unittest suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| |
#-------------------------------------------------------------------------------
#Name: rptest
#Purpose: test module for refprop and multiRP
#
#Author: Thelen, B.J.
# thelen_ben@yahoo.com
#-------------------------------------------------------------------------------
'''Allow refprop and multiRP module functional test of all functions'''
####################################################### test if some windows functions are working now with rp9.1
from decimal import Decimal
import platform
def settest(test):
    """Run the functional test suite against one refprop wrapper module.

    Parameters
    ----------
    test : str
        Which wrapper to exercise: ``'refprop'`` or ``'multiRP'``.

    Raises
    ------
    ValueError
        If *test* does not name a known wrapper module.  (Previously an
        unknown name was silently ignored, which hid typos at the call site.)
    """
    if test == 'refprop':
        import refprop as rp
        _maintest(rp)
    elif test == 'multiRP':
        import multiRP as rp
        _maintest(rp)
    else:
        raise ValueError(
            "unknown test module %r; expected 'refprop' or 'multiRP'" % (test,))
#main test def. for usage at refprop and multiRP
def _maintest(rp):
    """Functional smoke test of (almost) every wrapper function in *rp*.

    *rp* is either the ``refprop`` or the ``multiRP`` module; both expose the
    same API.  Results are printed rather than asserted, so this is a manual
    verification aid rather than an automated unit test.  Many calls are
    guarded with ``platform.system() == 'Linux'`` because those routines are
    not available from the Windows REFPROP library.  The call order matters:
    ``setup``/``setref``/``setmod``/``setktv`` mutate global library state
    that later calls rely on.
    """
    #examples and test setup
    rp.SetErrorDebug.off() #turn on =>> for testing purpose
    if rp.test(): #if True; rptest =>>for testing purpose
        print('refprop installed correctely')
        print('test results')
        print(rp.testresult)
    print('fluidlib')
    rp.fluidlib()
    print('\n')
    prop = rp.setup('def', 'air',)
    print('setup air')
    print(prop, '\n')
    x = prop['x']
    print('critp(x)')
    print(rp.critp(x), '\n')
    print('setup water ammonia')
    print(rp.setup('def', 'water', 'ammonia',), '\n')
    #alternative setup input
    rp.setup('def', ['water', 'ammonia'],)
    x = [0.5, 0.3]
    prop = rp.normalize(x)
    x = prop['x']
    prop = rp.critp(x)
    prop = rp.therm(prop['tcrit'], prop['Dcrit'], x)
    print('therm')
    print(prop, '\n')
    p = prop['p']
    # single-phase property routines evaluated at the critical point
    print('therm2')
    print(rp.therm2(prop['t'], prop['D'], x), '\n')
    print('therm0')
    print(rp.therm0(prop['t'], prop['D'], x), '\n')
    print('residual')
    print(rp.residual(prop['t'], prop['D'], x), '\n')
    print('entro')
    print(rp.entro(prop['t'], prop['D'], x), '\n')
    print('enthal')
    print(rp.enthal(prop['t'], prop['D'], x), '\n')
    print('ag')
    print(rp.ag(prop['t'], prop['D'], x), '\n')
    print('cvcp')
    print(rp.cvcp(prop['t'], prop['D'], x), '\n')
    print('dddp')
    print(rp.dddp(prop['t'], prop['D'], x), '\n')
    print('dddt')
    print(rp.dddt(prop['t'], prop['D'], x), '\n')
    print('dhd1')
    print(rp.dhd1(prop['t'], prop['D'], x), '\n')
    print('dpdd')
    print(rp.dpdd(prop['t'], prop['D'], x), '\n')
    print('dpdd2')
    print(rp.dpdd2(prop['t'], prop['D'], x), '\n')
    print('dpdt')
    print(rp.dpdt(prop['t'], prop['D'], x), '\n')
    D = prop['D']
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('dcdt')
        print(rp.dcdt(prop['t'], x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('dcdt2')
        print(rp.dcdt2(prop['t'], x), '\n')
    print('fgcty')
    print(rp.fgcty(prop['t'], D, x), '\n')
    print('gibbs')
    print(rp.gibbs(prop['t'], prop['D'], x), '\n')
    #~ print('fgcty2')
    #~ print(rp.fgcty2(prop['t'], prop['D'], x), '\n')
    prop = rp.therm3(prop['t'], prop['D'], x)
    print('therm3')
    print(prop, '\n')
    D = prop['D']
    # virial coefficients and per-component derivative routines
    print('virb')
    print(rp.virb(prop['t'], x), '\n')
    print('virc')
    print(rp.virc(prop['t'], x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('vird')
        print(rp.vird(prop['t'], x), '\n')
    print('virba')
    print(rp.virba(prop['t'], x), '\n')
    print('virca')
    print(rp.virca(prop['t'], x), '\n')
    print('cvcpk')
    print(rp.cvcpk(1, prop['t'], D), '\n')
    print('dbdt')
    print(rp.dbdt(prop['t'], x), '\n')
    print('dpddk')
    print(rp.dpddk(1, prop['t'], D), '\n')
    print('dpdtk')
    print(rp.dpdtk(2, prop['t'], D), '\n')
    D = 55
    t = 373
    prop = rp.press(t, D, x)
    print('press')
    print(prop, '\n')
    p = prop['p']
    print('purefld(1)')
    prop = rp.purefld(1)
    print(prop, '\n')
    x = [1]
    # kept for the resetup() round-trip check near the end of this function
    resetup_test_prop_d = prop
    # saturation routines on the pure fluid selected by purefld(1)
    print('satt')
    prop = rp.satt(t, x)
    print(prop, '\n')
    print('satp')
    prop = rp.satp(prop['p'], x)
    print(prop, '\n')
    print('satd')
    print(rp.satd(prop['Dliq'], x), '\n')
    print('sath')
    print(rp.sath(47000, x, 0), '\n')
    print('sate')
    print(rp.sate(0.46047E-13, x), '\n')
    print('sats')
    print(rp.sats(50, x, 0), '\n')
    print('purefld(0)')
    print(rp.purefld(0), '\n')
    x = [0.5, 0.3]
    x = rp.normalize(x)['x']
    print('csatk')
    print(rp.csatk(1, t), '\n')
    print('dptsatk')
    print(rp.dptsatk(1, t), '\n')
    print('cv2pk')
    print(rp.cv2pk(2, t, D), '\n')
    print('tprho')
    print(rp.tprho(t, p, x, 2, 1, 58), '\n')
    # general flash routines with various input pairs
    print('flsh, tp')
    prop = rp.flsh('tp', t, p, x)
    print(prop, '\n')
    print('flsh, th')
    print(rp.flsh('tH', 305, prop['h'], x, 1), '\n')
    print('flsh, tD')
    print(rp.flsh('tD', t, 30, x), '\n')
    print('info()')
    print(rp.info(), '\n')
    print('info(2)')
    print(rp.info(2), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('rmix2')
        print(rp.rmix2(x), '\n')
    print('xmass')
    prop = rp.xmass(x)
    print(prop, '\n')
    print('xmole')
    print(rp.xmole(prop['xkg']), '\n')
    print('limitx')
    print(rp.limitx(x, 'eos', t, D, p), '\n')
    print('limitk')
    print(rp.limitk('eos', 1, t, D, p), '\n')
    print('limits')
    print(rp.limits(x), '\n')
    print('flsh, ts')
    prop = rp.flsh('ts', t, 40, x)
    print(prop, '\n')
    print('flsh, te')
    print(rp.flsh('te', t, prop['e'], x), '\n')
    print('flsh, pD')
    prop = rp.flsh('Pd', p, D, x)
    print(prop, '\n')
    print('flsh, ph')
    prop = rp.flsh('ph', p, prop['h'], x)
    print(prop, '\n')
    print('flsh, ps')
    prop = rp.flsh('ps', p, prop['s'], x)
    print(prop, '\n')
    print('flsh, pe')
    prop = rp.flsh('pE', p, prop['e'], x)
    print(prop, '\n')
    print('flsh, es')
    prop = rp.flsh('es', prop['e'], prop['s'], x)
    print(prop, '\n')
    print('flsh, hs')
    prop = rp.flsh('hs', 40000, 100, x)
    print(prop, '\n')
    print('flsh, es')
    print(rp.flsh('es', 175, 13, x), '\n')
    print('flsh, Dh')
    print(rp.flsh('DH', 20, 18000, x), '\n')
    print('flsh, Ds')
    prop = rp.flsh('Ds', 20, 50, x)
    print(prop, '\n')
    print('flsh, De')
    prop = rp.flsh('DE', 20, prop['e'], x)
    print(prop, '\n')
    print('flsh, tq')
    prop = rp.flsh('tq', t, prop['q'], x)
    print(prop, '\n')
    print('flsh, pq')
    print(rp.flsh('pq', 1200, prop['q'], x), '\n')
    prop = rp.flsh('tp', 350, 1200, x)
    print('flsh, tp')
    print(prop, '\n')
    # liquid-phase single-phase flash (flsh1) input state
    s = prop['s']
    e = prop['e']
    h = prop['h']
    D = prop['D']
    t = prop['t']
    p = prop['p']
    Dmin = 40
    Dmax = 55
    print('flsh1, liq, ph')
    print(rp.flsh1('Ph', p, h, x, 1), '\n')
    print('getphase')
    print(rp.getphase(prop), '\n')
    print('flsh1, liq, pD')
    print(rp.flsh1('PD', p, D, x), '\n')
    print('flsh1, liq, ps')
    print(rp.flsh1('Ps', p, s, x), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, liq, th')
        print(rp.flsh1('th', t, h, x, Dmin=Dmin, Dmax=Dmax), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, liq, ts')
        print(rp.flsh1('ts', t, s, x, Dmin=Dmin, Dmax=Dmax), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, liq, te')
        print(rp.flsh1('te', t, e, x, Dmin=Dmin, Dmax=Dmax), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, liq, pe')
        print(rp.flsh1('Pe', p, e, x), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, liq, hs')
        print(rp.flsh1('hs', h, s, x, Dmin=Dmin, Dmax=Dmax), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, liq, Dh')
        print(rp.flsh1('Dh', D, h, x), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, liq, Ds')
        print(rp.flsh1('Ds', D, s, x), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, liq, De')
        print(rp.flsh1('De', D, e, x), '\n')
    # vapor-phase single-phase flash (flsh1) input state
    prop = rp.flsh('tp', 400, 100, x)
    s = prop['s']
    e = prop['e']
    h = prop['h']
    D = prop['D']
    Dmin = 0.01
    Dmax = 0.05
    t = prop['t']
    p = prop['p']
    print('flsh1, vap, ph')
    print(rp.flsh1('Ph', p, h, x, 2), '\n')
    print('getphase')
    print(rp.getphase(prop), '\n')
    print('flsh1, vap, pD')
    print(rp.flsh1('PD', p, D, x, 2), '\n')
    print('flsh1, vap, ps')
    print(rp.flsh1('Ps', p, s, x, 2), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, vap, th')
        print(rp.flsh1('th', t, h, x, Dmin=Dmin, Dmax=Dmax), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, vap, ts')
        print(rp.flsh1('ts', t, s, x, Dmin=Dmin, Dmax=Dmax), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, vap, te')
        print(rp.flsh1('te', t, e, x, Dmin=Dmin, Dmax=Dmax), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, vap, pe')
        print(rp.flsh1('Pe', p, e, x, 2), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, vap, hs')
        print(rp.flsh1('hs', h, s, x, Dmin=Dmin, Dmax=Dmax), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, vap, Dh')
        print(rp.flsh1('Dh', D, h, x), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, vap, Ds')
        print(rp.flsh1('Ds', D, s, x), '\n')
    #unsupported in Windows
    if platform.system() == 'Linux':
        print('flsh1, vap, De')
        print(rp.flsh1('De', D, e, x), '\n')
    print('cstar')
    print(rp.cstar(t, p, 8, x), '\n')
    print('fpv')
    print(rp.fpv(t, D, p, x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('excess')
        print(rp.excess(t, p, x, kph=2), '\n')
    # two-phase flash (flsh2) input state from a saturated p/q flash
    prop = rp.flsh('pq', 1200, 0.65, x)
    D = prop['D']
    Dliq = prop['Dliq']
    Dvap = prop['Dvap']
    xliq = prop['xliq']
    xvap = prop['xvap']
    e = prop['e']
    h = prop['h']
    s = prop['s']
    q = prop['q']
    p = prop['p']
    t = prop['t']
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('tpfl2')
        print(rp.flsh2('tp', t, p, x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('Dhfl2')
        print(rp.flsh2('Dh', D, h, x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('Dsfl2')
        print(rp.flsh2('Ds', D, s, x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('Defl2')
        print(rp.flsh2('De', D, e, x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('thfl2')
        print(rp.flsh2('th', t, h, x, ksat=0), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('tsfl2')
        print(rp.flsh2('ts', t, s, x, ksat=0), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('tefl2')
        print(rp.flsh2('te', t, e, x, ksat=0), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('tDfl2')
        print(rp.flsh2('tD', t, D, x, ksat=0), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('pDfl2')
        print(rp.flsh2('pD', p, D, x, ksat=0), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('phfl2')
        print(rp.flsh2('ph', p, h, x, ksat=0), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('psfl2')
        print(rp.flsh2('ps', p, s, x, ksat=0), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('pefl2')
        print(rp.flsh2('pe', p, e, x, ksat=0), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('tqfl2')
        print(rp.flsh2('tq', t, q, x, ksat=0), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('pqfl2')
        print(rp.flsh2('pq', p, q, x, ksat=0), '\n')
    #function not supported in Windows
    #~ if platform.system() == 'Linux':
        #~ print('Dqfl2')
        #~ print(rp.flsh2('Dq', D, q, x), '\n')
    # quality conversions and miscellaneous property routines
    prop = rp.flsh('tp', 340, 100, x)
    t = prop['t']
    Dliq = prop['Dliq']
    Dvap = prop['Dvap']
    xliq = prop['xliq']
    xvap = prop['xvap']
    print('qmass')
    prop = rp.qmass(prop['q'], xliq, xvap)
    print(prop, '\n')
    print('qmole')
    print(rp.qmole(prop['qkg'], prop['xlkg'], prop['xvkg']), '\n')
    print('wmol')
    print(rp.wmol(x), '\n')
    prop = rp.flsh('tp', 340, 100, x)
    print('dielec')
    print(rp.dielec(prop['t'], prop['D'], x), '\n')
    print('surten')
    print(rp.surten (t, Dliq, Dvap, xliq, xvap), '\n')
    print('surft')
    print(rp.surft(240, x), '\n')
    # melting / sublimation lines on a pure fluid
    rp.setup('def', 'water')
    print('meltt')
    print(rp.meltt(273.15, [1]), '\n')
    print('meltp')
    print(rp.meltp(100, [1]), '\n')
    print('sublt')
    print(rp.sublt(273.15, [1]), '\n')
    print('sublp')
    print(rp.sublp(0.1, [1]), '\n')
    # four-component mixture for transport and model-selection routines
    rp.setup('def', 'butane', 'ethane', 'propane', 'methane',)
    x = [0.5, 0.15, 0.3, 0.05]
    rp.setref('nbp')
    prop = rp.flsh('tp', 260, 150, x)
    D = prop['D']
    print('trnprp, setref')
    print(rp.trnprp(260, D, x), '\n')
    print('B12')
    print(rp.b12(260, x), '\n')
    print('chempot')
    print(rp.chempot(260, D, x), '\n')
    print('fugcof')
    print(rp.fugcof(260, D, x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('phiderv')
        print(rp.phiderv(1, 260, D, x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('getmod')
        print(rp.getmod(1, 'EOS'), '\n')
    rp.setmod('tcx', 'ecs', ['tc2', 'tc1', 'tc2', 'tc2'])
    rp.setup('def', 'butane', 'ethane', 'propane', 'methane',)
    x = [0.5, 0.15, 0.3, 0.05]
    prop = rp.flsh('tp', 260, 200, x)
    print('trnprp, setref NBP, setmod [tcx, ecs, tc2, tc1, tc2, tc2]')
    print(rp.trnprp(260, prop['D'], x), '\n')
    #function not supported in Windows
    if platform.system() == 'Linux':
        print('getmod')
        print(rp.getmod(3, 'tcx'), '\n')
    # reference-state changes; props captured for resetup() round-trips below
    rp.setref('oth', 1, [1], 0, 0, 273, 100)
    print('setref = OTH')
    prop = rp.flsh('tp', 260, 200, x)
    print(prop, '\n')
    resetup_test_prop_a = prop
    rp.setref('???', 1, [1], 0, 0, 373, 100)
    print('setref = ???')
    prop = rp.flsh('tp', 260, 200, x)
    print(prop, '\n')
    resetup_test_prop_b = prop
    print('name')
    print(rp.name(1), '\n')
    rp.setup('def', 'butane', 'ethane', 'propane', 'methane',)
    x = [0.5, 0.15, 0.3, 0.05]
    print('getktv')
    prop = rp.getktv(1, 3)
    print(prop, '\n')
    print('setktv')
    prop = rp.setktv(1, 3, 'lin', prop['fij'], prop['hfmix'],)
    print(prop, '\n')
    resetup_test_prop_c = prop
    print('reset setktv')
    print(rp.setktv(1, 2, 'rst'), '\n')
    print('getfij')
    print(rp.getfij('LIN'), '\n')
    # resetup() round-trips: restore library state from captured prop dicts
    print('resetup_test_prop, setref, setmod')
    print(resetup_test_prop_a, '\n')
    print('resetup')
    print(rp.resetup(resetup_test_prop_a), '\n')
    print('resetup_test_prop, setref(???), setmod')
    print(resetup_test_prop_b, '\n')
    print('resetup')
    print(rp.resetup(resetup_test_prop_b), '\n')
    print('resetup_test_prop, setktv')
    print(resetup_test_prop_c, '\n')
    print('resetup')
    print(rp.resetup(resetup_test_prop_c), '\n')
    print('resetup_test_prop, purefld')
    print(resetup_test_prop_d, '\n')
    print('resetup')
    print(rp.resetup(resetup_test_prop_d), '\n')
    #normalize([0.2, 0.2, 0.1, 0.1])
    print('normalize')
    print(rp.normalize([0.2, 0.2, 0.1, 0.1]), '\n')
    #setup_details
    print('setup_details')
    print(rp.setup_details({'hfld': ['BUTANE', 'ETHANE', 'PROPANE', 'METHANE'],
                            'D': 0.21683907260570098,
                            'Dvap': 0.09664613429889905, 'hfmix': 'HMX.BNC',
                            'setmod': {'hcomp': ['TC2', 'TC1', 'TC2', 'TC2'],
                                       'htype': 'TCX', 'hmix': 'ECS'},
                            'cp': -9999980.0,
                            'xliq': [Decimal('0.7125650648765283717349528049'),
                                     Decimal('0.04065955068790887177072495080'),
                                     Decimal('0.2449672538076863186375885862'),
                                     Decimal('0.001808130627876437856733658079')],
                            'xvap': [Decimal('0.2304027911956556081031262882'),
                                     Decimal('0.2886769748808782463382744488'),
                                     Decimal('0.3697982730402927396744896960'),
                                     Decimal('0.1111219608831734058841095670')],
                            'x': [0.5, 0.15, 0.3, 0.05], 'e': -13828.39837781548,
                            'h': -12906.055381248256, 'nc': 4,
                            'Dliq': 11.150114864150222, 'cv': -9999980.0,
                            'q': 0.4408579356823604, 'p': 200.0,
                            's': -44.047682476988044, 't': 260.0, 'w': -9999980.0,
                            'kph': 1, 'setref': {'p0': 100, 'ixflag': 1, 'h0': 0,
                                                 's0': 0, 't0': 273,
                                                 'hrf': ['OTH', '???']},
                            'hrf': 'DEF'}), '\n')
    #gerg04
    print('gerg04 = 1')
    rp.gerg04(1)
    print(rp.setup('def', 'butane', 'ethane', 'propane'), '\n')
    #reset gerg04
    print('gerg04 = 0')
    rp.gerg04(0)
    print(rp.setup('def', 'butane', 'ethane', 'propane'), '\n')
    #preos
    print('preos = 2')
    print(rp.preos(2), '\n')
    print('preos = -1')
    print(rp.preos(-1), '\n')
    print('preos = 0')
    print(rp.preos(0), '\n')
    print('preos = -1')
    print(rp.preos(-1), '\n')
    #setaga
    print('setaga')
    print(rp.setaga(), '\n')
    #unsetaga
    print('unsetaga')
    print(rp.unsetaga(), '\n')
    #setup_settings
    print('setup_setting')
    print(rp.setup_setting(), '\n')
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic return type used by the optional ``cls`` response-hook callables.
T = TypeVar('T')
# AutoRest convention: request/response JSON bodies are untyped.
JSONType = Any
# Signature of the optional ``cls`` hook: (pipeline response, deserialized model, response headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared serializer; validation happens server-side, so skip client-side checks.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    import_pipeline_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request that fetches a single import pipeline."""
    api_version = "2019-12-01-preview"
    accept = "application/json"

    # Expand the ARM resource-id template with the validated path arguments.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines/{importPipelineName}')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "importPipelineName": _SERIALIZER.url("import_pipeline_name", import_pipeline_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(template, **path_args)

    # Query string carries only the API version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Accept header for the JSON payload.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_create_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    import_pipeline_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the initial PUT request that creates an import pipeline."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2019-12-01-preview"
    accept = "application/json"

    # Expand the ARM resource-id template with the validated path arguments.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines/{importPipelineName}')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "importPipelineName": _SERIALIZER.url("import_pipeline_name", import_pipeline_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(template, **path_args)

    # Query string carries only the API version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Content-Type is only sent when a body content type was provided.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    import_pipeline_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the initial DELETE request for an import pipeline."""
    api_version = "2019-12-01-preview"

    # Expand the ARM resource-id template with the validated path arguments.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines/{importPipelineName}')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "importPipelineName": _SERIALIZER.url("import_pipeline_name", import_pipeline_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(template, **path_args)

    # DELETE sends no body and expects no payload, so no Accept header is set.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the GET request that lists all import pipelines of a registry."""
    api_version = "2019-12-01-preview"
    accept = "application/json"

    # Expand the ARM collection-path template with the validated path arguments.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(template, **path_args)

    # Query string carries only the API version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Accept header for the JSON payload.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
class ImportPipelinesOperations(object):
"""ImportPipelinesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_12_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client that sends the requests; serializer/deserializer
        # translate between model objects and the wire format.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
registry_name: str,
import_pipeline_name: str,
**kwargs: Any
) -> "_models.ImportPipeline":
"""Gets the properties of the import pipeline.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param import_pipeline_name: The name of the import pipeline.
:type import_pipeline_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImportPipeline, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2019_12_01_preview.models.ImportPipeline
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImportPipeline"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
import_pipeline_name=import_pipeline_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ImportPipeline', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines/{importPipelineName}'} # type: ignore
def _create_initial(
self,
resource_group_name: str,
registry_name: str,
import_pipeline_name: str,
import_pipeline_create_parameters: "_models.ImportPipeline",
**kwargs: Any
) -> "_models.ImportPipeline":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImportPipeline"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(import_pipeline_create_parameters, 'ImportPipeline')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
import_pipeline_name=import_pipeline_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ImportPipeline', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ImportPipeline', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines/{importPipelineName}'} # type: ignore
@distributed_trace
def begin_create(
    self,
    resource_group_name: str,
    registry_name: str,
    import_pipeline_name: str,
    import_pipeline_create_parameters: "_models.ImportPipeline",
    **kwargs: Any
) -> LROPoller["_models.ImportPipeline"]:
    """Creates an import pipeline for a container registry with the specified parameters.

    :param resource_group_name: The name of the resource group to which the container registry
     belongs.
    :type resource_group_name: str
    :param registry_name: The name of the container registry.
    :type registry_name: str
    :param import_pipeline_name: The name of the import pipeline.
    :type import_pipeline_name: str
    :param import_pipeline_create_parameters: The parameters for creating an import pipeline.
    :type import_pipeline_create_parameters:
     ~azure.mgmt.containerregistry.v2019_12_01_preview.models.ImportPipeline
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either ImportPipeline or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2019_12_01_preview.models.ImportPipeline]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImportPipeline"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial PUT. 'cls' is overridden so the
        # raw pipeline response (not a deserialized model) seeds the poller.
        raw_result = self._create_initial(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            import_pipeline_name=import_pipeline_name,
            import_pipeline_create_parameters=import_pipeline_create_parameters,
            content_type=content_type,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map only applies to the initial call; drop it before the
    # remaining kwargs are forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final-state deserializer invoked by the poller once the LRO settles.
        response = pipeline_response.http_response
        deserialized = self._deserialize('ImportPipeline', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines/{importPipelineName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name: str,
    registry_name: str,
    import_pipeline_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial DELETE of the delete long-running operation.

    Returns ``None`` (or ``cls(...)`` when a custom callback is supplied);
    any status other than 200/202/204 raises
    :class:`~azure.core.exceptions.HttpResponseError`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError,
                 404: ResourceNotFoundError,
                 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    request = build_delete_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        registry_name=registry_name,
        import_pipeline_name=import_pipeline_name,
        template_url=self._delete_initial.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code not in (200, 202, 204):
        map_error(status_code=http_response.status_code,
                  response=http_response, error_map=error_map)
        raise HttpResponseError(response=http_response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines/{importPipelineName}'}  # type: ignore
@distributed_trace
def begin_delete(
    self,
    resource_group_name: str,
    registry_name: str,
    import_pipeline_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Deletes an import pipeline from a container registry.

    :param resource_group_name: The name of the resource group to which the container registry
     belongs.
    :type resource_group_name: str
    :param registry_name: The name of the container registry.
    :type registry_name: str
    :param import_pipeline_name: The name of the import pipeline.
    :type import_pipeline_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial DELETE. 'cls' is overridden so
        # the raw pipeline response seeds the poller.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            import_pipeline_name=import_pipeline_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map only applies to the initial call; drop it before the
    # remaining kwargs are forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Delete has no body to deserialize; only invoke the custom callback.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines/{importPipelineName}'}  # type: ignore
@distributed_trace
def list(
    self,
    resource_group_name: str,
    registry_name: str,
    **kwargs: Any
) -> Iterable["_models.ImportPipelineListResult"]:
    """Lists all import pipelines for the specified container registry.

    :param resource_group_name: The name of the resource group to which the container registry
     belongs.
    :type resource_group_name: str
    :param registry_name: The name of the container registry.
    :type registry_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ImportPipelineListResult or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2019_12_01_preview.models.ImportPipelineListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImportPipelineListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page uses the operation's templated URL; later pages reuse
        # the service-provided nextLink verbatim.
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                template_url=self.list.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Split one page into (next_link, iterator of items) for ItemPaged.
        deserialized = self._deserialize("ImportPipelineListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, mapping ARM errors on any non-200 response.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )

list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/importPipelines'}  # type: ignore
| |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import hashlib
import os
from fabric.api import env
from fabric.api import run
from fabric.api import settings
from oslo_config import cfg
from cloudferrylib.base.action import action
from cloudferrylib.os.actions import task_transfer
from cloudferrylib.utils.utils import forward_agent
from cloudferrylib.utils import utils as utl
from cloudferrylib.utils import qemu_img as qemu_img_util
# Shared oslo.config handle used by this action.
CONF = cfg.CONF

# Keys used to navigate the migration 'info' dictionaries.
CLOUD = 'cloud'
BACKEND = 'backend'
CEPH = 'ceph'
ISCSI = 'iscsi'
COMPUTE = 'compute'
INSTANCES = 'instances'
INSTANCE_BODY = 'instance'
INSTANCE = 'instance'
DIFF = 'diff'
EPHEMERAL = 'ephemeral'
DIFF_OLD = 'diff_old'
EPHEMERAL_OLD = 'ephemeral_old'
PATH_DST = 'path_dst'
HOST_DST = 'host_dst'
PATH_SRC = 'path_src'
HOST_SRC = 'host_src'
BACKING_FILE_DST = 'backing_file_dst'
TEMP = 'temp'
FLAVORS = 'flavors'
# Maps (source backend, destination backend) to the transfer driver name
# used by task_transfer.TaskTransfer.
TRANSPORTER_MAP = {CEPH: {CEPH: 'SSHCephToCeph',
                          ISCSI: 'SSHCephToFile'},
                   ISCSI: {CEPH: 'SSHFileToCeph',
                           ISCSI: 'SSHFileToFile'}}
class TransportEphemeral(action.Action):
    """Copy ephemeral disks of migrated instances from source to destination.

    The transfer strategy depends on the (source, destination) compute
    backend pair (ceph/iscsi); ceph<->iscsi pairs need an intermediate
    qemu-img conversion step.
    """
    # TODO constants
    def run(self, info=None, **kwargs):
        """Copy ephemeral storage for every instance flagged is_ephemeral.

        Returns {'info': new_info} with the (possibly updated) instance map.
        """
        # Deep-copy so the incoming task info is never mutated in place.
        info = copy.deepcopy(info)
        # Init before run
        new_info = {
            utl.INSTANCES_TYPE: {
            }
        }
        # Process instances one at a time: each gets its own one-entry
        # info dict so transfer drivers see a single instance per run.
        for instance_id, instance in info[utl.INSTANCES_TYPE].iteritems():
            is_ephemeral = instance[utl.INSTANCE_BODY]['is_ephemeral']
            one_instance = {
                utl.INSTANCES_TYPE: {
                    instance_id: instance
                }
            }
            if is_ephemeral:
                self.copy_ephemeral(self.src_cloud,
                                    self.dst_cloud,
                                    one_instance)
            new_info[utl.INSTANCES_TYPE].update(
                one_instance[utl.INSTANCES_TYPE])
        return {
            'info': new_info
        }

    @staticmethod
    def delete_remote_file_on_compute(path_file, host_cloud,
                                      host_instance):
        """Remove ``path_file`` on a compute node reached via the cloud host.

        Runs ssh from ``host_cloud`` (the controller we have fabric access
        to) into ``host_instance`` (the compute node) and rm -rf's the path.
        """
        with settings(host_string=host_cloud,
                      connection_attempts=env.connection_attempts):
            with forward_agent(env.key_filename):
                run("ssh -oStrictHostKeyChecking=no %s 'rm -rf %s'" %
                    (host_instance, path_file))

    def copy_data_via_ssh(self, src_cloud, dst_cloud, info, body, resources,
                          types):
        """Transfer disk data over ssh using the backend-appropriate driver.

        The driver is chosen from TRANSPORTER_MAP by (src, dst) backend,
        unless direct compute transfer is configured, in which case the
        configured copy_backend is used.
        """
        dst_storage = dst_cloud.resources[resources]
        src_compute = src_cloud.resources[resources]
        src_backend = src_compute.config.compute.backend
        dst_backend = dst_storage.config.compute.backend
        ssh_driver = (CONF.migrate.copy_backend
                      if CONF.migrate.direct_compute_transfer
                      else TRANSPORTER_MAP[src_backend][dst_backend])
        transporter = task_transfer.TaskTransfer(
            self.init,
            ssh_driver,
            resource_name=types,
            resource_root_name=body)
        transporter.run(info=info)

    def copy_ephemeral(self, src_cloud, dst_cloud, info):
        """Dispatch the ephemeral-disk copy based on the backend pair."""
        dst_storage = dst_cloud.resources[utl.COMPUTE_RESOURCE]
        src_compute = src_cloud.resources[utl.COMPUTE_RESOURCE]
        src_backend = src_compute.config.compute.backend
        dst_backend = dst_storage.config.compute.backend
        if (src_backend == CEPH) and (dst_backend == ISCSI):
            self.copy_ephemeral_ceph_to_iscsi(src_cloud, dst_cloud, info)
        elif (src_backend == ISCSI) and (dst_backend == CEPH):
            self.copy_ephemeral_iscsi_to_ceph(src_cloud, info)
        else:
            # Same-backend pairs can be copied directly.
            self.copy_data_via_ssh(src_cloud,
                                   dst_cloud,
                                   info,
                                   utl.EPHEMERAL_BODY,
                                   utl.COMPUTE_RESOURCE,
                                   utl.INSTANCES_TYPE)
        self.rebase_diff(dst_cloud, info)

    def copy_ephemeral_ceph_to_iscsi(self, src_cloud, dst_cloud, info):
        """Copy ephemeral disks from a ceph source to an iscsi destination.

        Each rbd volume is first converted to a qcow2 file in the source
        temp dir, transferred file-to-file, then rebased onto the backing
        file detected at the destination.
        """
        transporter = task_transfer.TaskTransfer(
            self.init,
            TRANSPORTER_MAP[ISCSI][ISCSI],
            resource_name=utl.INSTANCES_TYPE,
            resource_root_name=utl.EPHEMERAL_BODY)
        instances = info[utl.INSTANCES_TYPE]
        temp_src = src_cloud.cloud_config.cloud.temp
        host_dst = dst_cloud.cloud_config.cloud.ssh_host
        qemu_img_dst = dst_cloud.qemu_img
        qemu_img_src = src_cloud.qemu_img
        temp_path_src = temp_src + "/%s" + utl.DISK_EPHEM
        for inst_id, inst in instances.iteritems():
            path_src_id_temp = temp_path_src % inst_id
            host_compute_dst = inst[EPHEMERAL][HOST_DST]
            # Remember the destination backing file before deleting the
            # placeholder disk; it is re-applied after the transfer.
            inst[EPHEMERAL][
                BACKING_FILE_DST] = qemu_img_dst.detect_backing_file(
                inst[EPHEMERAL][PATH_DST], host_compute_dst)
            self.delete_remote_file_on_compute(inst[EPHEMERAL][PATH_DST],
                                               host_dst,
                                               host_compute_dst)
            # Convert the rbd volume into a temporary qcow2 file.
            qemu_img_src.convert(
                utl.QCOW2,
                'rbd:%s' % inst[EPHEMERAL][PATH_SRC], path_src_id_temp)
            inst[EPHEMERAL][PATH_SRC] = path_src_id_temp
        transporter.run(info=info)
        for inst_id, inst in instances.iteritems():
            host_compute_dst = inst[EPHEMERAL][HOST_DST]
            qemu_img_dst.diff_rebase(inst[EPHEMERAL][BACKING_FILE_DST],
                                     inst[EPHEMERAL][PATH_DST],
                                     host_compute_dst)

    def copy_ephemeral_iscsi_to_ceph(self, src_cloud, info):
        """Copy ephemeral disks from an iscsi source to a ceph destination.

        Source files are first converted to raw (ceph stores raw volumes)
        and then transferred file-to-ceph.
        """
        instances = info[utl.INSTANCES_TYPE]
        qemu_img_src = src_cloud.qemu_img
        transporter = task_transfer.TaskTransfer(
            self.init,
            TRANSPORTER_MAP[ISCSI][CEPH],
            resource_name=utl.INSTANCES_TYPE,
            resource_root_name=utl.EPHEMERAL_BODY)
        for inst_id, inst in instances.iteritems():
            path_src = inst[EPHEMERAL][PATH_SRC]
            path_src_temp_raw = path_src + "." + utl.RAW
            host_src = inst[EPHEMERAL][HOST_SRC]
            qemu_img_src.convert(utl.RAW,
                                 path_src,
                                 path_src_temp_raw,
                                 host_src)
            inst[EPHEMERAL][PATH_SRC] = path_src_temp_raw
        transporter.run(info=info)

    @staticmethod
    def rebase_diff(dst_cloud, info):
        """Rebase each instance's diff disk onto its migrated base image.

        The destination base image filename is the sha1 of the image id
        (Nova's _base cache naming convention).
        """
        for instance_id, obj in info[utl.INSTANCES_TYPE].items():
            image_id = obj['instance']['image_id']
            new_backing_file = hashlib.sha1(image_id).hexdigest()
            diff = obj['diff']
            host = diff['host_dst']
            qemu_img = qemu_img_util.QemuImg(dst_cloud.config.dst,
                                             dst_cloud.config.migrate,
                                             host)
            diff_path = diff['path_dst']
            backing_path = qemu_img.detect_backing_file(diff_path, None)
            backing_dir = os.path.dirname(backing_path)
            new_backing_path = os.path.join(backing_dir, new_backing_file)
            qemu_img.diff_rebase(new_backing_path, diff_path)
| |
import json
import socket
from rejected import consumer, testing
from tornado import concurrent, gen
import mock
from vetoes import config, service
class Consumer(service.HTTPServiceMixin, consumer.Consumer):
    """Minimal fixture consumer that performs a single HTTP service call."""

    def __init__(self, *args, **kwargs):
        kwargs['service_map'] = {'fetch-stats': 'httpbin'}
        super(Consumer, self).__init__(*args, **kwargs)
        # Request attributes that individual tests tweak before processing.
        self.method = 'GET'
        self.request_body = None
        self.request_json = None

    @gen.coroutine
    def process(self):
        yield self.call_http_service(
            'fetch-stats', self.method, 'stats',
            body=self.request_body, json=self.request_json)

    def get_service_url(self, service, *path, **kwargs):
        fallback = 'http://httpbin.org/status/200'
        return self.settings.get('service_url', fallback)
class ConfigurableConsumer(config.TimeoutConfigurationMixin, Consumer):
    # Same fixture consumer with TimeoutConfigurationMixin layered on, so the
    # timeout tests can exercise per-function/per-service timeout settings.
    pass
class HTTPServiceMixinTests(testing.AsyncTestCase):
    """Tests for vetoes.service.HTTPServiceMixin.call_http_service."""

    def setUp(self):
        super(HTTPServiceMixinTests, self).setUp()
        # Replace the HTTP client with a mock that resolves immediately.
        self.consumer.http = mock.Mock()
        self.http_response = mock.Mock(code=200, request_time=0)
        self.consumer.http.fetch.return_value = concurrent.Future()
        self.consumer.http.fetch.return_value.set_result(self.http_response)

    def get_consumer(self):
        return Consumer

    @testing.gen_test
    def test_that_sentry_context_is_managed(self):
        with mock.patch.multiple(self.consumer,
                                 set_sentry_context=mock.DEFAULT,
                                 unset_sentry_context=mock.DEFAULT) as context:
            yield self.process_message()
            context['set_sentry_context'].assert_called_once_with(
                'service_invoked', 'httpbin')
            context['unset_sentry_context'].assert_called_once_with(
                'service_invoked')

    @testing.gen_test
    def test_that_metrics_are_emitted(self):
        measurement = yield self.process_message()
        self.assertIn('http.fetch-stats.200', measurement.values)
        self.assertEqual(measurement.values['http.fetch-stats.200'],
                         self.http_response.request_time)

    @testing.gen_test
    def test_that_timeout_result_in_processing_exceptions(self):
        self.http_response.code = 599
        with self.assertRaises(consumer.ProcessingException):
            yield self.process_message()
        # process_message raises, so its return value is never bound; read
        # the measurement off the consumer instead (as the socket-error
        # test below does).
        self.assertEqual(
            self.consumer._measurement.values['http.fetch-stats.599'],
            self.http_response.request_time)

    @testing.gen_test
    def test_that_rate_limiting_result_in_processing_exceptions(self):
        self.http_response.code = 429
        with self.assertRaises(consumer.ProcessingException):
            yield self.process_message()
        # Same as above: the measurement must come from the consumer.
        self.assertEqual(
            self.consumer._measurement.values['http.fetch-stats.429'],
            self.http_response.request_time)

    @testing.gen_test
    def test_that_call_http_service_accepts_body(self):
        self.consumer.method = 'POST'
        self.consumer.request_body = mock.sentinel.body
        yield self.process_message()
        self.consumer.http.fetch.assert_called_once_with(
            self.consumer.get_service_url('fetch-stats'),
            headers={'Correlation-Id': self.correlation_id},
            method='POST', body=mock.sentinel.body, raise_error=False)

    @testing.gen_test
    def test_that_call_http_service_jsonifies(self):
        self.consumer.method = 'POST'
        self.consumer.request_json = {'one': 1}
        yield self.process_message()
        self.consumer.http.fetch.assert_called_once_with(
            self.consumer.get_service_url('fetch-stats'),
            method='POST', body=json.dumps({'one': 1}).encode('utf-8'),
            headers={'Content-Type': 'application/json',
                     'Correlation-Id': self.correlation_id},
            raise_error=False)

    @testing.gen_test
    def test_that_socket_errors_result_in_processing_exception(self):
        future = concurrent.Future()
        future.set_exception(socket.error(42, 'message'))
        self.consumer.http.fetch.return_value = future
        with self.assertRaises(consumer.ProcessingException):
            yield self.process_message()
        self.assertGreater(
            self.consumer._measurement.values['http.fetch-stats.timeout'],
            self.http_response.request_time)
        self.assertEqual(
            self.consumer._measurement.counters['errors.socket.42'], 1)

    @testing.gen_test
    def test_that_raise_error_can_be_overridden(self):
        self.http_response.code = 500
        self.http_response.rethrow.side_effect = RuntimeError
        response = yield self.consumer.call_http_service(
            'fetch-stats', 'GET', raise_error=False)
        self.consumer.http.fetch.assert_called_once_with(
            self.consumer.get_service_url('fetch-stats'),
            method='GET', raise_error=False)
        self.assertIs(response, self.http_response)

    @testing.gen_test
    def test_that_url_kwarg_skips_service_lookup(self):
        with mock.patch.multiple(self.consumer,
                                 set_sentry_context=mock.DEFAULT,
                                 unset_sentry_context=mock.DEFAULT) as context:
            response = yield self.consumer.call_http_service(
                'frobinicate', 'GET', url='https://google.com')
            self.consumer.http.fetch.assert_called_once_with(
                'https://google.com', method='GET', raise_error=False)
            self.assertIs(response, self.http_response)
            context['set_sentry_context'].assert_called_once_with(
                'service_invoked', 'frobinicate')
            context['unset_sentry_context'].assert_called_once_with(
                'service_invoked')

    @testing.gen_test
    def test_that_auth_parameters_are_detected_in_service_url(self):
        self.consumer.settings['service_url'] = 'http://foo:bar@example.com'
        yield self.process_message()
        self.consumer.http.fetch.assert_called_once_with(
            'http://example.com', method='GET', raise_error=mock.ANY,
            body=mock.ANY, headers=mock.ANY,
            auth_username='foo', auth_password='bar')

    @testing.gen_test
    def test_that_auth_parameters_are_detected_in_overridden_url(self):
        with mock.patch.multiple(self.consumer,
                                 set_sentry_context=mock.DEFAULT,
                                 unset_sentry_context=mock.DEFAULT) as context:
            response = yield self.consumer.call_http_service(
                'frobinicate', 'GET',
                url='http://foo:bar@example.com:8080/path')
            self.consumer.http.fetch.assert_called_once_with(
                'http://example.com:8080/path',
                method='GET', raise_error=False,
                auth_username='foo', auth_password='bar',
            )
            self.assertIs(response, self.http_response)

    @testing.gen_test
    def test_that_auth_parameters_are_urldecoded(self):
        self.consumer.settings['service_url'] = 'http://foo:fu%3au@example.com'
        yield self.process_message()
        self.consumer.http.fetch.assert_called_once_with(
            'http://example.com', method='GET', raise_error=mock.ANY,
            body=mock.ANY, headers=mock.ANY,
            auth_username='foo', auth_password='fu:u')

    @testing.gen_test
    def test_that_auth_keywords_are_preferred(self):
        with mock.patch.multiple(self.consumer,
                                 set_sentry_context=mock.DEFAULT,
                                 unset_sentry_context=mock.DEFAULT) as context:
            response = yield self.consumer.call_http_service(
                'frobinicate', 'GET', url='http://example.com:8080/path',
                auth_username='foo', auth_password='bar')
            self.consumer.http.fetch.assert_called_once_with(
                'http://example.com:8080/path',
                method='GET', raise_error=False,
                auth_username='foo', auth_password='bar',
            )
            self.assertIs(response, self.http_response)

    @testing.gen_test
    def test_that_auth_does_not_require_password(self):
        self.consumer.settings['service_url'] = 'http://foo@example.com'
        yield self.process_message()
        self.consumer.http.fetch.assert_called_once_with(
            'http://example.com', method='GET', raise_error=mock.ANY,
            body=mock.ANY, headers=mock.ANY,
            auth_username='foo', auth_password=None)

    @testing.gen_test
    def test_that_auth_does_not_require_username(self):
        # Not sure of the use case here but ... whatever
        self.consumer.settings['service_url'] = 'http://:foo@example.com'
        yield self.process_message()
        self.consumer.http.fetch.assert_called_once_with(
            'http://example.com', method='GET', raise_error=mock.ANY,
            body=mock.ANY, headers=mock.ANY,
            auth_username=None, auth_password='foo')
class TimeoutConfiguredTests(testing.AsyncTestCase):
    """Verify that TimeoutConfigurationMixin injects request_timeout."""

    def setUp(self):
        super(TimeoutConfiguredTests, self).setUp()
        self.http_response = mock.Mock(code=200, request_time=0)
        fetch_future = concurrent.Future()
        fetch_future.set_result(self.http_response)
        self.consumer.http = mock.Mock()
        self.consumer.http.fetch.return_value = fetch_future

    def get_consumer(self):
        return ConfigurableConsumer

    @testing.gen_test
    def test_that_timeout_is_passed_through(self):
        yield self.process_message()
        default_timeout = self.consumer.get_timeout('default')
        self.consumer.http.fetch.assert_called_once_with(
            mock.ANY, headers=mock.ANY, method=mock.ANY, body=mock.ANY,
            raise_error=False,
            request_timeout=default_timeout)

    @testing.gen_test
    def test_that_timeout_can_be_configured_by_function(self):
        timeouts = self.consumer.settings.setdefault('timeouts', {})
        timeouts['fetch-stats'] = 1234.5
        yield self.process_message()
        self.consumer.http.fetch.assert_called_once_with(
            mock.ANY, headers=mock.ANY, method=mock.ANY, body=mock.ANY,
            raise_error=False, request_timeout=1234.5)

    @testing.gen_test
    def test_that_timeout_can_be_configured_by_service(self):
        timeouts = self.consumer.settings.setdefault('timeouts', {})
        timeouts['httpbin'] = 1234.5
        yield self.process_message()
        self.consumer.http.fetch.assert_called_once_with(
            mock.ANY, headers=mock.ANY, method=mock.ANY, body=mock.ANY,
            raise_error=False, request_timeout=1234.5)

    @testing.gen_test
    def test_that_function_timeout_is_preferred(self):
        timeouts = self.consumer.settings.setdefault('timeouts', {})
        timeouts.update({'fetch-stats': 1234.5, 'httpbin': 9876.5})
        yield self.process_message()
        self.consumer.http.fetch.assert_called_once_with(
            mock.ANY, headers=mock.ANY, method=mock.ANY, body=mock.ANY,
            raise_error=False, request_timeout=1234.5)
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module defines how to handle lineages.
"""
from __future__ import print_function
import numpy as np
import random
from tunacell.base.datatools import Coordinates
from tunacell.base.timeseries import TimeSeries
from tunacell.base.observable import Observable, FunctionalObservable
class LineageError(Exception):
    """Base exception for errors while handling a lineage."""
    pass
class LineageTimeseriesError(Exception):
    """Raised when a timeseries cannot be built for a lineage."""
    pass
class NoAncestry(Exception):
    """Raised when no cell in a lineage's ancestry crosses a reference time."""
    pass
class Lineage(object):
"""General class to handle lineage data.
Suitable for various conditioning methods.
Parameters
----------
tree : :class:`Colony` instance
identifier_sequence : sequence of cell identifiers
this is the (ordered) sequence of cells that defines the lineage
Attributes
----------
colony : :class:`Colony` instance
used to derive lineage
idseq : list of identifiers
Methods
-------
get_generations(tref=None)
Returns generation indices of cell sequence.
get_boolean_tests(cset=[])
Performs boolean test over cells in the sequence
get_timeseries(obs, cset=[])
Returns :class:`Timeseries` instance of :class:`Observable`
:param:`obs` in current lineage
"""
def __init__(self, tree, identifier_sequence):
self.colony = tree
self.idseq = identifier_sequence
self.cellseq = [tree.get_node(cid) for cid in self.idseq] # loops
self.division_timings = get_division_timing(self.idseq, tree)
return
# DEPRECATED: CHECK WHERE IT IS USED
def iter_cells(self, shuffle=False):
"""Iterator over cells in current lineage."""
idseq = self.idseq
if shuffle:
idseq = random.shuffle(idseq)
for cid in idseq:
yield self.colony.get_node(cid)
return
def get_generations(self, tref=None):
"""Get generation number
Parameter
---------
tref : float (default None)
optional to fix reference for generation count
Returns
-------
list of int
generation label within colony when tref=None, generation index
since generation 0 at t=tref.
Raises
------
NoAncestry
when tref is provided and no ancestry crosses tref.
"""
gens = []
genref = 0
for cid in self.idseq:
gens.append(self.colony.level(cid))
gens = np.array(gens, dtype='i4')
if tref is not None:
from tunacell.filters.cells import FilterTimeInCycle
check = FilterTimeInCycle(tref=tref)
found = False
# search from last cell in the past until root
cidup = self.colony.rsearch(self.idseq[-1])
# note that self.idseq[-1] is a colony leaf
for cid in cidup:
cell = self.colony.get_node(cid)
if check(cell):
found = True
genref = self.colony.level(cid)
break
if not found:
msg = 'No cell found in ancestry at {}'.format(tref)
raise NoAncestry(msg)
return gens - genref
def get_boolean_tests(self, cset=[]):
"""Set the dictionary of conditions for a given cset
Parameters
----------
cset : sequence of :class:`FilterSet` instances (default [])
Returns
-------
select_ids : dict
keys: 'master' + each of cset item string representation (repr),
values: sequence of booleans, where index in sequence corresponds
to index of each cell in self.idseq
Notes
-----
* 'master' is added, where every test is True
* call this function AFTER timeseries have been built, so that
filters acting on Observables can work (computation of
corresponding cell._sdata is being performed in .get_timeseries(obs))
"""
# initialize select_ids
# add the master entry (which will be a sequence of True values)
select_ids = {}
# master mask gets all True
select_ids['master'] = np.array([True for _ in self.idseq])
# add as many entries as there are conditions
for fset in cset:
# we have to make a logical AND between different filter types
col = self.colony
cont = col.container
# check True for upstream structures: container, colony, lineage
boo = (fset.container_filter(cont) and
fset.colony_filter(col) and
fset.lineage_filter(self))
# cell selections
# initialize all to False
arrbool = np.array(len(self.idseq) * [False, ])
# perform tests only if upstream tests where True
if boo:
for index, cell in enumerate(self.cellseq):
arrbool[index] = fset.cell_filter(cell)
select_ids[repr(fset)] = arrbool
return select_ids
def get_timeseries(self, obs, raw_obs=[], func_obs=[], cset=[]):
"""Contructs timeseries.
Parameters
----------
obs : :class:`Observable` or :class:`FunctionalObservable` instance
must be an item of raw_obs or an item of func_obs
raw_obs : list of :class:`Observable` instances
needed to be computed for filtering or in the case of FunctionalObservable
func_obs : list of :class:`FunctionalObservable` instances
needed to be computed for filtering
cset: sequence of :class:`FilterSet` instances (default [])
Returns
-------
:class:`TimeSeries` instance
corresponding to obs argument
"""
label = obs.label # complicated string
if obs.name is not None:
obs_name = obs.name # simpler string if provided by user
else:
obs_name = label
# obs must be either a member of raw_obs, or a member of func_obs
if isinstance(obs, Observable):
if obs not in raw_obs:
raw_obs.append(obs)
elif isinstance(obs, FunctionalObservable):
if obs not in func_obs:
func_obs.append(obs)
else:
raise TypeError('obs must be one of {Observable, FunctionalObservable}')
# compute timelapsed raw obs for all cells in lineage
for cell in self.cellseq:
for sobs in raw_obs:
cell.build(sobs.as_timelapse())
# now that all timelapse observables have been computed, there cannot
# be overlap between different cell in data evaluation,
#and we protect against future build
time_bounds = []
for cell in self.cellseq:
# compute those that are of cell-cycle mode
for sobs in raw_obs:
if sobs.mode != 'dynamics':
cell.build(sobs)
# protect against future build for raw observable
cell.protect_against_build(sobs)
for fobs in func_obs:
cell.build(fobs)
cell.protect_against_build(fobs)
# collect make time bounds
if cell.birth_time is not None:
tleft = cell.birth_time
elif len(cell.data) > 0:
tleft = np.amin(cell.data['time'])
else:
tleft = np.infty
if cell.division_time is not None:
tright = cell.division_time
elif len(cell.data) > 0:
tright = np.amax(cell.data['time'])
else:
tright = - np.infty
time_bounds.append((tleft, tright))
# boolean tests
select_ids = self.get_boolean_tests(cset)
arrays = []
index_cycles = []
colony = self.colony
container = colony.container
# at this point all _sdata are ready for action. Distinguish modes
if obs.mode == 'dynamics':
# get time reference for translating times
if obs.tref is not None:
if obs.tref == 'root':
root = colony.get_node(colony.root)
if root.data is not None and len(root.data) > 0:
tref = root.data['time'][-1] # last time of root
else:
tref = 0.
elif isinstance(obs.tref, float) or isinstance(obs.tref, int):
tref = float(obs.tref)
else:
tref = 0.
else:
tref = 0.
# build array
count = 0
for cell in self.cellseq:
if len(cell.data) > 0:
coords = Coordinates(cell.data['time'] - tref,
cell._sdata[label],
x_name='time',
y_name=obs_name)
arrays.append(coords.clear.as_array()) # remove NaNs
size = len(arrays[-1])
index_cycles.append((count, count + size - 1))
count += size
else:
index_cycles.append(None)
ts = np.concatenate(arrays)
coords = Coordinates.from_array(ts)
# otherwise it's of 'cycle' mode
else:
for index, cell in enumerate(self.cellseq):
if obs.timing == 'g':
try:
gens = self.get_generations(tref=obs.tref)
tt = gens[index]
except NoAncestry:
# return empty TimeSeries
# TODO : should be filtered upstream?
coords = Coordinates([], [], x_name='g', y_name=obs.name)
new = TimeSeries(ts=coords, ids=self.idseq[:],
index_cycles=[None for _ in self.idseq],
select_ids=select_ids,
container_label=container.label,
experiment_label=container.exp.label)
return new
# time value
elif obs.timing == 'b':
tt = cell.birth_time
elif obs.timing == 'd':
tt = cell.division_time
elif obs.timing == 'm':
try:
tt = (cell.division_time + cell.birth_time)/2.
except TypeError:
tt = None
# append new value
if tt is None:
tt = np.nan
arrays.append((tt, cell._sdata[label]))
index_cycles.append((index, index))
if len(arrays) == 0:
coords = Coordinates([], [], x_name=obs.timing, y_name=obs.name)
else:
coords = Coordinates(*list(zip(*arrays)), x_name=obs.timing, y_name=obs.name)
timeseries = TimeSeries(ts=coords, ids=self.idseq[:],
time_bounds=time_bounds,
index_cycles=index_cycles,
select_ids=select_ids,
container_label=container.label,
experiment_label=container.exp.label)
return timeseries
def split(self):
    """Return a list of slices partitioning ``self.data`` by cell.

    Rows in ``self.data`` are assumed to be grouped: a new slice starts
    whenever the value of the 'cellID' field changes. The last slice is
    open-ended (``slice(start, None)``) so it reaches the end of the data.

    Returns:
        list of slice: one slice per contiguous run of identical cellIDs.
        Empty list when there is no data (the previous implementation
        raised IndexError on empty data).
    """
    slices = []
    # explicit len() check: self.data may be a numpy structured array,
    # whose truthiness is ambiguous
    if len(self.data) == 0:
        return slices
    start = 0
    refid = self.data[0]['cellID']
    for index, line in enumerate(self.data[1:], start=1):
        cid = line['cellID']
        if cid != refid:
            # cellID changed: close the previous run, open a new one
            slices.append(slice(start, index))
            start = index
            refid = cid
    # final run extends to the end of the data
    slices.append(slice(start, None))
    return slices
def __repr__(self):
    """Readable summary: container/colony origin followed by the chain of cell ids."""
    parts = []
    if self.colony is not None:
        if self.colony.container is not None:
            parts.append('From Container: {}'.format(self.colony.container.label))
        parts.append(', colony with root id: {}'.format(self.colony.root))
    parts.append('\nCell ids: {}'.format(' > '.join(self.idseq)) + '\n')
    return ''.join(parts)
def get_division_timing(idseq, tree):
    """Collect the division time of each node listed in *idseq*.

    Args:
        idseq: iterable of node identifiers
        tree: object exposing ``get_node(cid)``; returned nodes carry a
            ``division_time`` attribute

    Returns:
        list: division times, in the same order as *idseq*
    """
    return [tree.get_node(cid).division_time for cid in idseq]
| |
# -*- coding: utf-8 -*-
"""
pygments.lexers.ml
~~~~~~~~~~~~~~~~~~
Lexers for ML family languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
# Public API of this lexer module.
__all__ = ['SMLLexer', 'OcamlLexer', 'OpaLexer']
class SMLLexer(RegexLexer):
    """
    For the Standard ML language.
    .. versionadded:: 1.5
    """
    name = 'Standard ML'
    aliases = ['sml']
    filenames = ['*.sml', '*.sig', '*.fun']
    mimetypes = ['text/x-standardml', 'application/x-standardml']
    # Reserved words that are spelled as alphanumeric identifiers.
    alphanumid_reserved = set((
        # Core
        'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else',
        'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix',
        'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse',
        'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while',
        # Modules
        'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature',
        'struct', 'structure', 'where',
    ))
    # Reserved words that are spelled as symbolic identifiers.
    # NOTE(review): '\|' is two characters (backslash + bar), but the
    # callbacks below test membership against the literally matched text
    # ('|'), so this entry appears to never match -- confirm against
    # upstream pygments before changing.
    symbolicid_reserved = set((
        # Core
        ':', '\|', '=', '=>', '->', '#',
        # Modules
        ':>',
    ))
    nonid_reserved = set(('(', ')', '[', ']', '{', '}', ',', ';', '...', '_'))
    alphanumid_re = r"[a-zA-Z][\w']*"
    symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+"
    # A character constant is a sequence of the form #s, where s is a string
    # constant denoting a string of size one character. This setup just parses
    # the entire string as either a String.Double or a String.Char (depending
    # on the argument), even if the String.Char is an erronous
    # multiple-character string.
    def stringy(whatkind):
        # Shared rule list used to build the 'char' and 'string' states below.
        return [
            (r'[^"\\]', whatkind),
            (r'\\[\\\"abtnvfr]', String.Escape),
            # Control-character notation is used for codes < 32,
            # where \^@ == \000
            (r'\\\^[\x40-\x5e]', String.Escape),
            # Docs say 'decimal digits'
            (r'\\[0-9]{3}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\\s+\\', String.Interpol),
            (r'"', whatkind, '#pop'),
        ]
    # Callbacks for distinguishing tokens and reserved words
    def long_id_callback(self, match):
        # Qualifier component of a dotted (long) identifier; reserved words
        # are not legal structure names, hence Error.
        if match.group(1) in self.alphanumid_reserved:
            token = Error
        else:
            token = Name.Namespace
        yield match.start(1), token, match.group(1)
        yield match.start(2), Punctuation, match.group(2)
    def end_id_callback(self, match):
        # Final component of a dotted identifier; reserved words are errors here.
        if match.group(1) in self.alphanumid_reserved:
            token = Error
        elif match.group(1) in self.symbolicid_reserved:
            token = Error
        else:
            token = Name
        yield match.start(1), token, match.group(1)
    def id_callback(self, match):
        # Plain identifier: reserved word, reserved symbol, or ordinary name.
        # (The local 'str' shadows the builtin; harmless but unfortunate.)
        str = match.group(1)
        if str in self.alphanumid_reserved:
            token = Keyword.Reserved
        elif str in self.symbolicid_reserved:
            token = Punctuation
        else:
            token = Name
        yield match.start(1), token, str
    tokens = {
        # Whitespace and comments are (almost) everywhere
        'whitespace': [
            (r'\s+', Text),
            (r'\(\*', Comment.Multiline, 'comment'),
        ],
        'delimiters': [
            # This lexer treats these delimiters specially:
            # Delimiters define scopes, and the scope is how the meaning of
            # the `|' is resolved - is it a case/handle expression, or function
            # definition by cases? (This is not how the Definition works, but
            # it's how MLton behaves, see http://mlton.org/SMLNJDeviations)
            (r'\(|\[|{', Punctuation, 'main'),
            (r'\)|\]|}', Punctuation, '#pop'),
            (r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')),
            (r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'),
            (r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'),
        ],
        'core': [
            # Punctuation that doesn't overlap symbolic identifiers
            (r'(%s)' % '|'.join(re.escape(z) for z in nonid_reserved),
             Punctuation),
            # Special constants: strings, floats, numbers in decimal and hex
            (r'#"', String.Char, 'char'),
            (r'"', String.Double, 'string'),
            (r'~?0x[0-9a-fA-F]+', Number.Hex),
            (r'0wx[0-9a-fA-F]+', Number.Hex),
            (r'0w\d+', Number.Integer),
            (r'~?\d+\.\d+[eE]~?\d+', Number.Float),
            (r'~?\d+\.\d+', Number.Float),
            (r'~?\d+[eE]~?\d+', Number.Float),
            (r'~?\d+', Number.Integer),
            # Labels
            (r'#\s*[1-9][0-9]*', Name.Label),
            (r'#\s*(%s)' % alphanumid_re, Name.Label),
            (r'#\s+(%s)' % symbolicid_re, Name.Label),
            # Some reserved words trigger a special, local lexer state change
            (r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
            (r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
            (r'\b(functor|include|open|signature|structure)\b(?!\')',
             Keyword.Reserved, 'sname'),
            (r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
            # Regular identifiers, long and otherwise
            (r'\'[\w\']*', Name.Decorator),
            (r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"),
            (r'(%s)' % alphanumid_re, id_callback),
            (r'(%s)' % symbolicid_re, id_callback),
        ],
        'dotted': [
            (r'(%s)(\.)' % alphanumid_re, long_id_callback),
            (r'(%s)' % alphanumid_re, end_id_callback, "#pop"),
            (r'(%s)' % symbolicid_re, end_id_callback, "#pop"),
            (r'\s+', Error),
            (r'\S+', Error),
        ],
        # Main parser (prevents errors in files that have scoping errors)
        'root': [
            default('main')
        ],
        # In this scope, I expect '|' to not be followed by a function name,
        # and I expect 'and' to be followed by a binding site
        'main': [
            include('whitespace'),
            # Special behavior of val/and/fun
            (r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'),
            (r'\b(fun)\b(?!\')', Keyword.Reserved,
             ('#pop', 'main-fun', 'fname')),
            include('delimiters'),
            include('core'),
            (r'\S+', Error),
        ],
        # In this scope, I expect '|' and 'and' to be followed by a function
        'main-fun': [
            include('whitespace'),
            (r'\s', Text),
            (r'\(\*', Comment.Multiline, 'comment'),
            # Special behavior of val/and/fun
            (r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'),
            (r'\b(val)\b(?!\')', Keyword.Reserved,
             ('#pop', 'main', 'vname')),
            # Special behavior of '|' and '|'-manipulating keywords
            (r'\|', Punctuation, 'fname'),
            (r'\b(case|handle)\b(?!\')', Keyword.Reserved,
             ('#pop', 'main')),
            include('delimiters'),
            include('core'),
            (r'\S+', Error),
        ],
        # Character and string parsers
        'char': stringy(String.Char),
        'string': stringy(String.Double),
        'breakout': [
            (r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'),
        ],
        # Dealing with what comes after module system keywords
        'sname': [
            include('whitespace'),
            include('breakout'),
            (r'(%s)' % alphanumid_re, Name.Namespace),
            default('#pop'),
        ],
        # Dealing with what comes after the 'fun' (or 'and' or '|') keyword
        'fname': [
            include('whitespace'),
            (r'\'[0-9a-zA-Z_\']*', Name.Decorator),
            (r'\(', Punctuation, 'tyvarseq'),
            (r'(%s)' % alphanumid_re, Name.Function, '#pop'),
            (r'(%s)' % symbolicid_re, Name.Function, '#pop'),
            # Ignore interesting function declarations like "fun (x + y) = ..."
            default('#pop'),
        ],
        # Dealing with what comes after the 'val' (or 'and') keyword
        'vname': [
            include('whitespace'),
            (r'\'[0-9a-zA-Z_\']*', Name.Decorator),
            (r'\(', Punctuation, 'tyvarseq'),
            (r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re),
             bygroups(Name.Variable, Text, Punctuation), '#pop'),
            (r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re),
             bygroups(Name.Variable, Text, Punctuation), '#pop'),
            (r'(%s)' % alphanumid_re, Name.Variable, '#pop'),
            (r'(%s)' % symbolicid_re, Name.Variable, '#pop'),
            # Ignore interesting patterns like 'val (x, y)'
            default('#pop'),
        ],
        # Dealing with what comes after the 'type' (or 'and') keyword
        'tname': [
            include('whitespace'),
            include('breakout'),
            (r'\'[0-9a-zA-Z_\']*', Name.Decorator),
            (r'\(', Punctuation, 'tyvarseq'),
            (r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')),
            (r'(%s)' % alphanumid_re, Keyword.Type),
            (r'(%s)' % symbolicid_re, Keyword.Type),
            (r'\S+', Error, '#pop'),
        ],
        # A type binding includes most identifiers
        'typbind': [
            include('whitespace'),
            (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
            include('breakout'),
            include('core'),
            (r'\S+', Error, '#pop'),
        ],
        # Dealing with what comes after the 'datatype' (or 'and') keyword
        'dname': [
            include('whitespace'),
            include('breakout'),
            (r'\'[0-9a-zA-Z_\']*', Name.Decorator),
            (r'\(', Punctuation, 'tyvarseq'),
            (r'(=)(\s*)(datatype)',
             bygroups(Punctuation, Text, Keyword.Reserved), '#pop'),
            (r'=(?!%s)' % symbolicid_re, Punctuation,
             ('#pop', 'datbind', 'datcon')),
            (r'(%s)' % alphanumid_re, Keyword.Type),
            (r'(%s)' % symbolicid_re, Keyword.Type),
            (r'\S+', Error, '#pop'),
        ],
        # common case - A | B | C of int
        'datbind': [
            include('whitespace'),
            (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')),
            (r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
            (r'\b(of)\b(?!\')', Keyword.Reserved),
            (r'(\|)(\s*)(%s)' % alphanumid_re,
             bygroups(Punctuation, Text, Name.Class)),
            (r'(\|)(\s+)(%s)' % symbolicid_re,
             bygroups(Punctuation, Text, Name.Class)),
            include('breakout'),
            include('core'),
            (r'\S+', Error),
        ],
        # Dealing with what comes after an exception
        'ename': [
            include('whitespace'),
            (r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
             bygroups(Keyword.Reserved, Text, Name.Class)),
            (r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
             bygroups(Keyword.Reserved, Text, Name.Class)),
            (r'\b(of)\b(?!\')', Keyword.Reserved),
            include('breakout'),
            include('core'),
            (r'\S+', Error),
        ],
        'datcon': [
            include('whitespace'),
            (r'(%s)' % alphanumid_re, Name.Class, '#pop'),
            (r'(%s)' % symbolicid_re, Name.Class, '#pop'),
            (r'\S+', Error, '#pop'),
        ],
        # Series of type variables
        'tyvarseq': [
            (r'\s', Text),
            (r'\(\*', Comment.Multiline, 'comment'),
            (r'\'[0-9a-zA-Z_\']*', Name.Decorator),
            (alphanumid_re, Name),
            (r',', Punctuation),
            (r'\)', Punctuation, '#pop'),
            (symbolicid_re, Name),
        ],
        'comment': [
            (r'[^(*)]', Comment.Multiline),
            (r'\(\*', Comment.Multiline, '#push'),
            (r'\*\)', Comment.Multiline, '#pop'),
            (r'[(*)]', Comment.Multiline),
        ],
    }
class OcamlLexer(RegexLexer):
    """
    For the OCaml language.
    .. versionadded:: 0.7
    """
    name = 'OCaml'
    aliases = ['ocaml']
    filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
    mimetypes = ['text/x-ocaml']
    keywords = (
        'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
        'downto', 'else', 'end', 'exception', 'external', 'false',
        'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
        'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
        'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
        'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
        'type', 'value', 'val', 'virtual', 'when', 'while', 'with',
    )
    # Operator/punctuation spellings, already regex-escaped where needed.
    keyopts = (
        '!=', '#', '&', '&&', '\(', '\)', '\*', '\+', ',', '-',
        '-\.', '->', '\.', '\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
        '<-', '=', '>', '>]', '>}', '\?', '\?\?', '\[', '\[<', '\[>', '\[\|',
        ']', '_', '`', '{', '{<', '\|', '\|]', '}', '~'
    )
    operators = r'[!$%&*+\./:<=>?@^|~-]'
    word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or')
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'
    primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array')
    tokens = {
        'escape-sequence': [
            (r'\\[\\\"\'ntbr]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
        ],
        'root': [
            (r'\s+', Text),
            (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
            (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'),
            (r'\b([A-Z][\w\']*)', Name.Class),
            (r'\(\*(?![)])', Comment, 'comment'),
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            # keyopts is reversed so longer spellings win over their prefixes
            (r'(%s)' % '|'.join(keyopts[::-1]), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
            (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
            (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
            (r"[^\W\d][\w']*", Name),
            # BUGFIX: the decimal point must be escaped. The previous
            # unescaped '.' matched any character, so text like '1,5e3'
            # was lexed as a single float literal.
            (r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
            (r'0[oO][0-7][0-7_]*', Number.Oct),
            (r'0[bB][01][01_]*', Number.Bin),
            (r'\d[\d_]*', Number.Integer),
            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
             String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element
            (r'"', String.Double, 'string'),
            (r'[~?][a-z][\w\']*:', Name.Variable),
        ],
        'comment': [
            (r'[^(*)]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            (r'[(*)]', Comment),
        ],
        'string': [
            (r'[^\\"]+', String.Double),
            include('escape-sequence'),
            (r'\\\n', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        # Inside a dotted module path like A.B.c
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][\w\']*', Name.Class, '#pop'),
            (r'[a-z_][\w\']*', Name, '#pop'),
        ],
    }
class OpaLexer(RegexLexer):
    """
    Lexer for the Opa language (http://opalang.org).
    .. versionadded:: 1.5
    """
    name = 'Opa'
    aliases = ['opa']
    filenames = ['*.opa']
    mimetypes = ['text/x-opa']
    # most of these aren't strictly keywords
    # but if you color only real keywords, you might just
    # as well not color anything
    keywords = (
        'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
        'else', 'end', 'external', 'forall', 'function', 'if', 'import',
        'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
        'type', 'val', 'with', 'xml_parser',
    )
    # matches both stuff and `stuff`
    ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
    op_re = r'[.=\-<>,@~%/+?*&^!]'
    punc_re = r'[()\[\],;|]'  # '{' and '}' are treated elsewhere
    # because they are also used for inserts
    tokens = {
        # copied from the caml lexer, should be adapted
        'escape-sequence': [
            (r'\\[\\\"\'ntr}]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
        ],
        # factorizing these rules, because they are inserted many times
        'comments': [
            (r'/\*', Comment, 'nested-comment'),
            (r'//.*?$', Comment),
        ],
        'comments-and-spaces': [
            include('comments'),
            (r'\s+', Text),
        ],
        'root': [
            include('comments-and-spaces'),
            # keywords
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
            # directives
            # we could parse the actual set of directives instead of anything
            # starting with @, but this is troublesome
            # because it needs to be adjusted all the time
            # and assuming we parse only sources that compile, it is useless
            (r'@' + ident_re + r'\b', Name.Builtin.Pseudo),
            # number literals
            # BUGFIX: the decimal points in the two float rules below were
            # unescaped, so '.' matched any character (e.g. 'x5e3' or '1,5'
            # misdetected as floats). They now match a literal dot.
            (r'-?\.[\d]+([eE][+\-]?\d+)', Number.Float),
            (r'-?\d+\.\d*([eE][+\-]?\d+)', Number.Float),
            (r'-?\d+[eE][+\-]?\d+', Number.Float),
            (r'0[xX][\da-fA-F]+', Number.Hex),
            (r'0[oO][0-7]+', Number.Oct),
            (r'0[bB][01]+', Number.Bin),
            (r'\d+', Number.Integer),
            # color literals
            (r'#[\da-fA-F]{3,6}', Number.Integer),
            # string literals
            (r'"', String.Double, 'string'),
            # char literal, should be checked because this is the regexp from
            # the caml lexer
            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
             String.Char),
            # this is meant to deal with embedded exprs in strings
            # every time we find a '}' we pop a state so that if we were
            # inside a string, we are back in the string state
            # as a consequence, we must also push a state every time we find a
            # '{' or else we will have errors when parsing {} for instance
            (r'{', Operator, '#push'),
            (r'}', Operator, '#pop'),
            # html literals
            # this is a much more strict that the actual parser,
            # since a<b would not be parsed as html
            # but then again, the parser is way too lax, and we can't hope
            # to have something as tolerant
            (r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
            # db path
            # matching the '[_]' in '/a[_]' because it is a part
            # of the syntax of the db path definition
            # unfortunately, i don't know how to match the ']' in
            # /a[1], so this is somewhat inconsistent
            (r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
            # putting the same color on <- as on db path, since
            # it can be used only to mean Db.write
            (r'<-(?!'+op_re+r')', Name.Variable),
            # 'modules'
            # although modules are not distinguished by their names as in caml
            # the standard library seems to follow the convention that modules
            # only area capitalized
            (r'\b([A-Z]\w*)(?=\.)', Name.Namespace),
            # operators
            # = has a special role because this is the only
            # way to syntactic distinguish binding constructions
            # unfortunately, this colors the equal in {x=2} too
            (r'=(?!'+op_re+r')', Keyword),
            (r'(%s)+' % op_re, Operator),
            (r'(%s)+' % punc_re, Operator),
            # coercions
            (r':', Operator, 'type'),
            # type variables
            # we need this rule because we don't parse specially type
            # definitions so in "type t('a) = ...", "'a" is parsed by 'root'
            ("'"+ident_re, Keyword.Type),
            # id literal, #something, or #{expr}
            (r'#'+ident_re, String.Single),
            (r'#(?={)', String.Single),
            # identifiers
            # this avoids to color '2' in 'a2' as an integer
            (ident_re, Text),
            # default, not sure if that is needed or not
            # (r'.', Text),
        ],
        # it is quite painful to have to parse types to know where they end
        # this is the general rule for a type
        # a type is either:
        # * -> ty
        # * type-with-slash
        # * type-with-slash -> ty
        # * type-with-slash (, type-with-slash)+ -> ty
        #
        # the code is pretty funky in here, but this code would roughly
        # translate in caml to:
        # let rec type stream =
        # match stream with
        # | [< "->"; stream >] -> type stream
        # | [< ""; stream >] ->
        # type_with_slash stream
        # type_lhs_1 stream;
        # and type_1 stream = ...
        'type': [
            include('comments-and-spaces'),
            (r'->', Keyword.Type),
            default(('#pop', 'type-lhs-1', 'type-with-slash')),
        ],
        # parses all the atomic or closed constructions in the syntax of type
        # expressions: record types, tuple types, type constructors, basic type
        # and type variables
        'type-1': [
            include('comments-and-spaces'),
            (r'\(', Keyword.Type, ('#pop', 'type-tuple')),
            (r'~?{', Keyword.Type, ('#pop', 'type-record')),
            (ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')),
            (ident_re, Keyword.Type, '#pop'),
            ("'"+ident_re, Keyword.Type),
            # this case is not in the syntax but sometimes
            # we think we are parsing types when in fact we are parsing
            # some css, so we just pop the states until we get back into
            # the root state
            default('#pop'),
        ],
        # type-with-slash is either:
        # * type-1
        # * type-1 (/ type-1)+
        'type-with-slash': [
            include('comments-and-spaces'),
            default(('#pop', 'slash-type-1', 'type-1')),
        ],
        'slash-type-1': [
            include('comments-and-spaces'),
            ('/', Keyword.Type, ('#pop', 'type-1')),
            # same remark as above
            default('#pop'),
        ],
        # we go in this state after having parsed a type-with-slash
        # while trying to parse a type
        # and at this point we must determine if we are parsing an arrow
        # type (in which case we must continue parsing) or not (in which
        # case we stop)
        'type-lhs-1': [
            include('comments-and-spaces'),
            (r'->', Keyword.Type, ('#pop', 'type')),
            (r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')),
            default('#pop'),
        ],
        'type-arrow': [
            include('comments-and-spaces'),
            # the look ahead here allows to parse f(x : int, y : float -> truc)
            # correctly
            (r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'),
            (r'->', Keyword.Type, ('#pop', 'type')),
            # same remark as above
            default('#pop'),
        ],
        # no need to do precise parsing for tuples and records
        # because they are closed constructions, so we can simply
        # find the closing delimiter
        # note that this function would be not work if the source
        # contained identifiers like `{)` (although it could be patched
        # to support it)
        'type-tuple': [
            include('comments-and-spaces'),
            (r'[^\(\)/*]+', Keyword.Type),
            (r'[/*]', Keyword.Type),
            (r'\(', Keyword.Type, '#push'),
            (r'\)', Keyword.Type, '#pop'),
        ],
        'type-record': [
            include('comments-and-spaces'),
            (r'[^{}/*]+', Keyword.Type),
            (r'[/*]', Keyword.Type),
            (r'{', Keyword.Type, '#push'),
            (r'}', Keyword.Type, '#pop'),
        ],
        # 'type-tuple': [
        # include('comments-and-spaces'),
        # (r'\)', Keyword.Type, '#pop'),
        # default(('#pop', 'type-tuple-1', 'type-1')),
        # ],
        # 'type-tuple-1': [
        # include('comments-and-spaces'),
        # (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
        # (r',', Keyword.Type, 'type-1'),
        # ],
        # 'type-record':[
        # include('comments-and-spaces'),
        # (r'}', Keyword.Type, '#pop'),
        # (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
        # ],
        # 'type-record-field-expr': [
        #
        # ],
        'nested-comment': [
            (r'[^/*]+', Comment),
            (r'/\*', Comment, '#push'),
            (r'\*/', Comment, '#pop'),
            (r'[/*]', Comment),
        ],
        # the copy pasting between string and single-string
        # is kinda sad. Is there a way to avoid that??
        'string': [
            (r'[^\\"{]+', String.Double),
            (r'"', String.Double, '#pop'),
            (r'{', Operator, 'root'),
            include('escape-sequence'),
        ],
        'single-string': [
            (r'[^\\\'{]+', String.Double),
            (r'\'', String.Double, '#pop'),
            (r'{', Operator, 'root'),
            include('escape-sequence'),
        ],
        # all the html stuff
        # can't really reuse some existing html parser
        # because we must be able to parse embedded expressions
        # we are in this state after someone parsed the '<' that
        # started the html literal
        'html-open-tag': [
            (r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
            (r'>', String.Single, ('#pop', 'html-content')),
        ],
        # we are in this state after someone parsed the '</' that
        # started the end of the closing tag
        'html-end-tag': [
            # this is a star, because </> is allowed
            (r'[\w\-:]*>', String.Single, '#pop'),
        ],
        # we are in this state after having parsed '<ident(:ident)?'
        # we thus parse a possibly empty list of attributes
        'html-attr': [
            (r'\s+', Text),
            (r'[\w\-:]+=', String.Single, 'html-attr-value'),
            (r'/>', String.Single, '#pop'),
            (r'>', String.Single, ('#pop', 'html-content')),
        ],
        'html-attr-value': [
            (r"'", String.Single, ('#pop', 'single-string')),
            (r'"', String.Single, ('#pop', 'string')),
            (r'#'+ident_re, String.Single, '#pop'),
            (r'#(?={)', String.Single, ('#pop', 'root')),
            (r'[^"\'{`=<>]+', String.Single, '#pop'),
            (r'{', Operator, ('#pop', 'root')),  # this is a tail call!
        ],
        # we should probably deal with '\' escapes here
        'html-content': [
            (r'<!--', Comment, 'html-comment'),
            (r'</', String.Single, ('#pop', 'html-end-tag')),
            (r'<', String.Single, 'html-open-tag'),
            (r'{', Operator, 'root'),
            (r'[^<{]+', String.Single),
        ],
        'html-comment': [
            (r'-->', Comment, '#pop'),
            (r'[^\-]+|-', Comment),
        ],
    }
| |
# Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
'''Abstract Syntax Tree. Each node is an operator or operand.'''
import collections
import uuid
from abc import ABCMeta, abstractmethod
from typing import (Any, Callable, Dict, List, NamedTuple, Optional, Sequence, Set, # noqa: F401
Tuple, Union, cast)
import pandas as pd
import six
from .bq_types import BQScalarType, BQType, TypedDataFrame, TypedSeries # noqa: F401
from .storage import TableContext
# Alias for the type of None (kept here because types.NoneType is not
# available across all supported Python versions).
NoneType = type(None)
# Three levels of string keys mapping down to table data -- presumably
# project -> dataset -> table, mirroring BigQuery naming; confirm with storage.
DatasetType = Dict[str, Dict[str, Dict[str, TypedDataFrame]]]
# Table name for columns that come from evaluating selectors and intermediate expressions.
_SELECTOR_TABLE = '__selector__'
class AbstractSyntaxTreeNode(object):
    '''Base class for AST nodes.'''

    def __repr__(self):
        # type: () -> str
        '''Return a debug representation of this node.

        Examples:
            _EmptyNode()
            Value(type_=BQScalarType.STRING, value='something')

        Attributes whose value is EMPTY_NODE are omitted; the remaining
        key=value pairs are rendered in sorted order.

        Returns:
            String representation of object
        '''
        rendered = ['{}={!r}'.format(field, value)
                    for field, value in six.iteritems(vars(self))
                    if value is not EMPTY_NODE]
        return '{}({})'.format(type(self).__name__, ', '.join(sorted(rendered)))

    def strexpr(self):
        # type: () -> str
        '''Return a prefix-expression serialization for testing purposes.'''
        children = ' '.join(child.strexpr() for child in six.itervalues(vars(self)))
        return '({} {})'.format(type(self).__name__.upper(), children)

    @classmethod
    def literal(cls):
        # type: () -> Optional[str]
        '''Returns the string that signals the start of an expression.

        For example, a Select class would return "SELECT", while Value would
        return None, because there is no literal that should precede a
        number/string/etc.
        '''
        return None
# These type definitions must live here, after AbstractSyntaxTreeNode is defined
AppliedRuleNode = Union[str,
                        AbstractSyntaxTreeNode,
                        NoneType,
                        Tuple]  # actually Tuple[AppliedRuleNode]; mypy can't do recursive types :(
# Pair of (resulting node, list of strings) -- presumably the tokens left
# unconsumed by the rule; confirm against the grammar/parser module.
AppliedRuleOutputType = Tuple[AppliedRuleNode, List[str]]
# Should include Tuple[RuleType] and List[RuleType] but mypy doesn't fully
# support recursive types yet.
RuleType = Union[str, Tuple[Any, ...], List[Any], Callable[[List[str]], AppliedRuleOutputType]]
class MarkerSyntaxTreeNode(AbstractSyntaxTreeNode):
    '''Parent class for abstract syntax tree nodes whose syntax starts with the class name.

    See AbstractSyntaxTreeNode.literal()'''

    @classmethod
    def literal(cls):
        # type: () -> Optional[str]
        '''The introducing keyword is simply the upper-cased class name.'''
        return cls.__name__.upper()
class EvaluatableNode(AbstractSyntaxTreeNode):
    '''Abstract base class for syntax tree nodes that can be evaluated to return a column of data'''

    # Py2-style metaclass declaration (the module supports six/py2); it is a
    # no-op on py3 but kept for compatibility.
    __metaclass__ = ABCMeta

    @abstractmethod
    def evaluate(self, context):
        # type: (EvaluationContext) -> Union[TypedDataFrame, TypedSeries]
        '''Produce a new table or column by applying this node's fields to `context`.

        This method should never be overridden by concrete subclasses.

        Args:
            context: The tables that are involved in this query
        Returns:
            A new table (TypedDataFrame) or column (TypedSeries)
        '''

    @abstractmethod
    def pre_group_by_partially_evaluate(self, context):
        # type: (EvaluationContext) -> Union[TypedSeries, EvaluatableNode]
        '''First pass of the two-pass evaluation that implements GROUP BY.

        Evaluates the expressions contained inside an aggregation or a
        grouped-by field, caching the results in the context so the second
        pass can refer to them -- see EvaluationContext.do_group_by for the
        full mechanism.

        This function should never be overriden by concrete subclasses.

        Args:
            context: The tables that are involved in this query
        Returns:
            A fully evaluated column or an unevaluated expression (an EvaluatableNode)
        '''

    def name(self):
        # type: () -> Optional[str]
        '''Infer a column name for this expression, or return None.

        Every column of the DataFrame returned from a select statement needs
        a name; this value is used when the user gives no explicit AS alias.
        Most expressions have no obvious name and return None, in which case
        the outermost expression (Selector) generates one.
        '''
        return None

    @abstractmethod
    def mark_grouped_by(self, group_by_paths, context):
        # type: (Sequence[Tuple[str, ...]], EvaluationContext) -> EvaluatableNode
        '''Returns a new syntax tree rooted at the current one, marking fields that are grouped by.

        Args:
            group_by_paths: Canonicalized paths of the columns that are grouped by
            context: Context to evaluate in (for canonicalizing)
        '''

    def is_constant(self):
        # type: () -> bool
        '''Returns true if this expression is the same when evaluated in any context.'''
        return True

    def is_aggregated(self):
        # type: () -> bool
        '''Returns true if this expression contains any aggregation.'''
        return False
class EvaluatableLeafNode(EvaluatableNode):
    '''Abstract Syntax Tree Node that can be evaluated and has no child nodes.'''

    def evaluate(self, context):
        # type: (EvaluationContext) -> Union[TypedSeries, TypedDataFrame]
        '''See docstring in EvaluatableNode.evaluate'''
        return self._evaluate_leaf_node(context)

    def pre_group_by_partially_evaluate(self, context):
        # type: (EvaluationContext) -> Union[TypedSeries, EvaluatableNode]
        '''See docstring in EvaluatableNode.pre_group_by_partially_evaluate'''
        result = self._evaluate_leaf_node(context)
        if not isinstance(result, TypedDataFrame):
            return result
        # TODO: This codepath can be hit in a query like SELECT * GROUP BY
        # Such a query can actually be valid if every *-selected field is
        # grouped by. Support this.
        raise ValueError("Cannot partially evaluate {!r}".format(self))

    @abstractmethod
    def _evaluate_leaf_node(self, context):
        # type: (EvaluationContext) -> Union[TypedDataFrame, TypedSeries]
        '''Compute a new column from this childless node in the provided context.

        This method must be overriden by all subclasses.

        Args:
            context: EvaluationContext to evaluate this expression in
        Returns:
            A table or column of data.
        '''
class EvaluatableNodeWithChildren(EvaluatableNode):
    '''Abstract Syntax Tree node that can be evaluated, based on child nodes.'''

    def __init__(self, children):
        # type: (Sequence[EvaluatableNode]) -> None
        self.children = children

    def pre_group_by_partially_evaluate(self, context):
        # type: (EvaluationContext) -> Union[TypedSeries, EvaluatableNode]
        '''See docstring in EvaluatableNode.pre_group_by_partially_evaluate'''
        partials = [node.pre_group_by_partially_evaluate(context)
                    for node in self.children]
        # When every child collapsed to a concrete column, this node can be
        # computed immediately; e.g. `a * 2` or `concat(foo, "/", bar)` fully
        # evaluate into a column of values (a TypedSeries).
        if all(isinstance(partial, TypedSeries) for partial in partials):
            return self._evaluate_node(cast(List[TypedSeries], partials))
        # Otherwise this expression sits outside an aggregation and must be
        # evaluated in the second pass, so the caches of partial evaluation
        # are not kept. Consider `2 + max(b * c)`: the 'max' cannot run until
        # after the group by, so it defers and returns an EvaluatableNode
        # (see EvaluatableNodeThatAggregatesOrGroups.pre_group_by_partially_evaluate),
        # while the '2' would return an actual column of twos. That column
        # must not get grouped -- it has to be added to max's result, and a
        # SeriesGroupBy cannot be added to a Series -- so each fully evaluated
        # result is discarded and the original child node kept instead.
        rewritten = []
        for child, partial in zip(self.children, partials):
            rewritten.append(partial if isinstance(partial, EvaluatableNode) else child)
        return self.copy(rewritten)

    def _ensure_fully_evaluated(self, evaluated_children):
        # type: (List[Any]) -> List[TypedSeries]
        '''Ensure evaluated_children are fully evaluated; raise ValueError if not.'''
        for evaluated in evaluated_children:
            if not isinstance(evaluated, TypedSeries):
                raise ValueError(
                    "In order to evaluate {}, all children must be evaluated.".format(self))
        return cast(List[TypedSeries], evaluated_children)

    def evaluate(self, context):
        # type: (EvaluationContext) -> Union[TypedDataFrame, TypedSeries]
        '''Produce a new table or column by applying this node's fields to `context`.

        This method should never be overridden by subclasses.

        Args:
            context: The tables that are involved in this query
        Returns:
            A new table (TypedDataFrame) or column (TypedSeries)
        '''
        results = [node.evaluate(context) for node in self.children]
        return self._evaluate_node(self._ensure_fully_evaluated(results))

    @abstractmethod
    def _evaluate_node(self, evaluated_children):
        # type: (List[TypedSeries]) -> TypedSeries
        '''Compute a new column from the already-evaluated arguments.

        This method must be overriden by all subclasses.

        Args:
            evaluated_children: The already-evaluated children of this node.
        Returns:
            A new column (TypedSeries)
        '''

    @abstractmethod
    def copy(self, new_children):
        # type: (Sequence[EvaluatableNode]) -> EvaluatableNode
        '''Create a copy of this node with different children.

        Subclasses carry two kinds of state: the child nodes below them in
        the tree, and everything else (such as the specific binary operator
        or the particular function the node invokes). This method swaps out
        the former while keeping the latter, enabling operations that
        rewrite the abstract syntax tree.

        This method must be overriden by all subclasses.

        Args:
            new_children: Abstract syntax tree nodes to use as the children of the new node.
        Returns:
            A new node.
        '''

    def mark_grouped_by(self, group_by_paths, context):
        # type: (Sequence[Tuple[str, ...]], EvaluationContext) -> EvaluatableNode
        '''Returns a new syntax tree rooted at the current one, marking fields that are grouped by.

        Args:
            group_by_paths: Canonicalized paths of the columns that are grouped by
            context: Context to evaluate in (for canonicalizing)
        '''
        marked = [node.mark_grouped_by(group_by_paths, context)
                  for node in self.children]
        return self.copy(marked)

    def is_constant(self):
        # type: () -> bool
        '''Returns true if this expression is the same when evaluated in any context.'''
        return all(node.is_constant() for node in self.children)

    def is_aggregated(self):
        # type: () -> bool
        '''See parent class for docstring.'''
        return any(node.is_aggregated() for node in self.children)
class EvaluatableNodeThatAggregatesOrGroups(EvaluatableNodeWithChildren):
    '''Abstract Syntax Tree node that can be evaluated that aggregates child nodes.
    Subclasses represent expressions like count, max, or sum that aggregate results of multiple
    rows into one row.
    GroupedBy is also a subclass; this expression node wraps Fields that are listed in GROUP BY and
    are not themselves aggregated.
    '''
    def pre_group_by_partially_evaluate(self, context):
        # type: (EvaluationContext) -> Union[TypedSeries, EvaluatableNode]
        '''See docstring in EvaluatableNode.pre_group_by_partially_evaluate'''
        # Aggregating/grouped-by expressions must fully evaluate their children, cache
        # those results as columns in the context, and return a replacement AST node that
        # computes the aggregate over lookups of the cached columns (which will be
        # grouped in the second-pass evaluation).
        fully_evaluated = self._ensure_fully_evaluated(
            [child.pre_group_by_partially_evaluate(context) for child in self.children])
        replacement_children = []
        for evaluated_child in fully_evaluated:
            replacement_children.append(Field(context.maybe_add_column(evaluated_child)))
        return self.copy(replacement_children)
    def mark_grouped_by(self, group_by_paths, context):
        # type: (Sequence[Tuple[str, ...]], EvaluationContext) -> EvaluatableNode
        '''Returns a new syntax tree rooted at the current one, marking fields that are grouped by.
        Args:
            group_by_paths: Canonicalized paths of the columns that are grouped by
            context: Context to evaluate in (for canonicalizing)
        '''
        # Grouped-by columns appearing inside an aggregating expression are consumed by
        # that aggregation, so they must not themselves be marked as grouped by.
        return self
    def is_constant(self):
        # type: () -> bool
        '''Any expression that aggregates may not be constant once grouping is applied.'''
        return False
    def is_aggregated(self):
        # type: () -> bool
        '''See parent class for docstring.'''
        return True
class Result(object):
    '''Result of executing a query or statement.'''
    def __init__(self, statement_type, path=None, table=None):
        # type: (str, Optional[Sequence[str]], Optional[TypedDataFrame]) -> None
        '''Constructs a Result.
        Args:
            statement_type: Which statement was executed.
            path: If applicable, a table that was created.
            table: If applicable, the result of a query
        '''
        # The three fields are independent; a given result typically populates
        # either `path` (DDL-style statements) or `table` (queries), not both.
        self.table = table
        self.path = path
        self.statement_type = statement_type
class DataframeNode(AbstractSyntaxTreeNode):
    '''Abstract Syntax Tree Nodes that have a get_dataframe() method.
    This node represents a syntactic object that can be selected FROM or that
    can be JOINed to another DataframeNode.
    '''
    def get_dataframe(self, table_context, outer_context=None):
        # type: (TableContext, Optional[EvaluationContext]) -> Tuple[TypedDataFrame, Optional[str]]
        '''Scope the given datasets by the criteria specified in the
        instance's fields.
        Args:
            datasets: All the tables in the database
            outer_context: Context of a containing query (e.g. an EXISTS expression)
        Returns:
            Tuple of the resulting table (TypedDataFrame) and a name for
            this table
        '''
    def execute(self, table_context):
        # type: (TableContext) -> Result
        '''Executes the query
        Args:
            table_context: the currently existing datasets in the environment.
        Returns:
            the result of executing it.
        '''
        # The table's name is irrelevant for a top-level SELECT result.
        dataframe, _ = self.get_dataframe(table_context)
        return Result('SELECT', table=dataframe)
class GroupedBy(EvaluatableNodeThatAggregatesOrGroups):
    '''One of the columns grouped by.
    When GROUP BY is used, every column that is SELECTed must either be aggregated (min, max, etc),
    be a constant, or be one of the columns grouped by, otherwise it might have multiple values
    across the group, which doesn't make sense to select one of.  In this implementation, this
    manifests as follows: when a column is evaluated, the expressions are pandas SeriesGroupBy
    objects, and those need to be either aggregated into Serieses, or marked with a GroupedBy node
    as being one of the columns grouped by.  A GroupedBy node's child will evaluate to an expression
    constant within its group (by definition) but we need to explicitly convert it to a Series
    containing those constant elements.
    '''
    def __init__(self, expression):
        # A GroupedBy node wraps exactly one child expression.
        self.children = [expression]
    def copy(self, new_children):
        # type: (Sequence[EvaluatableNode]) -> GroupedBy
        child, = new_children
        return GroupedBy(child)
    def _evaluate_node(self, evaluated_children):
        # type: (List[TypedSeries]) -> TypedSeries
        evaluated_expression, = evaluated_children
        # Per-group min equals per-group max exactly when the value is constant
        # within each group; that constant is what this node evaluates to.
        group_minima = evaluated_expression.series.min()
        if not group_minima.equals(evaluated_expression.series.max()):
            raise ValueError("Field {} should be constant within group but it varies {}"
                             .format(self, evaluated_expression))
        return TypedSeries(group_minima, evaluated_expression.type_)
class Field(EvaluatableLeafNode):
    '''A reference to a column in a table.
    For example, in a table Table with columns A, B, and C,
    a valid Field path would be ('B',) or ('Table', 'A').
    '''
    def __init__(self, path):
        # type: (Tuple[str, ...]) -> None
        '''Set up Field node
        Args:
            path: A tuple of strings describing the path to this field
            (just the column name, or table.column)
        '''
        self.path = path
    def strexpr(self):
        # type: () -> str
        # Render as a dotted path, e.g. 'Table.A'.
        return '.'.join(self.path)
    def mark_grouped_by(self, group_by_paths, context):
        # type: (Sequence[Tuple[str, ...]], EvaluationContext) -> EvaluatableNode
        # Wrap this field in GroupedBy when it is one of the GROUP BY keys.
        canonical = context.get_canonical_path(self.path)
        return GroupedBy(self) if canonical in group_by_paths else self
    def name(self):
        # type: () -> Optional[str]
        # The column's bare name is the final path component.
        return self.path[-1]
    def _evaluate_leaf_node(self, context):
        # type: (EvaluationContext) -> TypedSeries
        # Look the column up in the context and label it with its bare name.
        column = context.lookup(self.path)
        column.series.name = self.name()
        return column
    def __eq__(self, other):
        # type: (Any) -> bool
        # Two Fields are equal exactly when they reference the same path.
        return isinstance(other, Field) and self.path == other.path
    def is_constant(self):
        # type: () -> bool
        # A field's value depends on the row, so it is never constant.
        return False
class _EmptyNode(AbstractSyntaxTreeNode):
    '''An Empty syntax tree node; represents an optional element not present.'''
    def strexpr(self):
        # type: () -> str
        # Render the absent element as SQL-like 'null' in debug strings.
        return 'null'
# Shared singleton used wherever an optional syntax element is absent.
EMPTY_NODE = _EmptyNode()
class EvaluationContext:
    '''Context for resolving a name to a column (TypedSeries).
    An EvaluationContext is a representation of columns in all the tables that
    are in the query's scope. Typically used in the context of SELECT, WHERE, etc.
    Contrast with TableContext, whose purpose is to resolve a name to a table (TypedDataFrame),
    and contains all the tables (all the data) that is available in the database.
    '''
    def __init__(self, table_context):
        # type: (TableContext) -> None
        '''Initialize a context.
        Args:
            table_context: All tables visible to be queried.
        '''
        # We don't want an actual empty dataframe because with no FROMed tables, we
        # still want to return a single row of results.
        self.table = TypedDataFrame(pd.DataFrame([[1]]), [None])  # type: TypedDataFrame
        # Mapping of column ID (prefixed by table ID) to its type
        self.canonical_column_to_type = {}  # type: Dict[str, BQType]
        # Mapping of column IDs to list of table IDs to which they belong
        self.column_to_table_ids = collections.defaultdict(list)  # type: Dict[str, List[str]]
        # Mapping of table IDs to list of column IDs in that table
        self.table_to_column_ids = collections.defaultdict(list)  # type: Dict[str, List[str]]
        # All the available datasets (not necessarily all present in this context, yet)
        self.table_context = table_context
        # Table ids included in this context.
        self.table_ids = set()  # type: Set[str]
        # Stores the list of columns grouped by, or None if this expression isn't grouped.
        self.group_by_paths = None  # type: Optional[List[Tuple[str, str]]]
        # Additional context (names of variables) used for looking up fields, but not for
        # grouping.
        self.subcontext = None  # type: Optional[EvaluationContext]
        # If true, then aggregation expressions will not be evaluated, but instead will
        # return a syntax tree node, corresponding to their expression with any subexpressions
        # properly evaluated.
        self.exclude_aggregation = False
        # The names of the selector columns. Used to canonicalize references to selectors to
        # a path (_SELECTOR_TABLE, name)
        self.selector_names = []  # type: List[str]
    def add_subcontext(self, subcontext):
        # type: (EvaluationContext) -> None
        '''Adds another context to this one.
        The subcontext will be used for looking up columns.  It's necessary when the subcontext
        is a group by context, but the current context is not, as we can't join a pandas
        DataFrame and DataFrameGroupBy into one object.
        Args:
            subcontext: An EvaluationContext to look up fields in, disjunct from the current one.
        '''
        # Only a single subcontext is supported; a second registration is a programming error.
        if self.subcontext is not None:
            raise ValueError("Context already has subcontext {}!".format(self.subcontext))
        self.subcontext = subcontext
    def maybe_add_column(self, column):
        # type: (TypedSeries) -> Tuple[str, ...]
        '''Ensures that column is available in the context. Returns the path to retrieve it later.
        Calling this function indicates that column is an evaluated series of data that should be
        available in the evaluation context.  Either find it in the existing context, or add it as
        a new column to the context.  Then, return the identifier path to retrieve it later out of
        the context, either the path to the existing column, or a new one.  This may involve
        generating a random name for the column if it doesn't contain one.
        Args:
            column: A TypedSeries of data, optionally with a name.
        Returns:
            An identifier path to where this column can be found in the context.
        '''
        # Anonymous columns get a random, collision-free name.
        name = column.series.name or uuid.uuid4().hex
        canonical_paths = self.get_all_canonical_paths((name,))
        if len(canonical_paths) > 1:
            raise ValueError("It's confusing to add column {}; ambiguous {}".format(
                name, canonical_paths))
        if canonical_paths:
            canonical_path, = canonical_paths
        else:
            # Unknown name: stage it as a new column of the synthetic selector table.
            canonical_path = (_SELECTOR_TABLE, name)
        if canonical_path[0] != _SELECTOR_TABLE:
            # We already know about this name; don't need to add to context.
            return canonical_paths[0]
        # NOTE(review): if the name already canonicalized to the selector table, this
        # falls through and re-appends the column to self.table — confirm that a
        # duplicate column is intended in that case.
        self.column_to_table_ids[name] = [canonical_path[0]]
        column.series.name = '.'.join(canonical_path)
        self.canonical_column_to_type[column.series.name] = column.type_
        self.table = TypedDataFrame(
            pd.concat([self.table.dataframe, column.series], axis=1),
            self.table.types + [column.type_])
        return canonical_path
    def _partially_evaluate(self, selector, group_by_paths):
        # type: (EvaluatableNode, Sequence[Tuple[str, ...]]) -> EvaluatableNode
        """Partially evaluates a selector in preparation for group by."""
        # Constants are trivially uniform within a group, so wrap them directly
        # in GroupedBy; everything else is marked recursively.
        if selector.is_constant():
            marked_selector = GroupedBy(selector)  # type: EvaluatableNode
        else:
            marked_selector = selector.mark_grouped_by(group_by_paths, self)
        partially_evaluated_selector = marked_selector.pre_group_by_partially_evaluate(self)
        # If we evaluate a selector and it's fully evaluated...
        if isinstance(partially_evaluated_selector, TypedSeries):
            raise ValueError("selecting expression {} that is not aggregated or grouped by"
                             .format(selector))
        return partially_evaluated_selector
    def do_group_by(self, selectors, group_by):
        # type: (Sequence[EvaluatableNode], List[Field]) -> List[EvaluatableNode]
        """Groups the current context by the requested paths.
        Canonicalizes the paths (figures out which table a plain column name goes with), applies
        the pandas groupby operation to the context's table, and saves the group by paths to mark
        this context as a Group By context (which changes how aggregating expressions like sum
        or max operate, from operating over all rows to just the rows within a group).
        Args:
            selectors: The SELECTed expressions, to be partially evaluated against
                this context before grouping is applied.
            group_by: Fields naming the columns to group by, i.e. a column name or
                a table, column pair, as requested by the user string.
        Returns:
            Partially-evaluated replacements for `selectors`, to be fully evaluated
            against the now-grouped context.
        """
        if isinstance(self.table.dataframe, pd.core.groupby.DataFrameGroupBy):
            raise ValueError("Context already grouped!")
        group_by_paths = [self.get_canonical_path(field.path) for field in group_by]
        new_selectors = [self._partially_evaluate(selector, group_by_paths)
                         for selector in selectors]
        if group_by_paths:
            group_by_fields = ['.'.join(path) for path in group_by_paths]
            grouped = self.table.dataframe.groupby(by=group_by_fields)
        else:
            # If no paths are specified to group by, group all rows into one group.
            # This case is invoked when the query contains aggregation but no
            # explicit GROUP BY clause.
            grouped = self.table.dataframe.groupby(by=lambda _: 1)
        self.table = TypedDataFrame(grouped, self.table.types)
        self.group_by_paths = group_by_paths
        return new_selectors
    def add_table_from_node(self, from_item, alias):
        # type: (DataframeNode, Union[_EmptyNode, str]) -> Tuple[TypedDataFrame, str]
        '''Add a table to the query's scope when it is FROM-ed or JOIN-ed.
        Args:
            from_item: A node representing a FROM expression
            alias: An alias for the given table
        Returns:
            Tuple of the table that has been added to the scope
            (TypedDataFrame) and its name/label.
        '''
        # Resolve the FROM expression to a concrete table, then register it.
        table, table_id = from_item.get_dataframe(self.table_context)
        return self.add_table_from_dataframe(table, table_id, alias)
    def add_table_from_dataframe(self, table,  # type: TypedDataFrame
                                 table_id,  # type: Optional[str]
                                 alias  # type: Union[_EmptyNode, str]
                                 ):
        # type: (...) -> Tuple[TypedDataFrame, str]
        '''Add a table to the query's scope, given an already resolved DataFrame.
        Args:
            dataframe: The table to add, already in TypedDataFrame format
            table_id: The default table name, if available
            alias: An alias for the given table
        Returns:
            Tuple of the table that has been added to the scope
            (TypedDataFrame) and its name/label.
        '''
        # Alias, if provided, defines the table's name
        if not isinstance(alias, _EmptyNode):
            table_id = alias
        elif not table_id:
            # Anonymous subexpressions (e.g. unaliased joins) get a synthetic name.
            table_id = '__join{}'.format(len(self.table_ids))
        # Save mapping of column ID <-> table IDs
        for column in table.dataframe.columns:
            column_name = column.split('.')[-1]
            self.column_to_table_ids[column_name].append(table_id)
            self.table_to_column_ids[table_id].append(column_name)
        self.table_ids.add(table_id)
        # Rename columns in format "[new_table_id].[column_name]"
        table = TypedDataFrame(
            table.dataframe.rename(
                columns=lambda column_name: '{}.{}'.format(table_id, column_name.split('.')[-1])),
            table.types)
        # Save mapping of column ID to type
        if len(table.dataframe.columns) != len(table.types):
            raise ValueError('Context: Number of columns and types not equal: {} != {}'.format(
                len(table.dataframe.columns), len(table.types)))
        for column, type_ in zip(table.dataframe.columns, table.types):
            self.canonical_column_to_type[column] = type_
        self.table = table
        return table, table_id
    @classmethod
    def clone_context_new_table(cls, table, old_context):
        # type: (TypedDataFrame, EvaluationContext) -> EvaluationContext
        '''Clone a context - use all the same metadata as the given ("old")
        context except for the table, which is specified separately.
        The new table must contain the same columns as the old context's table
        in order for the metadata to remain valid.
        Args:
            table: TypedDataFrame containing the data that the new context represents
            old_context: Old context from which to copy metadata
        '''
        if any([new != old for (new, old) in zip(table.dataframe.columns,
                                                 old_context.table.dataframe.columns)]):
            raise ValueError('Columns differ when cloning context with new table: {} vs {}'.format(
                table.dataframe.columns, old_context.table.dataframe.columns))
        new_context = cls(old_context.table_context)
        new_context.table = table
        # NOTE(review): table_to_column_ids and selector_names are NOT copied from
        # old_context (the clone keeps the fresh defaults) — confirm this is intended.
        new_context.canonical_column_to_type = old_context.canonical_column_to_type
        new_context.column_to_table_ids = old_context.column_to_table_ids
        new_context.table_ids = old_context.table_ids
        new_context.group_by_paths = old_context.group_by_paths
        new_context.subcontext = old_context.subcontext
        new_context.exclude_aggregation = old_context.exclude_aggregation
        return new_context
    def get_all_canonical_paths(self, path):
        # type: (Tuple[str, ...]) -> List[Tuple[str, str]]
        '''
        Find all the possible table-column pairs that the given path could refer to.
        Args:
            path: A tuple of either just (column name) or (table name, column name)
        Returns:
            A list of (table name, column name) tuples
        '''
        result = []  # type: List[Tuple[str, str]]
        if len(path) == 1:
            # Bare column name: it may exist in several tables.
            column, = path
            result = [(table_id, column) for table_id in self.column_to_table_ids[column]]
        elif len(path) == 2:
            # Check that this path is valid
            table_id, column = path
            if table_id not in self.column_to_table_ids[column] and not self.subcontext:
                raise ValueError("field {} is not present in table {} (only {})".format(
                    column, table_id, self.column_to_table_ids[column]))
            result = [(table_id, column)]
        else:
            raise NotImplementedError('Array fields are not implemented; path {}'.format(path))
        # If path wasn't found in the current context, try the subcontext
        if not result and self.subcontext:
            result = self.subcontext.get_all_canonical_paths(path)
        # Final fallback: a bare selector name canonicalizes to the selector table.
        if not result and column in self.selector_names:
            return [(_SELECTOR_TABLE, path[0])]
        return result
    def get_canonical_path(self, path):
        # type: (Tuple[str, ...]) -> Tuple[str, str]
        '''
        Get exactly one table-column pair for the given path (or throw error).
        If path is already a (table, column) tuple, this just makes sure it exists in the dataset.
        If table name is not provided in the path, this finds the table that the column belongs to.
        Args:
            path: A tuple of either just (column name) or (table name, column name)
        Returns:
            A tuple of the table and column name
        '''
        all_paths = self.get_all_canonical_paths(path)
        field = '.'.join(path)
        if len(all_paths) == 0:
            raise ValueError("field {} is not present in any from'ed tables".format(field))
        if len(all_paths) > 1:
            raise ValueError("field {} is ambiguous: present in {!r}".format(field, all_paths))
        return all_paths[0]
    def lookup(self, path):
        # type: (Tuple[str, ...]) -> TypedSeries
        '''
        Gets a column from the context's table.
        Args:
            path: Path to the column, as a tuple of strings
        Returns:
            A TypedSeries representing the requested column, or a KeyError if not found
        '''
        key = '.'.join(self.get_canonical_path(path))
        # First, try looking up the path in the current context.  If the path is not found,
        # it's not an error (yet), as we continue on and look in the subcontext, if one exists.
        try:
            series = self.table.dataframe[key]
        except KeyError:
            if self.subcontext:
                return self.subcontext.lookup(path)
            else:
                raise KeyError(("path {!r} (canonicalized to key {!r}) not present in table; "
                                "columns available: {!r}").format(path, key,
                                                                  list(self.table.dataframe)))
        try:
            type_ = self.canonical_column_to_type[key]
        except KeyError:
            raise KeyError(("path {!r} (canonicalized to key {!r}) not present in type dict; "
                            "columns available: {!r}").format(
                                path, key, list(self.canonical_column_to_type.keys())))
        return TypedSeries(series, type_)
# Module-level context with no FROMed tables; suitable for evaluating constants.
EMPTY_CONTEXT = EvaluationContext(TableContext())
| |
# coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for evaluation_helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow_gan.examples import evaluation_helper as evaluation
from tensorflow_gan.python import contrib_utils as contrib
def _local_variable(val, name):
  """Returns a tf.Variable registered only in the LOCAL_VARIABLES collection."""
  local_collections = [tf.compat.v1.GraphKeys.LOCAL_VARIABLES]
  return tf.Variable(val, name=name, collections=local_collections)
class CheckpointIteratorTest(tf.test.TestCase):
  """Tests for evaluation.checkpoints_iterator."""

  def _count_checkpoints(self, checkpoint_dir):
    # Drain the iterator with a zero timeout and count what it yields.
    return sum(
        1 for _ in evaluation.checkpoints_iterator(checkpoint_dir, timeout=0))

  def testReturnsEmptyIfNoCheckpointsFound(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'no_checkpoints_found')
    self.assertEqual(self._count_checkpoints(checkpoint_dir), 0)

  def testReturnsSingleCheckpointIfOneCheckpointFound(self):
    if tf.executing_eagerly():
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'one_checkpoint_found')
    if not tf.io.gfile.exists(checkpoint_dir):
      tf.io.gfile.makedirs(checkpoint_dir)
    global_step = tf.compat.v1.train.get_or_create_global_step()
    saver = tf.compat.v1.train.Saver()  # Saves the global step.
    with self.cached_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      saver.save(sess, os.path.join(checkpoint_dir, 'model.ckpt'),
                 global_step=global_step)
    self.assertEqual(self._count_checkpoints(checkpoint_dir), 1)

  def testReturnsSingleCheckpointIfOneShardedCheckpoint(self):
    if tf.executing_eagerly():
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'one_checkpoint_found_sharded')
    if not tf.io.gfile.exists(checkpoint_dir):
      tf.io.gfile.makedirs(checkpoint_dir)
    global_step = tf.compat.v1.train.get_or_create_global_step()
    # This will result in 3 different checkpoint shard files.
    with tf.device('/cpu:0'):
      tf.Variable(10, name='v0')
    with tf.device('/cpu:1'):
      tf.Variable(20, name='v1')
    saver = tf.compat.v1.train.Saver(sharded=True)
    two_cpu_config = tf.compat.v1.ConfigProto(device_count={'CPU': 2})
    with tf.compat.v1.Session(target='', config=two_cpu_config) as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      saver.save(sess, os.path.join(checkpoint_dir, 'model.ckpt'),
                 global_step=global_step)
    self.assertEqual(self._count_checkpoints(checkpoint_dir), 1)

  def testTimeoutFn(self):
    timeout_fn_calls = [0]

    def timeout_fn():
      # Keep timing out until the fourth call, which stops the iterator.
      timeout_fn_calls[0] += 1
      return timeout_fn_calls[0] > 3

    results = list(
        evaluation.checkpoints_iterator(
            '/non-existent-dir', timeout=0.1, timeout_fn=timeout_fn))
    self.assertEqual([], results)
    self.assertEqual(4, timeout_fn_calls[0])
def logistic_classifier(inputs):
  """A one-unit dense layer with sigmoid activation (logistic regression)."""
  probabilities = tf.compat.v1.layers.dense(inputs, 1, activation=tf.sigmoid)
  return probabilities
class EvaluateOnceTest(tf.test.TestCase):
  """Tests for evaluation.evaluate_once."""

  def setUp(self):
    super(EvaluateOnceTest, self).setUp()
    # Create an easy training set:
    np.random.seed(0)
    self._inputs = np.zeros((16, 4))
    self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
    # One-hot encode each row: column (2 * label + noise bit) is set hot, so
    # the label is recoverable from the inputs.
    for i in range(16):
      j = int(2 * self._labels[i] + np.random.randint(0, 2))
      self._inputs[i, j] = 1
  def _train_model(self, checkpoint_dir, num_steps):
    """Trains a simple classification model.
    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (e.g. we can expect %100 accuracy).
    Args:
      checkpoint_dir: The directory where the checkpoint is written to.
      num_steps: The number of steps to train for.
    """
    with tf.Graph().as_default():
      tf.compat.v1.random.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      tf_predictions = logistic_classifier(tf_inputs)
      loss = tf.compat.v1.losses.log_loss(tf_predictions, tf_labels)
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=1.0)
      train_op = contrib.create_train_op(loss, optimizer)
      # MonitoredTrainingSession writes checkpoints to checkpoint_dir, and
      # StopAtStepHook ends the loop after num_steps global steps.
      with tf.compat.v1.train.MonitoredTrainingSession(
          hooks=[tf.estimator.StopAtStepHook(num_steps)],
          checkpoint_dir=checkpoint_dir) as sess:
        loss = None
        while not sess.should_stop():
          loss = sess.run(train_op)
      # Sanity-check convergence only when trained to (near) completion.
      if num_steps >= 300:
        assert loss < .015
  def testEvaluatePerfectModel(self):
    if tf.executing_eagerly():
      # tf.metrics.accuracy is not supported when eager execution is enabled.
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_perfect_model_once')
    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)
    # Run
    inputs = tf.constant(self._inputs, dtype=tf.float32)
    labels = tf.constant(self._labels, dtype=tf.float32)
    logits = logistic_classifier(inputs)
    predictions = tf.round(logits)
    accuracy, update_op = tf.compat.v1.metrics.accuracy(
        predictions=predictions, labels=labels)
    # Block until the checkpoint written by _train_model is visible on disk.
    checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)
    final_ops_values = evaluation.evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            evaluation.StopAfterNEvalsHook(1),
        ])
    self.assertTrue(final_ops_values['accuracy'] > .99)
  def testEvalOpAndFinalOp(self):
    if tf.executing_eagerly():
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')
    # Train a model for a single step to get a checkpoint.
    self._train_model(checkpoint_dir, num_steps=1)
    checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)
    # Create the model so we have something to restore.
    inputs = tf.constant(self._inputs, dtype=tf.float32)
    logistic_classifier(inputs)
    num_evals = 5
    final_increment = 9.0
    try:
      my_var = _local_variable(0.0, name='MyVar')
    except TypeError:  # `collections` doesn't exist in TF2.
      return
    # eval_ops runs once per evaluation step; final_ops runs once at the end,
    # so the final value should be num_evals + final_increment.
    eval_ops = tf.compat.v1.assign_add(my_var, 1.0)
    final_ops = tf.identity(my_var) + final_increment
    final_ops_values = evaluation.evaluate_once(
        checkpoint_path=checkpoint_path,
        eval_ops=eval_ops,
        final_ops={'value': final_ops},
        hooks=[
            evaluation.StopAfterNEvalsHook(num_evals),
        ])
    self.assertEqual(final_ops_values['value'], num_evals + final_increment)
  def testOnlyFinalOp(self):
    if tf.executing_eagerly():
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')
    # Train a model for a single step to get a checkpoint.
    self._train_model(checkpoint_dir, num_steps=1)
    checkpoint_path = evaluation.wait_for_new_checkpoint(checkpoint_dir)
    # Create the model so we have something to restore.
    inputs = tf.constant(self._inputs, dtype=tf.float32)
    logistic_classifier(inputs)
    final_increment = 9.0
    try:
      my_var = _local_variable(0.0, name='MyVar')
    except TypeError:  # `collections` doesn't exist in TF2.
      return
    # With no eval_ops, only final_ops contributes to the result.
    final_ops = tf.identity(my_var) + final_increment
    final_ops_values = evaluation.evaluate_once(
        checkpoint_path=checkpoint_path, final_ops={'value': final_ops})
    self.assertEqual(final_ops_values['value'], final_increment)
class EvaluateRepeatedlyTest(tf.test.TestCase):
def setUp(self):
super(EvaluateRepeatedlyTest, self).setUp()
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
  def _train_model(self, checkpoint_dir, num_steps):
    """Trains a simple classification model.
    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (e.g. we can expect %100 accuracy).
    Args:
      checkpoint_dir: The directory where the checkpoint is written to.
      num_steps: The number of steps to train for.
    """
    # Build and train in a fresh graph so repeated calls don't accumulate ops.
    with tf.Graph().as_default():
      tf.compat.v1.random.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)
      tf_predictions = logistic_classifier(tf_inputs)
      loss = tf.compat.v1.losses.log_loss(tf_predictions, tf_labels)
      optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=1.0)
      train_op = contrib.create_train_op(loss, optimizer)
      # MonitoredTrainingSession writes checkpoints to checkpoint_dir, and
      # StopAtStepHook ends the loop after num_steps global steps.
      with tf.compat.v1.train.MonitoredTrainingSession(
          hooks=[tf.estimator.StopAtStepHook(num_steps)],
          checkpoint_dir=checkpoint_dir) as sess:
        loss = None
        while not sess.should_stop():
          loss = sess.run(train_op)
def testEvaluatePerfectModel(self):
if tf.executing_eagerly():
return
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_perfect_model_repeated')
# Train a Model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run
inputs = tf.constant(self._inputs, dtype=tf.float32)
labels = tf.constant(self._labels, dtype=tf.float32)
logits = logistic_classifier(inputs)
predictions = tf.round(logits)
accuracy, update_op = tf.compat.v1.metrics.accuracy(
predictions=predictions, labels=labels)
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=update_op,
final_ops={'accuracy': accuracy},
hooks=[
evaluation.StopAfterNEvalsHook(1),
],
max_number_of_evaluations=1)
self.assertTrue(final_values['accuracy'] > .99)
def testEvaluationLoopTimeout(self):
if tf.executing_eagerly():
# This test uses `tf.placeholder`, which doesn't work in eager executing.
return
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluation_loop_timeout')
if not tf.io.gfile.exists(checkpoint_dir):
tf.io.gfile.makedirs(checkpoint_dir)
# We need a variable that the saver will try to restore.
tf.compat.v1.train.get_or_create_global_step()
# Run with placeholders. If we actually try to evaluate this, we'd fail
# since we're not using a feed_dict.
cant_run_op = tf.compat.v1.placeholder(dtype=tf.float32)
start = time.time()
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=cant_run_op,
hooks=[evaluation.StopAfterNEvalsHook(10)],
timeout=6)
end = time.time()
self.assertFalse(final_values)
# Assert that we've waited for the duration of the timeout (minus the sleep
# time).
self.assertGreater(end - start, 5.0)
# Then the timeout kicked in and stops the loop.
self.assertLess(end - start, 7)
  def testEvaluationLoopTimeoutWithTimeoutFn(self):
    if tf.executing_eagerly():
      # tf.metrics.accuracy is not supported when eager execution is enabled.
      return
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluation_loop_timeout_with_timeout_fn')
    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)
    # Run
    inputs = tf.constant(self._inputs, dtype=tf.float32)
    labels = tf.constant(self._labels, dtype=tf.float32)
    logits = logistic_classifier(inputs)
    predictions = tf.round(logits)
    accuracy, update_op = tf.compat.v1.metrics.accuracy(
        predictions=predictions, labels=labels)
    timeout_fn_calls = [0]
    # Returns True (ending the loop) only on its fourth call, so the loop
    # stops via the timeout_fn before a second evaluation happens.
    def timeout_fn():
      timeout_fn_calls[0] += 1
      return timeout_fn_calls[0] > 3
    final_values = evaluation.evaluate_repeatedly(
        checkpoint_dir=checkpoint_dir,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            evaluation.StopAfterNEvalsHook(1),
        ],
        eval_interval_secs=1,
        max_number_of_evaluations=2,
        timeout=0.1,
        timeout_fn=timeout_fn)
    # We should have evaluated once.
    self.assertTrue(final_values['accuracy'] > .99)
    # And called 4 times the timeout fn
    self.assertEqual(4, timeout_fn_calls[0])
def testEvaluateWithEvalFeedDict(self):
if tf.executing_eagerly():
# tf.placeholder() is not compatible with eager execution.
return
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_with_eval_feed_dict')
self._train_model(checkpoint_dir, num_steps=1)
# We need a variable that the saver will try to restore.
tf.compat.v1.train.get_or_create_global_step()
# Create a variable and an eval op that increments it with a placeholder.
try:
my_var = _local_variable(0.0, name='my_var')
except TypeError: # `collections` doesn't exist in TF2.
return
increment = tf.compat.v1.placeholder(dtype=tf.float32)
eval_ops = tf.compat.v1.assign_add(my_var, increment)
increment_value = 3
num_evals = 5
expected_value = increment_value * num_evals
final_values = evaluation.evaluate_repeatedly(
checkpoint_dir=checkpoint_dir,
eval_ops=eval_ops,
feed_dict={increment: 3},
final_ops={'my_var': tf.identity(my_var)},
hooks=[
evaluation.StopAfterNEvalsHook(num_evals),
],
max_number_of_evaluations=1)
self.assertEqual(final_values['my_var'], expected_value)
def _verify_events(self, output_dir, names_to_values):
  """Verifies that the given `names_to_values` are found in the summaries.

  Also checks that a GraphDef was written out to the events file.

  Args:
    output_dir: An existing directory where summaries are found.
    names_to_values: A dictionary of strings to values.
  """
  # Exactly one events file is expected in the output directory; the file
  # may carry extra entries (e.g. the event version stamp), so events that
  # hold neither a summary nor a graph are simply skipped below.
  event_paths = glob.glob(os.path.join(output_dir, '*'))
  self.assertEqual(len(event_paths), 1)

  graph_def = None
  summaries = []
  for event in tf.compat.v1.train.summary_iterator(event_paths[0]):
    if event.summary.value:
      summaries.append(event.summary)
    elif event.graph_def:
      graph_def = event.graph_def

  # Flatten every (tag, simple_value) pair out of the collected summaries.
  saved_results = {
      value.tag: value.simple_value
      for summary in summaries
      for value in summary.value
  }
  for name in names_to_values:
    self.assertAlmostEqual(names_to_values[name], saved_results[name], 5)
  self.assertIsNotNone(graph_def)
def testSummariesAreFlushedToDisk(self):
  """Checks SummaryAtEndHook flushes scalar summaries and a GraphDef."""
  if tf.executing_eagerly():
    # Merging tf.summary.* ops is not compatible with eager execution.
    return
  checkpoint_dir = os.path.join(self.get_temp_dir(), 'summaries_are_flushed')
  logdir = os.path.join(self.get_temp_dir(), 'summaries_are_flushed_eval')
  if tf.io.gfile.exists(logdir):
    tf.io.gfile.rmtree(logdir)
  # Train a Model to completion:
  self._train_model(checkpoint_dir, num_steps=300)
  # Create the model (which can be restored).
  inputs = tf.constant(self._inputs, dtype=tf.float32)
  logistic_classifier(inputs)
  # Register one scalar summary per (name, value) pair; the hook should
  # write each of them to the events file at the end of evaluation.
  names_to_values = {'bread': 3.4, 'cheese': 4.5, 'tomato': 2.0}
  for k in names_to_values:
    v = names_to_values[k]
    tf.compat.v1.summary.scalar(k, v)
  evaluation.evaluate_repeatedly(
      checkpoint_dir=checkpoint_dir,
      hooks=[
          evaluation.SummaryAtEndHook(log_dir=logdir),
      ],
      max_number_of_evaluations=1)
  self._verify_events(logdir, names_to_values)
def testSummaryAtEndHookWithoutSummaries(self):
  """With no summaries registered, the hook still records a GraphDef event."""
  # NOTE(review): 'summaires' is a typo, but it is a runtime path string;
  # left unchanged to keep behavior identical.
  logdir = os.path.join(self.get_temp_dir(),
                        'summary_at_end_hook_without_summaires')
  if tf.io.gfile.exists(logdir):
    tf.io.gfile.rmtree(logdir)
  with tf.Graph().as_default():
    # Purposefully don't add any summaries. The hook will just dump the
    # GraphDef event.
    hook = evaluation.SummaryAtEndHook(log_dir=logdir)
    hook.begin()
    with self.cached_session() as session:
      hook.after_create_session(session, None)
      hook.end(session)
  # Expect an events file containing only the GraphDef, no scalar values.
  self._verify_events(logdir, {})
# Script entry point: run all test cases in this file.
if __name__ == '__main__':
  tf.test.main()
| |
from __future__ import print_function
import numpy as np
import pytest
import string
from keras.utils.test_utils import get_test_data, keras_test
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras import layers
import keras
@keras_test
def test_temporal_classification():
    '''
    Classify temporal sequences of float numbers
    of length 3 into 2 classes using
    single layer of GRU units and softmax applied
    to the last activations of the units
    '''
    # Fixed seed keeps the synthetic data and training deterministic.
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    model = Sequential()
    model.add(layers.GRU(8,
                         input_shape=(x_train.shape[1], x_train.shape[2])))
    model.add(layers.Dense(y_train.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=4, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    # NOTE(review): 'acc' is the Keras 1/2 history key; newer Keras reports
    # 'accuracy' -- confirm against the pinned Keras version.
    assert(history.history['acc'][-1] >= 0.8)
    # Round-trip the architecture through get_config/from_config to make
    # sure the trained model stays serializable.
    config = model.get_config()
    model = Sequential.from_config(config)
@keras_test
def test_temporal_classification_functional():
    '''
    Classify temporal sequences of float numbers
    of length 3 into 2 classes using a
    single layer of SimpleRNN units and softmax applied
    to the last activations of the units, built with the
    functional API.
    '''
    # Fixed seed keeps the synthetic data and training deterministic.
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    inputs = layers.Input(shape=(x_train.shape[1], x_train.shape[2]))
    x = layers.SimpleRNN(8)(inputs)
    outputs = layers.Dense(y_train.shape[-1], activation='softmax')(x)
    model = keras.models.Model(inputs, outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=4, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    # NOTE(review): 'acc' is the Keras 1/2 history key; newer Keras reports
    # 'accuracy' -- confirm against the pinned Keras version.
    assert(history.history['acc'][-1] >= 0.8)
@keras_test
def test_temporal_regression():
    '''
    Predict float numbers (regression) based on sequences
    of float numbers of length 3 using a single layer of LSTM units
    '''
    # Fixed seed keeps the synthetic data and training deterministic.
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 5),
                                                         output_shape=(2,),
                                                         classification=False)
    model = Sequential()
    # The LSTM's output width equals the target width, so no Dense head.
    model.add(layers.LSTM(y_train.shape[-1],
                          input_shape=(x_train.shape[1], x_train.shape[2])))
    model.compile(loss='hinge', optimizer='adam')
    history = model.fit(x_train, y_train, epochs=5, batch_size=16,
                        validation_data=(x_test, y_test), verbose=0)
    assert(history.history['loss'][-1] < 1.)
@keras_test
def test_3d_to_3d():
    '''
    Apply a same Dense layer for each element of time dimension of the input
    and make predictions of the output sequence elements.
    This does not make use of the temporal structure of the sequence
    (see TimeDistributedDense for more details)
    '''
    # Fixed seed keeps the synthetic data and training deterministic.
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=100,
                                                         num_test=20,
                                                         input_shape=(3, 5),
                                                         output_shape=(3, 5),
                                                         classification=False)
    model = Sequential()
    # TimeDistributed applies the same Dense weights at every timestep.
    model.add(layers.TimeDistributed(
        layers.Dense(y_train.shape[-1]), input_shape=(x_train.shape[1], x_train.shape[2])))
    model.compile(loss='hinge', optimizer='rmsprop')
    history = model.fit(x_train, y_train, epochs=20, batch_size=16,
                        validation_data=(x_test, y_test), verbose=0)
    assert(history.history['loss'][-1] < 1.)
@keras_test
def test_stacked_lstm_char_prediction():
    '''
    Learn alphabetical char sequence with stacked LSTM.
    Predict the whole alphabet based on the first two letters ('ab' -> 'ab...z')
    See non-toy example in examples/lstm_text_generation.py
    '''
    # generate alphabet: http://stackoverflow.com/questions/16060899/alphabet-range-python
    alphabet = string.ascii_lowercase
    number_of_chars = len(alphabet)
    # generate char sequences of length 'sequence_length' out of alphabet and
    # store the next char as label (e.g. 'ab'->'c')
    sequence_length = 2
    sentences = [alphabet[i: i + sequence_length] for i in range(len(alphabet) - sequence_length)]
    next_chars = [alphabet[i + sequence_length] for i in range(len(alphabet) - sequence_length)]
    # Transform sequences and labels into 'one-hot' encoding.
    # Use the builtin `bool`: the `np.bool` alias is deprecated and was
    # removed in NumPy 1.24, where it raises AttributeError.
    x = np.zeros((len(sentences), sequence_length, number_of_chars), dtype=bool)
    y = np.zeros((len(sentences), number_of_chars), dtype=bool)
    for i, sentence in enumerate(sentences):
        for t, char in enumerate(sentence):
            x[i, t, ord(char) - ord('a')] = 1
        y[i, ord(next_chars[i]) - ord('a')] = 1
    # learn the alphabet with stacked LSTM
    model = Sequential([
        layers.LSTM(16, return_sequences=True, input_shape=(sequence_length, number_of_chars)),
        layers.LSTM(16, return_sequences=False),
        layers.Dense(number_of_chars, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(x, y, batch_size=1, epochs=60, verbose=1)
    # prime the model with 'ab' sequence and let it generate the learned alphabet
    sentence = alphabet[:sequence_length]
    generated = sentence
    for iteration in range(number_of_chars - sequence_length):
        x = np.zeros((1, sequence_length, number_of_chars))
        for t, char in enumerate(sentence):
            x[0, t, ord(char) - ord('a')] = 1.
        preds = model.predict(x, verbose=0)[0]
        next_char = chr(np.argmax(preds) + ord('a'))
        generated += next_char
        # Slide the window: drop the oldest char, append the prediction.
        sentence = sentence[1:] + next_char
    # check that it did generate the alphabet correctly
    assert(generated == alphabet)
@keras_test
def test_masked_temporal():
    '''
    Confirm that even with masking on both inputs and outputs, cross-entropies are
    of the expected scale.
    In this task, there are variable length inputs of integers from 1-9, and a random
    subset of unmasked outputs. Each of these outputs has a 50% probability of being
    the input number unchanged, and a 50% probability of being 2*input%10.
    The ground-truth best cross-entropy loss should, then be -log(0.5) = 0.69
    '''
    model = Sequential()
    model.add(layers.Embedding(10, 10, mask_zero=True))
    model.add(layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam')
    # np.random.random_integers is deprecated (removed in modern NumPy);
    # randint's upper bound is exclusive, so 1..9 becomes randint(1, 10).
    x = np.random.randint(1, 10, (20000, 10))
    for rowi in range(x.shape[0]):
        # Mask a random-length prefix of each row with zeros (the mask value).
        # random_integers(high) drew from 1..high inclusive, and also relied
        # on Py2 integer division for `x.shape[1] / 2`; reproduce both.
        padding = np.random.randint(1, x.shape[1] // 2 + 1)
        x[rowi, :padding] = 0
    # 50% of the time the correct output is the input.
    # The other 50% of the time it's 2 * input % 10
    y = (x * np.random.randint(1, 3, x.shape)) % 10
    ys = np.zeros((y.size, 10), dtype='int32')
    for i, target in enumerate(y.flat):
        ys[i, target] = 1
    ys = ys.reshape(y.shape + (10,))
    history = model.fit(x, ys, validation_split=0.05, batch_size=10,
                        verbose=0, epochs=3)
    ground_truth = -np.log(0.5)
    assert(np.abs(history.history['loss'][-1] - ground_truth) < 0.06)
# Script entry point: run this file's tests under pytest.
if __name__ == '__main__':
    pytest.main([__file__])
| |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os, sys
import logging
import re
import glob
from inspect import isfunction
from event_emitter import EventEmitter
from pybot.brain import Brain
from pybot.response import Response
from pybot.message import CatchAllMessage
from pybot.listener import TextListener
# Adapters bundled with the framework (loadable without an external package).
# Fixed typo: 'campire' -> 'campfire'.
HUBOT_DEFAULT_ADAPTERS = [ 'campfire', 'shell' ]

# Recognized section headings in a script's documentation header.
# Fixed typos: 'auther'/'authers' -> 'author'/'authors'.
HUBOT_DOCUMENTATION_SECTIONS = [
    'description',
    'dependencies',
    'configuration',
    'commands',
    'notes',
    'author',
    'authors',
    'examples',
    'tags',
    'urls'
]

# Mapping of symbolic log-level names to stdlib `logging` numeric levels.
LOGGING_LEVEL = {
    'critical' : 50,
    'error'    : 40,
    'warning'  : 30,
    'info'     : 20,
    'debug'    : 10,
    'notset'   : 0,
}
class Robot(object):
    """
    Robots receive messages from a chat source (Campfire, irc, etc), and
    dispatch them to matching listeners.
    """
    def __init__(self, adapter_namespace, adapter, httpd, name='Hubot'):
        """
        Constructor.
        Args:
            adapter_namespace : A String of the path to local adapters.
            adapter : A String of the adapter name.
            httpd : A Boolean whether to enable the HTTP daemon.
            name : A String of the robot name, defaults to Hubot.
        Returns nothing.
        """
        self.name = name
        self.events = EventEmitter()
        self.brain = Brain(self)
        self.alias = False
        self.adapter = None
        self.response = Response
        self.commands = []
        self.listeners = []
        # Default to INFO, then honor HUBOT_LOG_LEVEL if it names a known level.
        loglevel = LOGGING_LEVEL['info']
        logging.basicConfig(level=loglevel)
        self.logger = logging.getLogger()
        env_loglevel = os.environ.get('HUBOT_LOG_LEVEL')
        if env_loglevel and env_loglevel.lower() in LOGGING_LEVEL:
            loglevel = LOGGING_LEVEL[env_loglevel.lower()]
        self.logger.setLevel(loglevel)
        self.ping_interval_id = None
        self.parse_version()
        if httpd:
            self.setup_express()
        else:
            self.setup_null_router()
        self.load_adapter(adapter_namespace, adapter)
        self.adapter_name = adapter
        self.error_handlers = []
        ## TODO: Write this code as Python
        """
        @on 'error', (err, msg) =>
            @invokeErrorHandlers(err, msg)
        process.on 'uncaughtException', (err) =>
            @emit 'error', err
        """

    def hear(self, regex, callback):
        """
        Public:
        Adds a Listener that attempts to match incoming messages based on
        a Regex.
        Args:
            regex : A Regex that determines if the callback should be called.
            callback: A Function that is called with a Response object.
        Returns nothing.
        """
        self.listeners.append(TextListener(self, regex.match, callback))

    def respond(self, regex, callback):
        """
        Public:
        Adds a Listener that attempts to match incoming messages directed
        at the robot based on a Regex. All regexes treat patterns like they
        begin with a '^'
        Args:
            regex : A Regex that determines if the callback should be called.
            callback: A Function that is called with a Response object.
        Returns nothing.
        """
        # The argument is expected to be a JavaScript-style '/pattern/flags'
        # literal. BUGFIX: the original named this local 're', shadowing the
        # re module used below and then calling list methods that don't
        # exist ('re.join'); renamed to 'parts'.
        parts = str(regex).split('/')
        parts = parts[1:]
        modifiers = parts.pop()
        if parts[0] and parts[0][0] == '^':
            # BUGFIX: logger.warning treats extra positional args as
            # %-format arguments; the original passed two message strings.
            self.logger.warning("Anchors don't work well with respond, "
                                "perhaps you want to use 'hear'")
            self.logger.warning("The regex in question was %s" % str(regex))
        pattern = '/'.join(parts)
        # BUGFIX: the original used the JavaScript replacement token '$&',
        # which is meaningless to Python's re.sub; re.escape is the
        # intended "quote regex metacharacters" operation.
        name = re.escape(self.name)
        if self.alias:
            alias = re.escape(self.alias)
            # Match "<alias>: ..." or "<name>: ..." (with optional @ and ,).
            pattern = (r'^\s*[@]?(?:%s[:,]?|%s[:,]?)\s*(?:%s)'
                       % (alias, name, pattern))
        else:
            pattern = r'^\s*[@]?%s[:,]?\s*(?:%s)' % (name, pattern)
        flags = 0
        if 'i' in modifiers: flags |= re.I
        if 'm' in modifiers: flags |= re.M
        r = re.compile(pattern, flags)
        if 'g' in modifiers:
            new_regex = r.match
        else:
            new_regex = r.search
        self.listeners.append(TextListener(self, new_regex, callback))

    def enter(self, callback):
        """
        Public:
        Adds a Listener that triggers when anyone enters the room.
        Args:
            callback : A Function that is called with a Response object.
        Returns nothing.
        """
        # NOTE(review): `Listener` and `EnterMessage` are not imported at the
        # top of this module -- confirm they are exported by pybot.listener /
        # pybot.message and add them to the imports.
        self.listeners.append(Listener(
            self,
            lambda msg: isinstance(msg, EnterMessage),
            callback))

    def leave(self, callback):
        """
        Public:
        Adds a Listener that triggers when anyone leaves the room.
        Args:
            callback : A Function that is called with a Response object.
        Returns nothing.
        """
        # BUGFIX: the original closed the Listener(...) call before
        # `callback`, passing callback as a second argument to append()
        # (a TypeError at runtime).
        self.listeners.append(Listener(
            self,
            lambda msg: isinstance(msg, LeaveMessage),
            callback))

    def topic(self, callback):
        """
        Public:
        Adds a Listener that triggers when anyone changes the topic.
        Args:
            callback : A Function that is called with a Response object.
        Returns nothing.
        """
        self.listeners.append(Listener(
            self,
            lambda msg: isinstance(msg, TopicMessage),
            callback))

    def error(self, callback):
        """
        Public:
        Adds an error handler when an uncaught exception or user emitted
        error event occurs.
        Args:
            callback : A Function that is called with the error object.
        Return nothing.
        """
        self.error_handlers.append(callback)

    def invoke_error_handlers(self, err, msg):
        """
        Calls and passes any registered error handlers for unhandled exceptions
        or user emitted error events.
        Args:
            err : An Error object.
            msg : An optional Response object that generated the error
        Returns nothing.
        """
        self.logger.error(err.stack)
        for error_handler in self.error_handlers:
            try:
                error_handler(err, msg)
            # Narrowed from a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                self.logger.error("while invoking error handler: %s\n%s" %
                                  (err, err.stack))

    def catch_all(self, callback):
        """
        Public.
        Adds a Listener that triggers when no other text matchers match.
        Args:
            callback : A Function that is called with a Response object.
        Return nothing.
        """
        # BUGFIX: the wrapper was defined as a broken class-level method
        # (no self, referenced before definition); it belongs here as a
        # closure over `callback`.
        def callback_for_listener(msg):
            # Unwrap the original message carried inside the CatchAllMessage.
            msg.message = msg.message.message
            callback(msg)
        self.listeners.append(Listener(
            self,
            lambda msg: isinstance(msg, CatchAllMessage),
            callback_for_listener))

    def recieve(self, message):
        """
        Public.
        Passes the given message to any interested Listeners.
        (Name kept as 'recieve' [sic] for backward compatibility.)
        Args:
            message : A Message instance. Listeners can flag this message as
                      'done' to prevent futher execution.
        Returns nothing.
        """
        results = []
        ## TODO: This need try, except processing ?
        for listener in self.listeners:
            results.append(listener.call(message))
            if message.done:
                break
        # If nothing matched, re-dispatch as a CatchAllMessage (once).
        if not isinstance(message, CatchAllMessage) and True not in results:
            self.recieve(CatchAllMessage(message))

    def load_file(self, path, namespace, file):
        """
        Public.
        Loads a file in path.
        Args:
            path : A String path on the filesystem.
            file : A String filename in path on the filesystem.
        Returns nothing.
        """
        script_name = os.path.splitext(file)[0]
        try:
            script = getattr(__import__(namespace + '.' + script_name,
                                        fromlist=namespace.split('.')),
                             'script')
            if isfunction(script):
                script(self)
                #self._parse_help(os.path.join(path, file), script_name)
            else:
                # BUGFIX: '%' binds tighter than '+', so the original
                # formatted only part of the module path into the message.
                self.logger.warning("Expected %s to assign a function, "
                                    "but it's not defined in this script" %
                                    (namespace + '.' + script_name))
        except (AttributeError, NameError) as e:
            # Portable replacement for the Py2-only 'print >>' / 'e.message'.
            sys.stderr.write('Unable to load %s: %s\n' % (file, str(e)))
            #sys.exit(1)

    def load(self, path, namespace):
        """
        Public.
        Loads every script in the given path.
        Args:
            path : A String path on the filesystem.
        Returns nothing.
        """
        self.logger.debug("Loading scripts from %s" % path)
        if os.path.exists(path):
            #files = glob.glob(path + '/*.py')
            files = os.listdir(path)
            for file in files:
                self.logger.debug("Found '%s' script file" % file)
                self.load_file(path, namespace, file)

    def load_hubot_scripts(self, path, namespace, scripts):
        """
        Public.
        Load scripts specified in the `hubot-scripts.json` file.
        Args:
            path : A String path to the hubot-scripts files.
            scripts : An Array of scripts to load.
        Returns nothing.
        """
        self.logger.debug("Loading hubot-scripts from %s" % path)
        for script in scripts:
            self.load_file(path, namespace, script)

    def load_external_scripts(self, packages):
        # Not implemented yet.
        pass

    def setup_express(self):
        # Not implemented yet: would start the HTTP daemon.
        pass

    def setup_null_router(self):
        # Not implemented yet: stand-in router when httpd is disabled.
        pass

    def load_adapter(self, namespace, adapter):
        """
        Load the adapter Hubot is going to use.
        Args:
            path : A String of the path to adapter if local
            adapter : A String of the adapter name to use
        Returns nothing.
        """
        self.logger.debug("Loading adapter %s" % adapter)
        #if adapter in HUBOT_DEFAULT_ADAPTERS:
        #    path += '/' + adapter
        #else:
        #    path = 'huobt-' + adapter
        namespace += '.' + adapter
        target = namespace.split('.')
        (package, module, cls_name) = (target[0],
                                       namespace,
                                       target[-1])
        # Adapter classes are named after the module, capitalized.
        cls_name = cls_name[0].upper() + cls_name[1:]
        Adapter = getattr(__import__(module, fromlist=[package]), cls_name)
        self.adapter = Adapter(self)

    def help_commands(self):
        # Not implemented yet: would return the collected command help.
        pass

    def _parse_help(self, path, script_name):
        """
        Private:
        load help info from a loaded script.
        Args:
            path : A String path to the file on disk.
        Returns nothing.
        """
        self.logger.debug("Parsing help for %s" % path)
        script_document = {}
        ## TODO: Complete code of this function

    def send(self, user, *strings):
        """
        Public:
        A helper send function which delegates to the adapter's send function.
        Args:
            user : A User instance.
            string : One or more String for each message to send.
        Returns nothing.
        """
        self.adapter.send(user, *strings)

    def reply(self, user, *strings):
        """
        Public:
        A helper reply function which delegates to the adapter's reply
        function.
        Args:
            user : A User instance.
            string : One or more Strings for each message to send.
        Returns nothing.
        """
        self.adapter.reply(user, *strings)

    def message_room(self, room, *strings):
        """
        Public:
        A helper send function to message a room that the robot is in.
        Args:
            room : String designating the room to message.
            strings : One or more Strings for each message to send.
        Returns nothing.
        """
        # BUGFIX: the original wrote {room: room}, keying the dict by the
        # room's own name instead of the literal 'room' key the adapter
        # looks up. NOTE(review): confirm against the adapters' send().
        user = {'room': room}
        self.adapter.send(user, *strings)

    def on(self, event, *args):
        ## TODO: Check is this args comment right?
        """
        Public:
        A wrapper around the EventEmitter API to make usage semantically
        better.
        Args:
            event : The event name.
            listener: A Function that is called with the event parameter
                      when event happens.
        Returns nothing.
        """
        self.events.on(event, *args)

    def emit(self, event, *args):
        """
        Public:
        A wrapper around the EventEmitter API to make usage semantically
        better.
        Args:
            event : The event name.
            *args : Arguments emitted by the event
        """
        self.events.emit(event, *args)

    def run(self):
        """
        Public:
        Kick off the event loop for the adapter
        Returns nothing.
        """
        self.emit("running")
        self.adapter.run()

    def shutdown(self):
        """
        Public:
        Gracefully shutdown the robot process
        Returns nothing.
        """
        if self.ping_interval_id:
            # NOTE(review): clear_interval is not defined or imported in this
            # module -- this branch would raise NameError; TODO fix upstream.
            clear_interval(self.ping_interval_id)
        self.adapter.close()
        self.brain.close()

    def parse_version(self):
        """
        Public:
        The version of Hubot from npm
        Returns a String of the version number.
        """
        ## TODO: Write code instead of the temporary constant below.
        self.version = '0.1.0'
        return self.version

    def http(self, url, options):
        """
        ### Not implemented !!! ###
        Public:
        Creates a scoped http client with chainable methods for modifying the
        request. This doesn't actually make a request though.
        Once your request is assembled, you can call `get()`/`post()`/etc
        to send the request.
        Args:
            url : String URL to access.
            options : Optional options to pass on to the client.
        Examples:
            robot.http("http://example.com")
                # set a single header
                .header('Authorization', 'bearer abcdef')
                # set multiple headers
                .headers(Authorization: 'bearer abcdef', Accept: 'application')
                # add URI query parameters
                .query(a: 1, b: 'foo & bar')
                # make the actual request
                .get() (err, res, body) ->
                    console.log body
                # or , you can POST data
                .post(data) (err, res, body) ->
                    console.log body
            # Can also set options
            robot.http("https://example.com", {rejectUnauthorized: false})
        Returns a ScopedClient instance.
        """
        pass
| |
# Command related to creation and execution: run, new, clean, test, auto-test
import sys
import os
import subprocess
import shutil
import getopt
import urllib2
import webbrowser
import time
import signal
from play.utils import *
# Commands handled by this module; the combined forms ('new,run',
# 'clean,run') run two steps in sequence inside execute().
COMMANDS = ['run', 'new', 'clean', 'test', 'autotest', 'auto-test', 'id', 'new,run', 'clean,run', 'modules']

# One-line help text per command ('autotest' and the combined forms are
# aliases and have no separate entry).
HELP = {
    'id': "Define the framework ID",
    'new': "Create a new application",
    'clean': "Delete temporary files (including the bytecode cache)",
    'run': "Run the application in the current shell",
    'test': "Run the application in test mode in the current shell",
    'auto-test': "Automatically run all application tests",
    'modules': "Display the computed modules list"
}
def execute(**kargs):
    """Dispatch a play command to its handler.

    Expected kwargs: command, app, args, env, cmdloader.  The branches are
    deliberately plain `if`s (not elif): combined commands such as
    'new,run' and 'clean,run' must fall through more than one of them.
    """
    command = kargs.get("command")
    app = kargs.get("app")
    args = kargs.get("args")
    env = kargs.get("env")
    cmdloader = kargs.get("cmdloader")

    if command == 'id':
        id(env)
    if command == 'new' or command == 'new,run':
        new(app, args, env, cmdloader)
    if command == 'clean' or command == 'clean,run':
        clean(app)
    if command == 'new,run' or command == 'clean,run' or command == 'run':
        run(app, args)
    if command == 'test':
        test(app, args)
    if command == 'auto-test' or command == 'autotest':
        autotest(app, args)
    if command == 'modules':
        show_modules(app, args)
def new(app, args, env, cmdloader=None):
withModules = []
application_name = None
try:
optlist, args = getopt.getopt(args, '', ['with=', 'name='])
for o, a in optlist:
if o in ('--with'):
withModules = a.split(',')
if o in ('--name'):
application_name = a
except getopt.GetoptError, err:
print "~ %s" % str(err)
print "~ Sorry, unrecognized option"
print "~ "
sys.exit(-1)
if os.path.exists(app.path):
print "~ Oops. %s already exists" % app.path
print "~"
sys.exit(-1)
md = []
for m in withModules:
dirname = None
if os.path.exists(os.path.join(env["basedir"], 'modules/%s' % m)) and os.path.isdir(os.path.join(env["basedir"], 'modules/%s' % m)):
dirname = m
else:
for f in os.listdir(os.path.join(env["basedir"], 'modules')):
if os.path.isdir(os.path.join(env["basedir"], 'modules/%s' % f)) and f.find('%s-' % m) == 0:
dirname = f
break
if not dirname:
print "~ Oops. No module %s found" % m
print "~ Try to install it using 'play install %s'" % m
print "~"
sys.exit(-1)
md.append(dirname)
print "~ The new application will be created in %s" % os.path.normpath(app.path)
if application_name is None:
application_name = raw_input("~ What is the application name? [%s] " % os.path.basename(app.path))
if application_name == "":
application_name = os.path.basename(app.path)
copy_directory(os.path.join(env["basedir"], 'resources/application-skel'), app.path)
os.mkdir(os.path.join(app.path, 'app/models'))
os.mkdir(os.path.join(app.path, 'lib'))
app.check()
replaceAll(os.path.join(app.path, 'conf/application.conf'), r'%APPLICATION_NAME%', application_name)
replaceAll(os.path.join(app.path, 'conf/application.conf'), r'%SECRET_KEY%', secretKey())
print "~"
# Configure modules
runDepsAfter = False
for m in md:
# Check dependencies.yml of the module
depsYaml = os.path.join(env["basedir"], 'modules/%s/conf/dependencies.yml' % m)
if os.path.exists(depsYaml):
deps = open(depsYaml).read()
try:
moduleDefinition = re.search(r'self:\s*(.*)\s*', deps).group(1)
replaceAll(os.path.join(app.path, 'conf/dependencies.yml'), r'- play\n', '- play\n - %s\n' % moduleDefinition )
runDepsAfter = True
except Exception:
pass
if runDepsAfter:
cmdloader.commands['dependencies'].execute(command='dependencies', app=app, args=['--sync'], env=env, cmdloader=cmdloader)
print "~ OK, the application is created."
print "~ Start it with : play run %s" % sys.argv[2]
print "~ Have fun!"
print "~"
# Handle of the currently running application subprocess (set by run()).
process = None

def handle_sigterm(signum, frame):
    """SIGTERM handler: terminate the child java process, then exit cleanly."""
    global process
    if 'process' in globals():
        process.terminate()
        sys.exit(0)
def run(app, args):
    """Run the application in the current shell, blocking until it exits.

    Installs a SIGTERM handler so killing this wrapper also terminates the
    spawned java process.  (Python 2 module.)
    """
    global process
    app.check()
    print "~ Ctrl+C to stop"
    print "~ "
    java_cmd = app.java_cmd(args)
    try:
        process = subprocess.Popen (java_cmd, env=os.environ)
        signal.signal(signal.SIGTERM, handle_sigterm)
        process.wait()
    except OSError:
        print "Could not execute the java executable, please make sure the JAVA_HOME environment variable is set properly (the java executable should reside at JAVA_HOME/bin/java). "
        sys.exit(-1)
    print
def clean(app):
    """Delete the application's tmp directory (bytecode cache etc.)."""
    app.check()
    print "~ Deleting %s" % os.path.normpath(os.path.join(app.path, 'tmp'))
    if os.path.exists(os.path.join(app.path, 'tmp')):
        shutil.rmtree(os.path.join(app.path, 'tmp'))
    print "~"
def show_modules(app, args):
    """Print the application's computed module list and exit(0)."""
    app.check()
    modules = app.modules()
    if len(modules):
        print "~ Application modules are:"
        print "~ "
        for module in modules:
            print "~ %s" % module
    else:
        print "~ No modules installed in this application"
    print "~ "
    sys.exit(0)
def test(app, args):
    """Run the application in test mode in the current shell (blocking)."""
    app.check()
    java_cmd = app.java_cmd(args)
    print "~ Running in test mode"
    print "~ Ctrl+C to stop"
    print "~ "
    try:
        subprocess.call(java_cmd, env=os.environ)
    except OSError:
        print "Could not execute the java executable, please make sure the JAVA_HOME environment variable is set properly (the java executable should reside at JAVA_HOME/bin/java). "
        sys.exit(-1)
    print
def autotest(app, args):
    """Automatically run all application tests.

    Cleans tmp/, kills any instance already listening on the configured
    port, starts the app in test mode, waits for it to report readiness in
    its log, drives the headless FirePhoque test runner against it, then
    kills the app and exits 1 if any test failed.  (Python 2 module.)
    """
    app.check()
    print "~ Running in test mode"
    print "~ Ctrl+C to stop"
    print "~ "
    print "~ Deleting %s" % os.path.normpath(os.path.join(app.path, 'tmp'))
    if os.path.exists(os.path.join(app.path, 'tmp')):
        shutil.rmtree(os.path.join(app.path, 'tmp'))
    print "~"
    # Kill if exists
    http_port = 9000
    protocol = 'http'
    if app.readConf('https.port'):
        http_port = app.readConf('https.port')
        protocol = 'https'
    else:
        http_port = app.readConf('http.port')
    try:
        # Empty ProxyHandler bypasses any system proxy for localhost.
        proxy_handler = urllib2.ProxyHandler({})
        opener = urllib2.build_opener(proxy_handler)
        opener.open('http://localhost:%s/@kill' % http_port)
    except Exception, e:
        pass
    # Run app
    test_result = os.path.join(app.path, 'test-result')
    if os.path.exists(test_result):
        shutil.rmtree(test_result)
    sout = open(os.path.join(app.log_path(), 'system.out'), 'w')
    java_cmd = app.java_cmd(args)
    try:
        play_process = subprocess.Popen(java_cmd, env=os.environ, stdout=sout)
    except OSError:
        print "Could not execute the java executable, please make sure the JAVA_HOME environment variable is set properly (the java executable should reside at JAVA_HOME/bin/java). "
        sys.exit(-1)
    # Busy-wait: tail the app's stdout log until it reports it is listening.
    soutint = open(os.path.join(app.log_path(), 'system.out'), 'r')
    while True:
        if play_process.poll():
            print "~"
            print "~ Oops, application has not started?"
            print "~"
            sys.exit(-1)
        line = soutint.readline().strip()
        if line:
            print line
            if line.find('Listening for HTTP') > -1:
                soutint.close()
                break
    # Run FirePhoque
    print "~"
    headless_browser = ''
    if app.readConf('headlessBrowser'):
        headless_browser = app.readConf('headlessBrowser')
    fpcp = [os.path.join(app.play_env["basedir"], 'modules/testrunner/lib/play-testrunner.jar')]
    fpcp_libs = os.path.join(app.play_env["basedir"], 'modules/testrunner/firephoque')
    for jar in os.listdir(fpcp_libs):
        if jar.endswith('.jar'):
            fpcp.append(os.path.normpath(os.path.join(fpcp_libs, jar)))
    # Classpath separator is ';' on Windows, ':' elsewhere.
    cp_args = ':'.join(fpcp)
    if os.name == 'nt':
        cp_args = ';'.join(fpcp)
    java_cmd = [app.java_path(), '-classpath', cp_args, '-Dapplication.url=%s://localhost:%s' % (protocol, http_port), '-DheadlessBrowser=%s' % (headless_browser), 'play.modules.testrunner.FirePhoque']
    try:
        subprocess.call(java_cmd, env=os.environ)
    except OSError:
        print "Could not execute the headless browser. "
        sys.exit(-1)
    print "~"
    time.sleep(1)
    # Kill if exists
    http_port = app.readConf('http.port')
    try:
        proxy_handler = urllib2.ProxyHandler({})
        opener = urllib2.build_opener(proxy_handler)
        opener.open('%s://localhost:%s/@kill' % (protocol, http_port))
    except Exception, e:
        pass
    if os.path.exists(os.path.join(app.path, 'test-result/result.passed')):
        print "~ All tests passed"
        print "~"
        # NOTE(review): testspassed is assigned but never read; if neither
        # result.passed nor result.failed exists this function exits 0.
        testspassed = True
    if os.path.exists(os.path.join(app.path, 'test-result/result.failed')):
        print "~ Some tests have failed. See file://%s for results" % test_result
        print "~"
        sys.exit(1)
def id(play_env):
    """Interactively set or unset the framework ID (stored in id_file).

    NOTE(review): shadows the builtin id(); the name is part of the
    command dispatch in execute(), so it is kept.
    """
    # Informational only -- the prompt below runs either way.
    if not play_env["id"]:
        print "~ framework ID is not set"
    new_id = raw_input("~ What is the new framework ID (or blank to unset)? ")
    if new_id:
        print "~"
        print "~ OK, the framework ID is now %s" % new_id
        print "~"
        open(play_env["id_file"], 'w').write(new_id)
    else:
        print "~"
        print "~ OK, the framework ID is unset"
        print "~"
        if os.path.exists(play_env["id_file"]):
            os.remove(play_env["id_file"])
# ~~~~~~~~~ UTILS
def kill(pid):
if os.name == 'nt':
import ctypes
handle = ctypes.windll.kernel32.OpenProcess(1, False, int(pid))
if not ctypes.windll.kernel32.TerminateProcess(handle, 0):
print "~ Cannot kill the process with pid %s (ERROR %s)" % (pid, ctypes.windll.kernel32.GetLastError())
print "~ "
sys.exit(-1)
else:
try:
os.kill(int(pid), 15)
except OSError:
print "~ Play was not running (Process id %s not found)" % pid
print "~"
sys.exit(-1)
| |
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import sqlalchemy as sql
from sqlalchemy import or_
import time
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
from tricircle.common import constants
from tricircle.common.context import is_admin_context as _is_admin_context
from tricircle.common import exceptions
from tricircle.common.i18n import _
from tricircle.db import core
from tricircle.db import models
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def db_test_stub(*args):
    """Deliberate no-op accepting any arguments.

    Presumably replaced (monkey-patched) by tests to inject behavior at the
    call sites that invoke it -- confirm against the test suite.
    """
    return None
def create_pod(context, pod_dict):
    """Insert a new Pod row described by `pod_dict` inside a transaction."""
    with context.session.begin():
        return core.create_resource(context, models.Pod, pod_dict)


def delete_pod(context, pod_id):
    """Delete the Pod identified by `pod_id` inside a transaction."""
    with context.session.begin():
        return core.delete_resource(context, models.Pod, pod_id)


def get_pod(context, pod_id):
    """Fetch the Pod identified by `pod_id` inside a transaction."""
    with context.session.begin():
        return core.get_resource(context, models.Pod, pod_id)


def list_pods(context, filters=None, sorts=None):
    """Query Pod rows matching `filters`, ordered by `sorts`.

    NOTE(review): unlike the other helpers here this does not open an
    explicit transaction -- confirm that is intentional.
    """
    return core.query_resource(context, models.Pod, filters or [],
                               sorts or [])


def update_pod(context, pod_id, update_dict):
    """Apply `update_dict` to the Pod `pod_id` inside a transaction."""
    with context.session.begin():
        return core.update_resource(context, models.Pod, pod_id, update_dict)
def create_cached_endpoints(context, config_dict):
    """Insert a new CachedEndpoint row inside a transaction."""
    with context.session.begin():
        return core.create_resource(context, models.CachedEndpoint,
                                    config_dict)


def delete_cached_endpoints(context, config_id):
    """Delete the CachedEndpoint `config_id` inside a transaction."""
    with context.session.begin():
        return core.delete_resource(context, models.CachedEndpoint,
                                    config_id)


def get_cached_endpoints(context, config_id):
    """Fetch the CachedEndpoint `config_id` inside a transaction."""
    with context.session.begin():
        return core.get_resource(context, models.CachedEndpoint,
                                 config_id)


def list_cached_endpoints(context, filters=None, sorts=None):
    """Query CachedEndpoint rows matching `filters`, ordered by `sorts`.

    NOTE(review): no explicit transaction here, matching list_pods --
    confirm that is intentional.
    """
    return core.query_resource(context, models.CachedEndpoint,
                               filters or [], sorts or [])


def update_cached_endpoints(context, config_id, update_dict):
    """Apply `update_dict` to the CachedEndpoint `config_id` in a transaction."""
    with context.session.begin():
        return core.update_resource(
            context, models.CachedEndpoint, config_id, update_dict)
def create_resource_mapping(context, top_id, bottom_id, pod_id, project_id,
                            resource_type):
    """Create a top<->bottom resource routing entry.

    Returns the created routing row, or None if an identical entry already
    exists (DBDuplicateEntry is treated as "someone else won the race").

    NOTE(review): the finally-clause closes the context's session, so the
    context cannot be reused for further queries afterwards -- confirm
    callers expect that.
    """
    try:
        # Manual begin/commit (rather than the `with` form used elsewhere)
        # so the duplicate-entry case can roll back explicitly.
        context.session.begin()
        route = core.create_resource(context, models.ResourceRouting,
                                     {'top_id': top_id,
                                      'bottom_id': bottom_id,
                                      'pod_id': pod_id,
                                      'project_id': project_id,
                                      'resource_type': resource_type})
        context.session.commit()
        return route
    except db_exc.DBDuplicateEntry:
        # entry has already been created
        context.session.rollback()
        return None
    finally:
        context.session.close()
def list_resource_routings(context, filters=None, limit=None, marker=None,
                           sorts=None):
    """Return a page of resource routing entries.

    :param context: context object
    :param filters: list of filter dicts with 'key', 'comparator', 'value'
    :param limit: maximum number of items in a single response
    :param marker: id of the last item of the previous page
    :param sorts: list of (sort_key, sort_dir) pairs, e.g. [('id', 'desc')]
    :return: a list of at most `limit` routing entries
    """
    # A transient model instance carrying only the id is enough for
    # paginate_query to locate the page start.
    marker_obj = models.ResourceRouting(id=marker) if marker else None
    with context.session.begin():
        return core.paginate_query(context, models.ResourceRouting, limit,
                                   marker_obj, filters or [], sorts or [])
def get_resource_routing(context, id):
    """Fetch one ResourceRouting row by primary key."""
    with context.session.begin():
        routing = core.get_resource(context, models.ResourceRouting, id)
    return routing


def delete_resource_routing(context, id):
    """Delete the ResourceRouting row identified by id."""
    with context.session.begin():
        result = core.delete_resource(context, models.ResourceRouting, id)
    return result


def update_resource_routing(context, id, update_dict):
    """Apply update_dict to the ResourceRouting row identified by id."""
    with context.session.begin():
        routing = core.update_resource(context, models.ResourceRouting, id,
                                       update_dict)
    return routing
def get_bottom_mappings_by_top_id(context, top_id, resource_type):
    """Get resource id and pod name on bottom

    :param context: context object
    :param top_id: resource id on top
    :param resource_type: resource type
    :return: a list of tuple (pod dict, bottom_id)
    """
    route_filters = [
        {'key': 'top_id', 'comparator': 'eq', 'value': top_id},
        {'key': 'resource_type', 'comparator': 'eq', 'value': resource_type}]
    with context.session.begin():
        routes = core.query_resource(
            context, models.ResourceRouting, route_filters, [])
        # Entries without a bottom id are still pending and are skipped.
        return [(core.get_resource(context, models.Pod, route['pod_id']),
                 route['bottom_id'])
                for route in routes if route['bottom_id']]
def delete_pre_created_resource_mapping(context, name):
    """Delete routing entries for a pre-created resource looked up by name.

    The first entry whose top_id equals ``name`` is removed, along with
    any entries whose top_id equals that entry's bottom_id.
    """
    with context.session.begin():
        entries = core.query_resource(
            context, models.ResourceRouting,
            filters=[{'key': 'top_id', 'comparator': 'eq',
                      'value': name}], sorts=[])
        if entries:
            # NOTE(review): matching other rows' top_id against this
            # entry's bottom_id looks intentional for chained
            # pre-created resources -- confirm against callers.
            core.delete_resources(
                context, models.ResourceRouting,
                filters=[{'key': 'top_id', 'comparator': 'eq',
                          'value': entries[0]['bottom_id']}])
            core.delete_resource(context, models.ResourceRouting,
                                 entries[0]['id'])
def get_pod_by_top_id(context, _id):
    """Get pod resource from pod table by top id of resource

    :param context: context object
    :param _id: the top id of resource
    :returns: pod resource, or None when the routing is absent,
        ambiguous, or not yet completed
    """
    route_filters = [{'key': 'top_id', 'comparator': 'eq', 'value': _id}]
    with context.session.begin():
        routes = core.query_resource(
            context, models.ResourceRouting, route_filters, [])
        # Exactly one routing entry with a filled-in bottom id is
        # required to resolve the pod unambiguously.
        if len(routes) != 1 or not routes[0]['bottom_id']:
            return None
        return core.get_resource(context, models.Pod, routes[0]['pod_id'])
def get_bottom_id_by_top_id_region_name(context, top_id,
                                        region_name, resource_type):
    """Get resource bottom id by top id and bottom pod name

    :param context: context object
    :param top_id: resource id on top
    :param region_name: name of bottom pod
    :param resource_type: resource type
    :return: bottom id, or None when no mapping matches the region
    """
    mappings = get_bottom_mappings_by_top_id(context, top_id, resource_type)
    return next((bottom_id for pod, bottom_id in mappings
                 if pod['region_name'] == region_name), None)
def get_bottom_mappings_by_tenant_pod(context,
                                      tenant_id,
                                      pod_id,
                                      resource_type):
    """Get resource routing for specific tenant and pod

    :param context: context object
    :param tenant_id: tenant id to look up
    :param pod_id: pod to look up
    :param resource_type: specific resource
    :return: a dict {top_id: route}
    """
    route_filters = [
        {'key': 'pod_id', 'comparator': 'eq', 'value': pod_id},
        {'key': 'project_id', 'comparator': 'eq', 'value': tenant_id},
        {'key': 'resource_type', 'comparator': 'eq', 'value': resource_type}]
    with context.session.begin():
        routes = core.query_resource(
            context, models.ResourceRouting, route_filters, [])
        # Skip routes whose bottom resource has not been created yet.
        return {route['top_id']: route
                for route in routes if route['bottom_id']}
def delete_mappings_by_top_id(context, top_id, pod_id=None):
    """Delete resource routing entry based on top resource ID

    If pod ID is also provided, only the entry in that pod is deleted.

    :param context: context object
    :param top_id: top resource ID
    :param pod_id: optional pod ID
    :return: None
    """
    conditions = [{'key': 'top_id', 'comparator': 'eq', 'value': top_id}]
    if pod_id:
        conditions.append(
            {'key': 'pod_id', 'comparator': 'eq', 'value': pod_id})
    with context.session.begin():
        core.delete_resources(context, models.ResourceRouting,
                              filters=conditions)
def delete_mappings_by_bottom_id(context, bottom_id):
    """Delete every routing entry whose bottom id matches bottom_id."""
    bottom_filter = [{'key': 'bottom_id', 'comparator': 'eq',
                      'value': bottom_id}]
    with context.session.begin():
        core.delete_resources(context, models.ResourceRouting,
                              filters=bottom_filter)
def get_next_bottom_pod(context, current_pod_id=None):
    """Return the bottom pod following current_pod_id in pod_id order.

    Without current_pod_id the first bottom pod is returned; None when
    there is no next pod. Pods with an empty az_name (top pods) are
    excluded.
    """
    pods = list_pods(context, sorts=[(models.Pod.pod_id, True)])
    # NOTE(zhiyuan) number of pods is small, just traverse to filter top pod
    bottom_pods = [pod for pod in pods if pod['az_name']]
    if not current_pod_id:
        return bottom_pods[0] if bottom_pods else None
    for idx, pod in enumerate(bottom_pods[:-1]):
        if pod['pod_id'] == current_pod_id:
            return bottom_pods[idx + 1]
    return None
def get_top_pod(context):
    """Return the top pod (region set, empty az_name), or None."""
    filters = [{'key': 'az_name', 'comparator': 'eq', 'value': ''}]
    pods = list_pods(context, filters=filters)
    # only one should match; take the first with a region name set
    return next((pod for pod in pods
                 if pod['region_name'] != '' and pod['az_name'] == ''),
                None)
def get_pod_by_name(context, region_name):
    """Return the pod whose region_name matches exactly, or None."""
    filters = [{'key': 'region_name',
                'comparator': 'eq', 'value': region_name}]
    candidates = list_pods(context, filters=filters)
    # only one should match
    return next((pod for pod in candidates
                 if pod['region_name'] == region_name), None)
def find_pods_by_az_or_region(context, az_or_region):
    """Return pods whose region_name OR az_name equals az_or_region.

    Returns None when az_or_region is falsy, otherwise a (possibly
    empty) list of pod dicts.
    """
    # if az_or_region is None or empty, returning None value directly.
    if not az_or_region:
        return None
    query = context.session.query(models.Pod)
    # A pod matches when either of its two name columns equals the value.
    query = query.filter(or_(models.Pod.region_name == az_or_region,
                             models.Pod.az_name == az_or_region))
    return [obj.to_dict() for obj in query]
def find_pod_by_az_or_region(context, az_or_region):
    """Return the single pod matching az_or_region.

    Returns None when az_or_region is empty; raises PodNotFound when
    nothing matches and InvalidInput when the match is ambiguous.
    """
    pods = find_pods_by_az_or_region(context, az_or_region)
    # find_pods_by_az_or_region returns None for an empty lookup value.
    if pods is None:
        return None
    if not pods:
        raise exceptions.PodNotFound(az_or_region)
    if len(pods) > 1:
        raise exceptions.InvalidInput(
            reason='Multiple pods with the same az_name are found')
    return pods[0]
def new_job(context, project_id, _type, resource_id):
    """Insert a fresh AsyncJob row in the JS_New state and return it."""
    job_dict = {'id': uuidutils.generate_uuid(),
                'type': _type,
                'status': constants.JS_New,
                'project_id': project_id,
                'resource_id': resource_id,
                'extra_id': uuidutils.generate_uuid()}
    with context.session.begin():
        job = core.create_resource(context, models.AsyncJob, job_dict)
    return job
def register_job(context, project_id, _type, resource_id):
    """Try to insert a running AsyncJob row; return it, or None on conflict.

    Duplicate entries and deadlocks are treated as "another worker
    already registered this job": the transaction is rolled back and
    None is returned.
    """
    try:
        # manual begin/commit so the handlers below can roll back explicitly
        context.session.begin()
        job_dict = {'id': uuidutils.generate_uuid(),
                    'type': _type,
                    'status': constants.JS_Running,
                    'project_id': project_id,
                    'resource_id': resource_id,
                    # fixed extra id (new_job uses a fresh uuid instead);
                    # presumably what makes concurrent registrations
                    # collide -- confirm against models.AsyncJob constraints
                    'extra_id': constants.SP_EXTRA_ID}
        job = core.create_resource(context,
                                   models.AsyncJob, job_dict)
        context.session.commit()
        return job
    except db_exc.DBDuplicateEntry:
        context.session.rollback()
        return None
    except db_exc.DBDeadlock:
        context.session.rollback()
        return None
    finally:
        context.session.close()
def get_latest_failed_or_new_jobs(context):
    """Collect jobs whose most recent run failed or has not started.

    Looks only at jobs within the last CONF.redo_time_span seconds.
    Returns a pair (failed_jobs, new_jobs), each a list of dicts with
    'type', 'resource_id' and 'project_id'.
    """
    current_timestamp = timeutils.utcnow()
    time_span = datetime.timedelta(seconds=CONF.redo_time_span)
    latest_timestamp = current_timestamp - time_span
    failed_jobs = []
    new_jobs = []
    # first we group the jobs by type and resource id, and in each group we
    # pick the latest timestamp
    stmt = context.session.query(
        models.AsyncJob.type, models.AsyncJob.resource_id,
        sql.func.max(models.AsyncJob.timestamp).label('timestamp'))
    stmt = stmt.filter(models.AsyncJob.timestamp >= latest_timestamp)
    stmt = stmt.group_by(models.AsyncJob.type,
                         models.AsyncJob.resource_id).subquery()
    # then we join the result with the original table and group again, in each
    # group, we pick the "minimum" of the status, for status, the ascendant
    # sort sequence is "0_Fail", "1_Success", "2_Running", "3_New"
    query = context.session.query(models.AsyncJob.type,
                                  models.AsyncJob.resource_id,
                                  models.AsyncJob.project_id,
                                  sql.func.min(models.AsyncJob.status)).join(
        stmt, sql.and_(models.AsyncJob.type == stmt.c.type,
                       models.AsyncJob.resource_id == stmt.c.resource_id,
                       models.AsyncJob.timestamp == stmt.c.timestamp))
    query = query.group_by(models.AsyncJob.project_id,
                           models.AsyncJob.type,
                           models.AsyncJob.resource_id)
    # bucket each (type, resource) group by its effective status
    for job_type, resource_id, project_id, status in query:
        if status == constants.JS_Fail:
            failed_jobs.append({'type': job_type, 'resource_id': resource_id,
                                'project_id': project_id})
        elif status == constants.JS_New:
            new_jobs.append({'type': job_type, 'resource_id': resource_id,
                             'project_id': project_id})
    return failed_jobs, new_jobs
def get_job(context, job_id):
    """Fetch one AsyncJob row by id."""
    with context.session.begin():
        job = core.get_resource(context, models.AsyncJob, job_id)
    return job


def get_job_from_log(context, job_id):
    """Fetch one AsyncJobLog row by id."""
    with context.session.begin():
        entry = core.get_resource(context, models.AsyncJobLog, job_id)
    return entry


def delete_job(context, job_id):
    """Delete one AsyncJob row by id."""
    with context.session.begin():
        result = core.delete_resource(context, models.AsyncJob, job_id)
    return result
def list_jobs(context, filters=None, sorts=None, limit=None, marker=None):
    """Paginate AsyncJob rows; marker is the id of the previous page's tail."""
    with context.session.begin():
        if marker is None:
            marker_obj = None
        else:
            marker_obj = context.session.query(models.AsyncJob).filter(
                models.AsyncJob.id == marker).first()
        return core.paginate_query(
            context, models.AsyncJob, limit, marker_obj,
            filters or [], sorts or [])
def list_jobs_from_log(context, filters=None, sorts=None,
                       limit=None, marker=None):
    """Paginate entries in the async job log table.

    Log entries exist only for successfully finished jobs and carry no
    status column, so a status filter other than JS_Success can never
    match and yields []; a JS_Success status filter is simply dropped.

    :param context: context object
    :param filters: list of filter dicts with 'key', 'comparator', 'value'
    :param sorts: list of (sort_key, sort_dir) pairs
    :param limit: maximum number of items in a single response
    :param marker: id of the last item of the previous page
    :return: a list of job log entries
    """
    with context.session.begin():
        marker_obj = None
        if marker is not None:
            marker_obj = context.session.query(models.AsyncJobLog).filter(
                models.AsyncJobLog.id == marker).first()
        # Build a new filter list instead of mutating the caller's list
        # while iterating it (the original removed elements mid-loop,
        # which can skip entries and leaks the mutation to the caller).
        effective_filters = []
        for _filter in filters or []:
            if _filter.get('key') == 'status':
                # job entry in job log table has no status attribute.
                if _filter['value'] != constants.JS_Success:
                    return []
                continue  # drop the success-status filter
            effective_filters.append(_filter)
        return core.paginate_query(context, models.AsyncJobLog, limit,
                                   marker_obj,
                                   effective_filters, sorts or [])
def get_latest_job(context, status, _type, resource_id):
    """Return the newest AsyncJob matching (status, type, resource_id)."""
    criteria = [
        {'key': 'status', 'comparator': 'eq', 'value': status},
        {'key': 'type', 'comparator': 'eq', 'value': _type},
        {'key': 'resource_id', 'comparator': 'eq', 'value': resource_id}]
    # sort by timestamp descending so the newest job comes first
    jobs = core.query_resource(context, models.AsyncJob, criteria,
                               [('timestamp', False)])
    return jobs[0] if jobs else None
def get_running_job(context, _type, resource_id):
    """Return a JS_Running AsyncJob for (type, resource_id), or None."""
    criteria = [
        {'key': 'resource_id', 'comparator': 'eq', 'value': resource_id},
        {'key': 'status', 'comparator': 'eq', 'value': constants.JS_Running},
        {'key': 'type', 'comparator': 'eq', 'value': _type}]
    jobs = core.query_resource(context, models.AsyncJob, criteria, [])
    return jobs[0] if jobs else None
def finish_job(context, job_id, successful, timestamp):
    """Mark an async job finished and prune its superseded history.

    On success, older job rows of the same (type, resource_id) are
    deleted and a log entry is written; on failure only strictly older
    rows are pruned. The whole transaction is retried up to `retries`
    times on deadlock.
    """
    status = constants.JS_Success if successful else constants.JS_Fail
    retries = 5
    for i in range(retries + 1):
        try:
            with context.session.begin():
                # test hook so unit tests can inject a deadlock on try i
                db_test_stub(i)
                job_dict = {'status': status,
                            'timestamp': timestamp,
                            'extra_id': uuidutils.generate_uuid()}
                job = core.update_resource(context, models.AsyncJob, job_id,
                                           job_dict)
                if status == constants.JS_Success:
                    log_dict = {'id': uuidutils.generate_uuid(),
                                'type': job['type'],
                                'project_id': job['project_id'],
                                'timestamp': timestamp,
                                'resource_id': job['resource_id']}
                    context.session.query(models.AsyncJob).filter(
                        sql.and_(
                            models.AsyncJob.type == job['type'],
                            models.AsyncJob.resource_id == job['resource_id'],
                            models.AsyncJob.timestamp <= timestamp)).delete(
                        synchronize_session=False)
                    core.create_resource(context, models.AsyncJobLog, log_dict)
                else:
                    # sqlite has problem handling "<" operator on timestamp,
                    # so we slide the timestamp a bit and use "<="
                    timestamp = timestamp - datetime.timedelta(microseconds=1)
                    context.session.query(models.AsyncJob).filter(
                        sql.and_(
                            models.AsyncJob.type == job['type'],
                            models.AsyncJob.resource_id == job['resource_id'],
                            models.AsyncJob.timestamp <= timestamp)).delete(
                        synchronize_session=False)
        except db_exc.DBDeadlock:
            # give contending transactions a chance, then retry;
            # re-raise once the retry budget is exhausted
            if i == retries:
                raise
            time.sleep(1)
            continue
        return
def ensure_agent_exists(context, pod_id, host, _type, tunnel_ip):
    """Create a ShadowAgent row for (host, type) unless one already exists.

    A concurrent insert raising DBDuplicateEntry is tolerated; the
    session is always closed in the finally clause.
    """
    try:
        context.session.begin()
        agents = core.query_resource(
            context, models.ShadowAgent,
            [{'key': 'host', 'comparator': 'eq', 'value': host},
             {'key': 'type', 'comparator': 'eq', 'value': _type}], [])
        if agents:
            # already registered; the finally clause closes the session
            return
        core.create_resource(context, models.ShadowAgent,
                             {'id': uuidutils.generate_uuid(),
                              'pod_id': pod_id,
                              'host': host,
                              'type': _type,
                              'tunnel_ip': tunnel_ip})
        context.session.commit()
    except db_exc.DBDuplicateEntry:
        # agent has already been created
        context.session.rollback()
    finally:
        context.session.close()
def get_agent_by_host_type(context, host, _type):
    """Return the shadow agent registered for (host, type), or None."""
    lookup = [{'key': 'host', 'comparator': 'eq', 'value': host},
              {'key': 'type', 'comparator': 'eq', 'value': _type}]
    agents = core.query_resource(context, models.ShadowAgent, lookup, [])
    if not agents:
        return None
    return agents[0]
def _is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if not _is_user_context(context):
        return
    # Reject both a missing project id and a mismatched one.
    if not context.project_id or context.project_id != project_id:
        raise exceptions.NotAuthorized()
def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if not _is_user_context(context):
        return
    # Reject both a missing user id and a mismatched one.
    if not context.user_id or context.user_id != user_id:
        raise exceptions.NotAuthorized()
def require_admin_context(f):
    """Decorator to require admin request context.

    The first argument to the wrapped function must be the context;
    AdminRequired is raised otherwise.
    """
    # functools.wraps preserves the wrapped function's name/docstring
    # (the sibling _retry_on_deadlock already does this).
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not _is_admin_context(args[0]):
            raise exceptions.AdminRequired()
        return f(*args, **kwargs)
    return wrapper
def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`authorize_project_context` and
    :py:func:`authorize_user_context`.
    The first argument to the wrapped function must be the context.
    """
    # functools.wraps preserves the wrapped function's name/docstring
    # (the sibling _retry_on_deadlock already does this).
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not _is_admin_context(args[0]) and not _is_user_context(args[0]):
            raise exceptions.NotAuthorized()
        return f(*args, **kwargs)
    return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warning("Deadlock detected when running "
"'%(func_name)s': Retrying...",
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def handle_db_data_error(f):
    """Decorator translating DBDataError into exceptions.Invalid.

    Any other exception is logged and re-raised unchanged.
    """
    # functools.wraps preserves the wrapped function's name/docstring,
    # matching the sibling _retry_on_deadlock decorator.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except db_exc.DBDataError:
            msg = _('Error writing field to database')
            LOG.exception(msg)
            raise exceptions.Invalid(msg)
        except Exception as e:
            LOG.exception(str(e))
            raise
    return wrapper
def model_query(context, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param session: if present, the session to use
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id.
    """
    session = kwargs.get('session') or context.session
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    project_only = kwargs.get('project_only')
    query = session.query(*args)
    if read_deleted == 'no':
        query = query.filter_by(deleted=False)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter_by(deleted=True)
    elif read_deleted == 'int_no':
        # for models that store `deleted` as an integer flag, not a bool
        query = query.filter_by(deleted=0)
    else:
        raise Exception(
            _("Unrecognized read_deleted value '%s'") % read_deleted)
    if project_only and _is_user_context(context):
        query = query.filter_by(project_id=context.project_id)
    return query
def is_valid_model_filters(model, filters):
    """Return True if filter values exist on the model

    :param model: a Cinder model
    :param filters: dictionary of filters
    """
    return all(hasattr(model, key) for key in filters)
def create_recycle_resource(context, resource_id, resource_type, project_id):
    """Insert a recycle-resource row; return it, or None if it exists.

    Uses manual begin/commit so a DBDuplicateEntry can be rolled back
    and reported as None instead of raising.
    """
    try:
        context.session.begin()
        route = core.create_resource(context, models.RecycleResources,
                                     {'resource_id': resource_id,
                                      'resource_type': resource_type,
                                      'project_id': project_id})
        context.session.commit()
        return route
    except db_exc.DBDuplicateEntry:
        # entry has already been created
        context.session.rollback()
        return None
    finally:
        context.session.close()
def list_recycle_resources(context, filters=None, sorts=None):
    """Query recycle-resource rows, optionally filtered and sorted."""
    with context.session.begin():
        rows = core.query_resource(
            context, models.RecycleResources, filters or [], sorts or [])
    return rows


def delete_recycle_resource(context, resource_id):
    """Delete one recycle-resource row by resource id."""
    with context.session.begin():
        result = core.delete_resource(
            context, models.RecycleResources, resource_id)
    return result
| |
# -*- coding: utf-8 -*-
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer, ForeignKey, DateTime, Text, Boolean, UniqueConstraint
from datetime import datetime
Base = declarative_base()
# Module-level engine and session factory; db_init() configures the binding.
_engine = None
_session = sessionmaker()
# Numeric privilege levels; User.level defaults to USERLEVELS['none'].
USERLEVELS = {
    'none': 0,
    'user': 10,
    'admin': 20,
}
# Media kind codes -- presumably stored in Source.media_type (defaults to 0).
MEDIATYPES = {
    'video': 0,
    'audio': 1
}
# Download lifecycle codes -- presumably stored in Source.status (defaults
# to 0, i.e. 'not_started').
MEDIASTATUS = {
    'not_started': 0,
    'fetching_metadata': 1,
    'downloading': 2,
    'finished': 3,
    'error': 4,
}
class Event(Base):
    """An event row with a name and a visibility flag."""
    __tablename__ = "event"
    id = Column(Integer, primary_key=True)
    name = Column(String(32))
    visible = Column(Boolean)

    def serialize(self):
        """Return a JSON-friendly dict of all columns."""
        return {'id': self.id, 'name': self.name, 'visible': self.visible}
class Player(Base):
    """A player row linked to an event, identified externally by a token."""
    __tablename__ = "player"
    id = Column(Integer, primary_key=True)
    token = Column(String(16), unique=True, index=True)
    event = Column(ForeignKey('event.id'))
    name = Column(String(32))
    last = Column(ForeignKey('media.id'), default=None)
    status = Column(Integer, default=0)

    def serialize(self, show_token=False):
        """Return a dict view; the token is masked unless show_token."""
        data = {
            'id': self.id,
            'name': self.name,
            'last': self.last,
            'event_id': self.event,
            'status': self.status,
        }
        data['token'] = self.token if show_token else None
        return data
class Skip(Base):
    # One "skip" marker per (user, media, player); the unique constraint
    # enforces at most one row per triple.
    __tablename__ = "skip"
    id = Column(Integer, primary_key=True)
    user = Column(ForeignKey('user.id'))
    media = Column(ForeignKey('media.id'))
    player = Column(ForeignKey('player.id'))
    __table_args__ = (
        UniqueConstraint('user', 'media', 'player', name='_user_media_player_uc'),
    )
class SourceQueue(Base):
    """A user's queue of media targeted at one player."""
    __tablename__ = "sourcequeue"
    id = Column(Integer, primary_key=True)
    user = Column(ForeignKey('user.id'))
    target = Column(ForeignKey('player.id'))
    # BUG FIX: pass the callable, not its result -- datetime.utcnow()
    # would be evaluated once at import time, stamping every row with
    # the module load time.
    created_at = Column(DateTime(timezone=True), default=datetime.utcnow)
    # Allow only one source queue per user per target player
    __table_args__ = (
        UniqueConstraint('user', 'target', name='_user_target_uc'),
    )

    def serialize(self):
        """Return a dict view including the queue's Media items."""
        s = db_session()
        # BUG FIX: the original had a trailing comma here, which wrapped
        # the list in a one-element tuple.
        items = [media.serialize()
                 for media in s.query(Media).filter_by(queue=self.id).all()]
        s.close()
        return {
            'id': self.id,
            'user': self.user,
            'target': self.target,
            'items': items
        }
class Source(Base):
    """A media source: an uploaded file or a remote (e.g. YouTube) URL,
    plus probed audio/video metadata and download status."""
    __tablename__ = "source"
    id = Column(Integer, primary_key=True)
    hash = Column(String(64), default='')
    file_name = Column(String(256), default='')
    file_ext = Column(String(4))
    mime_type = Column(String(32))
    size_bytes = Column(Integer, default=0)
    media_type = Column(Integer, default=0)
    youtube_hash = Column(String(32), default='')
    other_url = Column(String(512), default='')
    length_seconds = Column(Integer, default=0)
    # BUG FIX: pass the callable, not its result -- datetime.utcnow()
    # would be evaluated once at import time, stamping every row with
    # the module load time.
    created_at = Column(DateTime(timezone=True), default=datetime.utcnow)
    title = Column(String(100), default='')
    description = Column(Text, default='')
    status = Column(Integer, default=0)
    message = Column(String(64), default='')
    video_codec = Column(String(16), default='')
    video_bitrate = Column(Integer, default=0)
    video_w = Column(Integer, default=0)
    video_h = Column(Integer, default=0)
    audio_codec = Column(String(16), default='')
    audio_bitrate = Column(Integer, default=0)

    def serialize(self):
        """Return a dict view with nested 'audio' and 'video' sub-dicts."""
        return {
            'id': self.id,
            'youtube_hash': self.youtube_hash,
            'other_url': self.other_url,
            'status': self.status,
            'title': self.title,
            'description': self.description,
            'file_ext': self.file_ext,
            'mime_type': self.mime_type,
            'size_bytes': self.size_bytes,
            'media_type': self.media_type,
            'length_seconds': self.length_seconds,
            'message': self.message,
            'audio': {
                'codec': self.audio_codec,
                'bitrate': self.audio_bitrate
            },
            'video': {
                'codec': self.video_codec,
                'bitrate': self.video_bitrate,
                'width': self.video_w,
                'height': self.video_h
            }
        }
class Media(Base):
    # A queue item linking a Source row (nullable while not yet attached)
    # to the owning user and the queue it sits in.
    __tablename__ = "media"
    id = Column(Integer, primary_key=True)
    source = Column(ForeignKey('source.id'), nullable=True, default=None)
    user = Column(ForeignKey('user.id'))
    queue = Column(ForeignKey('sourcequeue.id'))
    def serialize(self):
        """Return a dict view with the full Source dict embedded (or None)."""
        # Opens a short-lived session just to resolve the source row.
        s = db_session()
        source_entry = s.query(Source).filter_by(id=self.source).one().serialize() if self.source else None
        s.close()
        return {
            'id': self.id,
            'source_id': self.source,
            'source': source_entry,
            'user_id': self.user
        }
class Setting(Base):
    """A per-user key/value setting with a type code and numeric bounds."""
    __tablename__ = "setting"
    id = Column(Integer, primary_key=True)
    user = Column(ForeignKey('user.id'))
    key = Column(String(32))
    value = Column(String(32))
    type = Column(Integer)
    max = Column(Integer)
    min = Column(Integer)

    def serialize(self):
        """Return a dict of the serializable columns (user omitted)."""
        exposed = ('id', 'key', 'value', 'type', 'max', 'min')
        return {field: getattr(self, field) for field in exposed}
class User(Base):
    """A user account; serialize() never exposes the password column."""
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    username = Column(String(32), unique=True)
    password = Column(String(255))
    nickname = Column(String(32))
    email = Column(String(128))
    level = Column(Integer, default=USERLEVELS['none'])

    def serialize(self):
        """Return a dict of public account fields (password omitted)."""
        public = ('id', 'username', 'nickname', 'email', 'level')
        return {field: getattr(self, field) for field in public}
class Session(Base):
    """A session row keyed by an opaque 32-char token, linked to a user."""
    __tablename__ = "session"
    key = Column(String(32), primary_key=True)
    user = Column(ForeignKey('user.id'))
    # BUG FIX: pass the callable, not its result -- datetime.utcnow()
    # would be evaluated once at import time, giving every session the
    # module load time as its start.
    start = Column(DateTime(timezone=True), default=datetime.utcnow)
def db_init(engine_str):
    """Create the engine from engine_str and bind the session factory.

    Must be called before db_session().
    """
    # BUG FIX: without `global`, the assignment bound a local variable
    # and left the module-level _engine as None.
    global _engine
    _engine = create_engine(engine_str+'?charset=utf8', pool_recycle=3600)
    _session.configure(bind=_engine)
    # Base.metadata.create_all(_engine)
def db_session():
    """Return a new Session from the module factory (configured by db_init)."""
    return _session()
| |
"""
Instruction transformations.
"""
from __future__ import absolute_import
from .ast import Def, Var, Apply
from .ti import ti_xform, TypeEnv, get_type_env, TypeConstraint
from collections import OrderedDict
from functools import reduce
try:
from typing import Union, Iterator, Sequence, Iterable, List, Dict # noqa
from typing import Optional, Set # noqa
from .ast import Expr, VarAtomMap # noqa
from .isa import TargetISA # noqa
from .typevar import TypeVar # noqa
from .instructions import ConstrList, Instruction # noqa
DefApply = Union[Def, Apply]
except ImportError:
pass
def canonicalize_defapply(node):
    # type: (DefApply) -> Def
    """
    Canonicalize a `Def` or `Apply` node into a `Def`.

    An `Apply` becomes a `Def` with an empty tuple of defined values;
    a `Def` is passed through unchanged.
    """
    if not isinstance(node, Apply):
        return node
    return Def((), node)
class Rtl(object):
    """
    Register Transfer Language list.
    An RTL object contains a list of register assignments in the form of `Def`
    objects.
    An RTL list can represent both a source pattern to be matched, or a
    destination pattern to be inserted.
    """
    def __init__(self, *args):
        # type: (*DefApply) -> None
        # Bare Apply nodes are wrapped into Defs with no defined values.
        self.rtl = tuple(map(canonicalize_defapply, args))
    def copy(self, m):
        # type: (VarAtomMap) -> Rtl
        """
        Return a copy of this rtl with all Vars substituted with copies or
        according to m. Update m as neccessary.
        """
        return Rtl(*[d.copy(m) for d in self.rtl])
    def vars(self):
        # type: () -> Set[Var]
        """Return the set of all Vars in self that correspond to SSA values"""
        return reduce(lambda x, y: x.union(y),
                      [d.vars() for d in self.rtl],
                      set([]))
    def definitions(self):
        # type: () -> Set[Var]
        """ Return the set of all Vars defined in self"""
        return reduce(lambda x, y: x.union(y),
                      [d.definitions() for d in self.rtl],
                      set([]))
    def free_vars(self):
        # type: () -> Set[Var]
        """Return the set of free Vars corresp. to SSA vals used in self"""
        def flow_f(s, d):
            # type: (Set[Var], Def) -> Set[Var]
            """Compute the change in the set of free vars across a Def"""
            # A Def kills its defined vars and generates its used vars.
            s = s.difference(set(d.defs))
            uses = set(d.expr.args[i] for i in d.expr.inst.value_opnums)
            for v in uses:
                assert isinstance(v, Var)
                s.add(v)
            return s
        # Scan backwards so defs seen later kill earlier uses correctly.
        return reduce(flow_f, reversed(self.rtl), set([]))
    def substitution(self, other, s):
        # type: (Rtl, VarAtomMap) -> Optional[VarAtomMap]
        """
        If the Rtl self agrees structurally with the Rtl other, return a
        substitution to transform self to other. Two Rtls agree structurally if
        they have the same sequence of Defs, that agree structurally.
        """
        if len(self.rtl) != len(other.rtl):
            return None
        for i in range(len(self.rtl)):
            s = self.rtl[i].substitution(other.rtl[i], s)
            if s is None:
                return None
        return s
    def is_concrete(self):
        # type: (Rtl) -> bool
        """Return True iff every Var in the self has a singleton type."""
        return all(v.get_typevar().singleton_type() is not None
                   for v in self.vars())
    def cleanup_concrete_rtl(self):
        # type: (Rtl) -> None
        """
        Given that there is only 1 possible concrete typing T for self, assign
        a singleton TV with type t=T[v] for each Var v \in self. Its an error
        to call this on an Rtl with more than 1 possible typing. This modifies
        the Rtl in-place.
        """
        # Imported here to avoid a circular import at module load time.
        from .ti import ti_rtl, TypeEnv
        # 1) Infer the types of all vars in res
        typenv = get_type_env(ti_rtl(self, TypeEnv()))
        typenv.normalize()
        typenv = typenv.extract()
        # 2) Make sure there is only one possible type assignment
        typings = list(typenv.concrete_typings())
        assert len(typings) == 1
        typing = typings[0]
        # 3) Assign the only possible type to each variable.
        for v in typenv.vars:
            assert typing[v].singleton_type() is not None
            v.set_typevar(typing[v])
    def __str__(self):
        # type: () -> str
        return "\n".join(map(str, self.rtl))
class XForm(object):
"""
An instruction transformation consists of a source and destination pattern.
Patterns are expressed in *register transfer language* as tuples of
`ast.Def` or `ast.Expr` nodes. A pattern may optionally have a sequence of
TypeConstraints, that additionally limit the set of cases when it applies.
A legalization pattern must have a source pattern containing only a single
instruction.
>>> from base.instructions import iconst, iadd, iadd_imm
>>> a = Var('a')
>>> c = Var('c')
>>> v = Var('v')
>>> x = Var('x')
>>> XForm(
... Rtl(c << iconst(v),
... a << iadd(x, c)),
... Rtl(a << iadd_imm(x, v)))
XForm(inputs=[Var(v), Var(x)], defs=[Var(c, src), Var(a, src, dst)],
c << iconst(v)
a << iadd(x, c)
=>
a << iadd_imm(x, v)
)
"""
def __init__(self, src, dst, constraints=None):
# type: (Rtl, Rtl, Optional[ConstrList]) -> None
self.src = src
self.dst = dst
# Variables that are inputs to the source pattern.
self.inputs = list() # type: List[Var]
# Variables defined in either src or dst.
self.defs = list() # type: List[Var]
# Rewrite variables in src and dst RTL lists to our own copies.
# Map name -> private Var.
symtab = dict() # type: Dict[str, Var]
self._rewrite_rtl(src, symtab, Var.SRCCTX)
num_src_inputs = len(self.inputs)
self._rewrite_rtl(dst, symtab, Var.DSTCTX)
# Needed for testing type inference on XForms
self.symtab = symtab
# Check for inconsistently used inputs.
for i in self.inputs:
if not i.is_input():
raise AssertionError(
"'{}' used as both input and def".format(i))
# Check for spurious inputs in dst.
if len(self.inputs) > num_src_inputs:
raise AssertionError(
"extra inputs in dst RTL: {}".format(
self.inputs[num_src_inputs:]))
# Perform type inference and cleanup
raw_ti = get_type_env(ti_xform(self, TypeEnv()))
raw_ti.normalize()
self.ti = raw_ti.extract()
def interp_tv(tv):
# type: (TypeVar) -> TypeVar
""" Convert typevars according to symtab """
if not tv.name.startswith("typeof_"):
return tv
return symtab[tv.name[len("typeof_"):]].get_typevar()
self.constraints = [] # type: List[TypeConstraint]
if constraints is not None:
if isinstance(constraints, TypeConstraint):
constr_list = [constraints] # type: Sequence[TypeConstraint]
else:
constr_list = constraints
for c in constr_list:
type_m = {tv: interp_tv(tv) for tv in c.tvs()}
inner_c = c.translate(type_m)
self.constraints.append(inner_c)
self.ti.add_constraint(inner_c)
# Sanity: The set of inferred free typevars should be a subset of the
# TVs corresponding to Vars appearing in src
free_typevars = set(self.ti.free_typevars())
src_vars = set(self.inputs).union(
[x for x in self.defs if not x.is_temp()])
src_tvs = set([v.get_typevar() for v in src_vars])
if (not free_typevars.issubset(src_tvs)):
raise AssertionError(
"Some free vars don't appear in src - {}"
.format(free_typevars.difference(src_tvs)))
# Update the type vars for each Var to their inferred values
for v in self.inputs + self.defs:
v.set_typevar(self.ti[v.get_typevar()])
def __repr__(self):
# type: () -> str
s = "XForm(inputs={}, defs={},\n ".format(self.inputs, self.defs)
s += '\n '.join(str(n) for n in self.src.rtl)
s += '\n=>\n '
s += '\n '.join(str(n) for n in self.dst.rtl)
s += '\n)'
return s
def _rewrite_rtl(self, rtl, symtab, context):
# type: (Rtl, Dict[str, Var], int) -> None
for line in rtl.rtl:
if isinstance(line, Def):
line.defs = tuple(
self._rewrite_defs(line, symtab, context))
expr = line.expr
else:
expr = line
self._rewrite_expr(expr, symtab, context)
def _rewrite_expr(self, expr, symtab, context):
# type: (Apply, Dict[str, Var], int) -> None
"""
Find all uses of variables in `expr` and replace them with our own
local symbols.
"""
# Accept a whole expression tree.
stack = [expr]
while len(stack) > 0:
expr = stack.pop()
expr.args = tuple(
self._rewrite_uses(expr, stack, symtab, context))
def _rewrite_defs(self, line, symtab, context):
# type: (Def, Dict[str, Var], int) -> Iterable[Var]
"""
Given a tuple of symbols defined in a Def, rewrite them to local
symbols. Yield the new locals.
"""
for sym in line.defs:
name = str(sym)
if name in symtab:
var = symtab[name]
if var.get_def(context):
raise AssertionError("'{}' multiply defined".format(name))
else:
var = Var(name)
symtab[name] = var
self.defs.append(var)
var.set_def(context, line)
yield var
def _rewrite_uses(self, expr, stack, symtab, context):
# type: (Apply, List[Apply], Dict[str, Var], int) -> Iterable[Expr]
"""
Given an `Apply` expr, rewrite all uses in its arguments to local
variables. Yield a sequence of new arguments.
Append any `Apply` arguments to `stack`.
"""
for arg, operand in zip(expr.args, expr.inst.ins):
# Nested instructions are allowed. Visit recursively.
if isinstance(arg, Apply):
stack.append(arg)
yield arg
continue
if not isinstance(arg, Var):
assert not operand.is_value(), "Value arg must be `Var`"
yield arg
continue
# This is supposed to be a symbolic value reference.
name = str(arg)
if name in symtab:
var = symtab[name]
# The variable must be used consistently as a def or input.
if not var.is_input() and not var.get_def(context):
raise AssertionError(
"'{}' used as both input and def"
.format(name))
else:
# First time use of variable.
var = Var(name)
symtab[name] = var
self.inputs.append(var)
yield var
def verify_legalize(self):
    # type: () -> None
    """
    Verify that this is a valid legalization XForm.

    - The source pattern must describe a single instruction.
    - All values defined in the output pattern must be defined in the
      destination pattern.
    """
    assert len(self.src.rtl) == 1, "Legalize needs single instruction."
    for value in self.src.rtl[0].defs:
        if value.is_output():
            continue
        raise AssertionError(
            '{} not defined in dest pattern'.format(value))
def apply(self, r, suffix=None):
    # type: (Rtl, str) -> Rtl
    """
    Given a concrete Rtl r s.t. r matches self.src, return the
    corresponding concrete self.dst. If suffix is provided, any temporary
    defs are renamed with '.suffix' appended to their old name.
    """
    assert r.is_concrete()
    # Map the source pattern's variables onto the concrete values in `r`.
    s = self.src.substitution(r, {})  # type: VarAtomMap
    assert s is not None
    if (suffix is not None):
        # Rename temporaries so repeated expansions don't collide.
        for v in self.dst.vars():
            if v.is_temp():
                assert v not in s
                s[v] = Var(v.name + '.' + suffix)
    dst = self.dst.copy(s)
    dst.cleanup_concrete_rtl()
    return dst
class XFormGroup(object):
    """
    A group of related transformations.

    :param isa: A target ISA whose instructions are allowed.
    :param chain: A next level group to try if this one doesn't match.
    """

    def __init__(self, name, doc, isa=None, chain=None):
        # type: (str, str, TargetISA, XFormGroup) -> None
        self.xforms = list()  # type: List[XForm]
        self.custom = OrderedDict()  # type: OrderedDict[Instruction, str]
        self.name = name
        self.__doc__ = doc
        self.isa = isa
        self.chain = chain

    def __str__(self):
        # type: () -> str
        """Group name, qualified by the ISA name when ISA-specific."""
        if not self.isa:
            return self.name
        return '{}.{}'.format(self.isa.name, self.name)

    def rust_name(self):
        # type: () -> str
        """
        Get the Rust name of this function implementing this transform.
        """
        if not self.isa:
            # ISA-independent groups live in the shared legalizer module.
            return '::legalizer::{}'.format(self.name)
        # This is a function in the same module as the LEGALIZE_ACTION
        # table referring to it.
        return self.name

    def legalize(self, src, dst):
        # type: (Union[Def, Apply], Rtl) -> None
        """
        Add a legalization pattern to this group.

        :param src: Single `Def` or `Apply` to be legalized.
        :param dst: `Rtl` list of replacement instructions.
        """
        new_xform = XForm(Rtl(src), dst)
        new_xform.verify_legalize()
        self.xforms.append(new_xform)

    def custom_legalize(self, inst, funcname):
        # type: (Instruction, str) -> None
        """
        Add a custom legalization action for `inst`.

        The `funcname` parameter is the fully qualified name of a Rust
        function which takes the same arguments as the `isa::Legalize`
        actions. The custom function will be called to legalize `inst`
        and any return value is ignored.
        """
        assert inst not in self.custom, "Duplicate custom_legalize"
        self.custom[inst] = funcname
| |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['DeviceWorker', 'Hogwild', 'DownpourSGD', 'Section']
class DeviceWorker(object):
    """
    DeviceWorker is an abstract class, which generates worker desc.
    This class is an inner class that we do computation logics within
    the implementation. For example, execution of a program or a graph.
    """

    def __init__(self):
        """Initialize with no program and no inference flag configured."""
        self._program = None
        self._infer = None

    def _set_infer(self, infer=False):
        """
        Set the inference flag for the current device worker.

        Args:
            infer(bool): whether to do inference
        """
        self._infer = infer

    def _set_fleet_desc(self, fleet_desc):
        """
        Set fleet desc.

        Args:
            fleet_desc(PSParameter): pslib.PSParameter object
        """
        self._fleet_desc = fleet_desc

    def _set_program(self, program):
        """
        Set program.

        Args:
            program(Program): a Program object
        """
        self._program = program

    def _gen_worker_desc(self, trainer_desc):
        """
        Generate the worker desc. Subclasses must override this.

        Args:
            trainer_desc(TrainerDesc): a TrainerDesc object

        Raises:
            NotImplementedError: always, on this abstract base class.
        """
        raise NotImplementedError(
            "DeviceWorker does not implement gen_worker_desc, "
            "please use Hogwild or DownpourSGD, etc.")
class Hogwild(DeviceWorker):
    """
    Hogwild is a kind of SGD algorithm.
    """

    def __init__(self):
        """Initialize through the DeviceWorker base class."""
        super(Hogwild, self).__init__()

    def _gen_worker_desc(self, trainer_desc):
        """
        Generator worker desc, which device worker is HogwildWorker.

        Args:
            trainer_desc(TrainerDesc): a TrainerDesc object
        """
        trainer_desc.device_worker_name = "HogwildWorker"
        if not self._infer:
            return
        # just ignore feed op for inference model
        trainer_desc.hogwild_param.skip_ops.extend(["feed"])
class DownpourSGD(DeviceWorker):
    """
    DownpourSGD is a kind of distributed SGD algorithm.
    """

    def __init__(self):
        """
        Init.
        initialize downpourSGD device worker
        """
        super(DownpourSGD, self).__init__()

    def _gen_worker_desc(self, trainer_desc):
        """
        Generator worker desc, which device worker is DownpourWorker.

        Args:
            trainer_desc(TrainerDesc): a TrainerDesc object
        """
        # Fail fast: the original checked ``self._program == None`` only
        # after already calling id() on it; check with ``is None`` first.
        if self._program is None:
            print("program of current device worker is not configured")
            exit(-1)
        dense_table_set = set()
        program_id = str(id(self._program))
        opt_info = self._program._fleet_opt
        program_configs = opt_info["program_configs"]
        downpour = trainer_desc.downpour_param
        # Copy the push/pull table ids for this program into the desc.
        for pid in program_configs:
            if pid != program_id:
                continue
            pc = downpour.program_config.add()
            pc.program_id = program_id
            for i in program_configs[pid]["push_sparse"]:
                pc.push_sparse_table_id.extend([i])
            for i in program_configs[pid]["push_dense"]:
                pc.push_dense_table_id.extend([i])
                dense_table_set.add(i)
            for i in program_configs[pid]["pull_sparse"]:
                pc.pull_sparse_table_id.extend([i])
            for i in program_configs[pid]["pull_dense"]:
                pc.pull_dense_table_id.extend([i])
                dense_table_set.add(i)
            break
        trainer_desc.device_worker_name = "DownpourWorker"
        pull_thread = trainer_desc.pull_dense_param
        pull_thread.device_num = trainer_desc.thread_num
        trainer_param = self._fleet_desc.trainer_param
        # Dense tables used by this program are pulled by the pull thread.
        for i in trainer_param.dense_table:
            if i.table_id in dense_table_set:
                dense_table = pull_thread.dense_table.add()
                dense_table.dense_value_name.extend(i.dense_variable_name)
                dense_table.table_id = i.table_id
        for i in range(len(trainer_param.sparse_table)):
            sparse_table = downpour.sparse_table.add()
            sparse_table.table_id = trainer_param.sparse_table[i].table_id
            sparse_table.sparse_key_name.extend(
                trainer_param.sparse_table[i].slot_key)
            sparse_table.sparse_value_name.extend(
                trainer_param.sparse_table[i].slot_value)
            sparse_table.sparse_grad_name.extend(
                trainer_param.sparse_table[i].slot_gradient)
            fea_dim = (
                self._fleet_desc.server_param.downpour_server_param
                .downpour_table_param[i].accessor.fea_dim)
            if opt_info["use_cvm"]:
                sparse_table.emb_dim = fea_dim
                sparse_table.fea_dim = sparse_table.emb_dim
            else:
                # Without CVM preprocessing, two slots of fea_dim are
                # reserved (see the +2/-2 pairing below).
                sparse_table.emb_dim = fea_dim - 2
                sparse_table.fea_dim = sparse_table.emb_dim + 2
            # TODO(guru4elephant): hard code here, need to improve
            sparse_table.label_var_name = "click"
        if opt_info["stat_var_names"]:
            for i in opt_info["stat_var_names"]:
                downpour.stat_var_names.extend([i])
        for i in trainer_param.dense_table:
            if i.table_id in dense_table_set:
                dense_table = downpour.dense_table.add()
                dense_table.table_id = i.table_id
                dense_table.dense_value_name.extend(i.dense_variable_name)
                dense_table.dense_grad_name.extend(
                    i.dense_gradient_variable_name)
        downpour.skip_ops.extend(trainer_param.skip_op)
        if self._infer:
            # Inference never pushes gradients to the parameter server.
            downpour.push_dense = False
            downpour.push_sparse = False
class Section(DeviceWorker):
    """
    SectionWorker: device worker for pipeline (section) training.
    """

    def __init__(self):
        """
        Init.
        """
        super(Section, self).__init__()

    def _gen_worker_desc(self, trainer_desc):
        """
        Generator worker desc, which device worker is SectionWorker.

        Args:
            trainer_desc(TrainerDesc): a TrainerDesc object
        """
        # Fix: removed the unused ``from google.protobuf import
        # text_format`` import that the original carried.
        from . import core
        trainer_desc.device_worker_name = "SectionWorker"
        pipeline_opt = self._program._pipeline_opt
        section_param = trainer_desc.section_param
        section_param.queue_size = pipeline_opt["queue_size"]
        section_param.sync_steps = pipeline_opt["sync_steps"]
        section_param.start_cpu_core_id = pipeline_opt["start_cpu_core_id"]
        for e in pipeline_opt["param_need_sync"]:
            section_param.param_need_sync.append(e)
        for i, program in enumerate(pipeline_opt["section_program_list"]):
            cfg = section_param.section_config.add()
            # Copy the program desc via a serialize/parse round-trip.
            cfg.program_desc.ParseFromString(program["program"]._get_desc()
                                             .serialize_to_string())
            # TODO: why does not work
            # cfg.program_desc.CopyFrom(program.program._get_desc())
            place = pipeline_opt["place_list"][i]
            if isinstance(place, core.CPUPlace):
                cfg.place = cfg.CPUPlace
            elif isinstance(place, core.CUDAPlace):
                cfg.place = cfg.CUDAPlace
            elif isinstance(place, core.CUDAPinnedPlace):
                cfg.place = cfg.CUDAPinnedPlace
            else:
                raise NotImplementedError(
                    "SectionWorker only supports CPUPlace, CUDAPlace and CUDAPinnedPlace now."
                )
            cfg.concurrency = pipeline_opt["concurrency_list"][i]
            for var in program["input_set"]:
                cfg.section_in_var_names.append(var)
            for var in program["output_set"]:
                cfg.section_out_var_names.append(var)
class DeviceWorkerFactory(object):
    """Factory that instantiates a DeviceWorker subclass by name."""

    def _create_device_worker(self, worker_type):
        # NOTE(review): resolves e.g. "hogwild" -> Hogwild through this
        # module's globals; assumes worker_type.capitalize() matches a
        # class defined in this module — confirm for multi-word names.
        class_name = worker_type.capitalize()
        worker_class = globals()[class_name]
        return worker_class()
| |
"""
HVAC channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/integrations/zha/
"""
import asyncio
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple, Union
from zigpy.exceptions import ZigbeeException
import zigpy.zcl.clusters.hvac as hvac
from zigpy.zcl.foundation import Status
from homeassistant.core import callback
from .. import registries, typing as zha_typing
from ..const import (
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_OP,
SIGNAL_ATTR_UPDATED,
)
from ..helpers import retryable_req
from .base import ZigbeeChannel
# Lightweight record describing one attribute report sent over a signal.
AttributeUpdateRecord = namedtuple("AttributeUpdateRecord", "attr_id, attr_name, value")
# (min report interval, max report interval, reportable change) tuples.
REPORT_CONFIG_CLIMATE = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 25)
REPORT_CONFIG_CLIMATE_DEMAND = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 5)
REPORT_CONFIG_CLIMATE_DISCRETE = (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 1)
# Registered for the ZCL Dehumidification cluster; inherits all behavior
# from ZigbeeChannel.
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Dehumidification.cluster_id)
class Dehumidification(ZigbeeChannel):
    """Dehumidification channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Fan.cluster_id)
class FanChannel(ZigbeeChannel):
    """Fan channel."""

    _value_attribute = 0
    REPORT_CONFIG = ({"attr": "fan_mode", "config": REPORT_CONFIG_OP},)

    @property
    def fan_mode(self) -> Optional[int]:
        """Return current fan mode."""
        return self.cluster.get("fan_mode")

    async def async_set_speed(self, value) -> None:
        """Set the speed of the fan; log and swallow Zigbee errors."""
        try:
            await self.cluster.write_attributes({"fan_mode": value})
        except ZigbeeException as ex:
            self.error("Could not set speed: %s", ex)

    async def async_update(self) -> None:
        """Retrieve latest state."""
        await self.get_attribute_value("fan_mode", from_cache=False)

    @callback
    def attribute_updated(self, attrid: int, value: Any) -> None:
        """Handle attribute update from fan cluster."""
        attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
        self.debug(
            "Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
        )
        # Only fan_mode reports are forwarded as a signal.
        if attr_name != "fan_mode":
            return
        self.async_send_signal(
            f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", attrid, attr_name, value
        )
# Registered for the ZCL Pump Configuration and Control cluster; inherits
# all behavior from ZigbeeChannel.
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Pump.cluster_id)
class Pump(ZigbeeChannel):
    """Pump channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.Thermostat.cluster_id)
class ThermostatChannel(ZigbeeChannel):
    """Thermostat channel."""

    def __init__(
        self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
    ) -> None:
        """Init Thermostat channel instance."""
        super().__init__(cluster, ch_pool)
        # Attribute name -> True if the initial read may be served from
        # cache (static limits), False if it must come from the device.
        # NOTE(review): "ctrl_seqe_of_oper" mirrors the attribute name as
        # spelled by the cluster definition — do not "correct" it here.
        self._init_attrs = {
            "abs_min_heat_setpoint_limit": True,
            "abs_max_heat_setpoint_limit": True,
            "abs_min_cool_setpoint_limit": True,
            "abs_max_cool_setpoint_limit": True,
            "ctrl_seqe_of_oper": False,
            "local_temp": False,
            "max_cool_setpoint_limit": True,
            "max_heat_setpoint_limit": True,
            "min_cool_setpoint_limit": True,
            "min_heat_setpoint_limit": True,
            "occupancy": False,
            "occupied_cooling_setpoint": False,
            "occupied_heating_setpoint": False,
            "pi_cooling_demand": False,
            "pi_heating_demand": False,
            "running_mode": False,
            "running_state": False,
            "system_mode": False,
            "unoccupied_heating_setpoint": False,
            "unoccupied_cooling_setpoint": False,
        }
        # Temperatures are in hundredths of a degree Celsius.
        self._abs_max_cool_setpoint_limit = 3200  # 32C
        self._abs_min_cool_setpoint_limit = 1600  # 16C
        self._ctrl_seqe_of_oper = 0xFF
        self._abs_max_heat_setpoint_limit = 3000  # 30C
        self._abs_min_heat_setpoint_limit = 700  # 7C
        self._running_mode = None
        self._max_cool_setpoint_limit = None
        self._max_heat_setpoint_limit = None
        self._min_cool_setpoint_limit = None
        self._min_heat_setpoint_limit = None
        self._local_temp = None
        self._occupancy = None
        self._occupied_cooling_setpoint = None
        self._occupied_heating_setpoint = None
        self._pi_cooling_demand = None
        self._pi_heating_demand = None
        self._running_state = None
        self._system_mode = None
        self._unoccupied_cooling_setpoint = None
        self._unoccupied_heating_setpoint = None
        self._report_config = [
            {"attr": "local_temp", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "occupied_cooling_setpoint", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "occupied_heating_setpoint", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "unoccupied_cooling_setpoint", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "unoccupied_heating_setpoint", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "running_mode", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "running_state", "config": REPORT_CONFIG_CLIMATE_DEMAND},
            {"attr": "system_mode", "config": REPORT_CONFIG_CLIMATE},
            {"attr": "occupancy", "config": REPORT_CONFIG_CLIMATE_DISCRETE},
            {"attr": "pi_cooling_demand", "config": REPORT_CONFIG_CLIMATE_DEMAND},
            {"attr": "pi_heating_demand", "config": REPORT_CONFIG_CLIMATE_DEMAND},
        ]

    @property
    def abs_max_cool_setpoint_limit(self) -> int:
        """Absolute maximum cooling setpoint."""
        return self._abs_max_cool_setpoint_limit

    @property
    def abs_min_cool_setpoint_limit(self) -> int:
        """Absolute minimum cooling setpoint."""
        return self._abs_min_cool_setpoint_limit

    @property
    def abs_max_heat_setpoint_limit(self) -> int:
        """Absolute maximum heating setpoint."""
        return self._abs_max_heat_setpoint_limit

    @property
    def abs_min_heat_setpoint_limit(self) -> int:
        """Absolute minimum heating setpoint."""
        return self._abs_min_heat_setpoint_limit

    @property
    def ctrl_seqe_of_oper(self) -> int:
        """Control Sequence of operations attribute."""
        return self._ctrl_seqe_of_oper

    @property
    def max_cool_setpoint_limit(self) -> int:
        """Maximum cooling setpoint, falling back to the absolute limit."""
        if self._max_cool_setpoint_limit is None:
            return self.abs_max_cool_setpoint_limit
        return self._max_cool_setpoint_limit

    @property
    def min_cool_setpoint_limit(self) -> int:
        """Minimum cooling setpoint, falling back to the absolute limit."""
        if self._min_cool_setpoint_limit is None:
            return self.abs_min_cool_setpoint_limit
        return self._min_cool_setpoint_limit

    @property
    def max_heat_setpoint_limit(self) -> int:
        """Maximum heating setpoint, falling back to the absolute limit."""
        if self._max_heat_setpoint_limit is None:
            return self.abs_max_heat_setpoint_limit
        return self._max_heat_setpoint_limit

    @property
    def min_heat_setpoint_limit(self) -> int:
        """Minimum heating setpoint, falling back to the absolute limit."""
        if self._min_heat_setpoint_limit is None:
            return self.abs_min_heat_setpoint_limit
        return self._min_heat_setpoint_limit

    @property
    def local_temp(self) -> Optional[int]:
        """Thermostat temperature."""
        return self._local_temp

    @property
    def occupancy(self) -> Optional[int]:
        """Is occupancy detected."""
        return self._occupancy

    @property
    def occupied_cooling_setpoint(self) -> Optional[int]:
        """Temperature when room is occupied."""
        return self._occupied_cooling_setpoint

    @property
    def occupied_heating_setpoint(self) -> Optional[int]:
        """Temperature when room is occupied."""
        return self._occupied_heating_setpoint

    @property
    def pi_cooling_demand(self) -> Optional[int]:
        """Cooling demand. None until the first report/read arrives."""
        return self._pi_cooling_demand

    @property
    def pi_heating_demand(self) -> Optional[int]:
        """Heating demand. None until the first report/read arrives."""
        return self._pi_heating_demand

    @property
    def running_mode(self) -> Optional[int]:
        """Thermostat running mode."""
        return self._running_mode

    @property
    def running_state(self) -> Optional[int]:
        """Thermostat running state, state of heat, cool, fan relays."""
        return self._running_state

    @property
    def system_mode(self) -> Optional[int]:
        """System mode."""
        return self._system_mode

    @property
    def unoccupied_cooling_setpoint(self) -> Optional[int]:
        """Temperature when room is not occupied."""
        return self._unoccupied_cooling_setpoint

    @property
    def unoccupied_heating_setpoint(self) -> Optional[int]:
        """Temperature when room is not occupied."""
        return self._unoccupied_heating_setpoint

    @callback
    def attribute_updated(self, attrid, value):
        """Handle attribute update cluster."""
        attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
        self.debug(
            "Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
        )
        # Store into the matching ``_<attr_name>`` backing field, then
        # notify listeners of the update.
        setattr(self, f"_{attr_name}", value)
        self.async_send_signal(
            f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
            AttributeUpdateRecord(attrid, attr_name, value),
        )

    async def _chunk_attr_read(self, attrs, cached=False):
        # Read attributes four at a time to keep each ZCL request small.
        chunk, attrs = attrs[:4], attrs[4:]
        while chunk:
            res, fail = await self.cluster.read_attributes(chunk, allow_cache=cached)
            self.debug("read attributes: Success: %s. Failed: %s", res, fail)
            for attr in chunk:
                # Attribute has been attempted; drop it from the init set.
                self._init_attrs.pop(attr, None)
                if attr in fail:
                    continue
                if isinstance(attr, str):
                    setattr(self, f"_{attr}", res[attr])
                    self.async_send_signal(
                        f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
                        AttributeUpdateRecord(None, attr, res[attr]),
                    )
            chunk, attrs = attrs[:4], attrs[4:]

    async def configure_reporting(self):
        """Configure attribute reporting for a cluster.

        This also swallows DeliveryError exceptions that are thrown when
        devices are unreachable.
        """
        kwargs = {}
        # Manufacturer-specific clusters need the manufacturer code.
        if self.cluster.cluster_id >= 0xFC00 and self._ch_pool.manufacturer_code:
            kwargs["manufacturer"] = self._ch_pool.manufacturer_code
        # Configure reporting four attributes at a time; stop on error.
        chunk, rest = self._report_config[:4], self._report_config[4:]
        while chunk:
            attrs = {record["attr"]: record["config"] for record in chunk}
            try:
                res = await self.cluster.configure_reporting_multiple(attrs, **kwargs)
                self._configure_reporting_status(attrs, res[0])
            except (ZigbeeException, asyncio.TimeoutError) as ex:
                self.debug(
                    "failed to set reporting on '%s' cluster for: %s",
                    self.cluster.ep_attribute,
                    str(ex),
                )
                break
            chunk, rest = rest[:4], rest[4:]

    def _configure_reporting_status(
        self, attrs: Dict[Union[int, str], Tuple], res: Union[List, Tuple]
    ) -> None:
        """Parse configure reporting result."""
        if not isinstance(res, list):
            # assume default response
            self.debug(
                "attr reporting for '%s' on '%s': %s",
                attrs,
                self.name,
                res,
            )
            return
        # A single SUCCESS record means every attribute was accepted.
        if res[0].status == Status.SUCCESS and len(res) == 1:
            self.debug(
                "Successfully configured reporting for '%s' on '%s' cluster: %s",
                attrs,
                self.name,
                res,
            )
            return
        # Partial failure: report which attributes succeeded and failed.
        failed = [
            self.cluster.attributes.get(r.attrid, [r.attrid])[0]
            for r in res
            if r.status != Status.SUCCESS
        ]
        attrs = {self.cluster.attributes.get(r, [r])[0] for r in attrs}
        self.debug(
            "Successfully configured reporting for '%s' on '%s' cluster",
            attrs - set(failed),
            self.name,
        )
        self.debug(
            "Failed to configure reporting for '%s' on '%s' cluster: %s",
            failed,
            self.name,
            res,
        )

    @retryable_req(delays=(1, 1, 3))
    async def async_initialize_channel_specific(self, from_cache: bool) -> None:
        """Initialize channel by reading cached then uncached attributes."""
        cached = [a for a, cached in self._init_attrs.items() if cached]
        uncached = [a for a, cached in self._init_attrs.items() if not cached]
        await self._chunk_attr_read(cached, cached=True)
        await self._chunk_attr_read(uncached, cached=False)

    async def async_set_operation_mode(self, mode) -> bool:
        """Set Operation mode. Return True on success."""
        if not await self.write_attributes({"system_mode": mode}):
            self.debug("couldn't set '%s' operation mode", mode)
            return False
        self._system_mode = mode
        self.debug("set system to %s", mode)
        return True

    async def async_set_heating_setpoint(
        self, temperature: int, is_away: bool = False
    ) -> bool:
        """Set heating setpoint (occupied or unoccupied, per is_away)."""
        if is_away:
            data = {"unoccupied_heating_setpoint": temperature}
        else:
            data = {"occupied_heating_setpoint": temperature}
        if not await self.write_attributes(data):
            self.debug("couldn't set heating setpoint")
            return False
        # Only mirror the new value locally after the device accepted it.
        if is_away:
            self._unoccupied_heating_setpoint = temperature
        else:
            self._occupied_heating_setpoint = temperature
        self.debug("set heating setpoint to %s", temperature)
        return True

    async def async_set_cooling_setpoint(
        self, temperature: int, is_away: bool = False
    ) -> bool:
        """Set cooling setpoint (occupied or unoccupied, per is_away)."""
        if is_away:
            data = {"unoccupied_cooling_setpoint": temperature}
        else:
            data = {"occupied_cooling_setpoint": temperature}
        if not await self.write_attributes(data):
            self.debug("couldn't set cooling setpoint")
            return False
        if is_away:
            self._unoccupied_cooling_setpoint = temperature
        else:
            self._occupied_cooling_setpoint = temperature
        self.debug("set cooling setpoint to %s", temperature)
        return True

    async def get_occupancy(self) -> Optional[bool]:
        """Get unreportable occupancy attribute.

        Returns None when the read fails or omits the attribute.
        """
        try:
            res, fail = await self.cluster.read_attributes(["occupancy"])
            self.debug("read 'occupancy' attr, success: %s, fail: %s", res, fail)
            if "occupancy" not in res:
                return None
            self._occupancy = res["occupancy"]
            return bool(self.occupancy)
        except ZigbeeException as ex:
            self.debug("Couldn't read 'occupancy' attribute: %s", ex)

    async def write_attributes(self, data, **kwargs):
        """Write attributes helper. Return True if all writes succeeded."""
        try:
            res = await self.cluster.write_attributes(data, **kwargs)
        except ZigbeeException as exc:
            self.debug("couldn't write %s: %s", data, exc)
            return False
        self.debug("wrote %s attrs, Status: %s", data, res)
        return self.check_result(res)

    @staticmethod
    def check_result(res: list) -> bool:
        """Normalize the result."""
        if not isinstance(res, list):
            return False
        # res[0] holds the per-attribute write status records.
        return all([record.status == Status.SUCCESS for record in res[0]])
# Registered for the ZCL Thermostat User Interface Configuration cluster;
# inherits all behavior from ZigbeeChannel.
@registries.ZIGBEE_CHANNEL_REGISTRY.register(hvac.UserInterface.cluster_id)
class UserInterface(ZigbeeChannel):
    """User interface (thermostat) channel."""
| |
"""
Here is probably the place to write the docs, since the test-cases
show how the type behave.
Later...
"""
from ctypes import *
import sys, unittest
from ctypes.test import xfail
from test.test_support import impl_detail
try:
    WINFUNCTYPE
except NameError:
    # fake to enable this test on Linux
    WINFUNCTYPE = CFUNCTYPE

# Load the _ctypes_test helper extension as a cdecl library, and on
# Windows additionally as a stdcall library.
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
if sys.platform == "win32":
    windll = WinDLL(_ctypes_test.__file__)
class POINT(Structure):
    """Two-int structure mirroring the test DLL's ``point`` type."""
    _fields_ = [("x", c_int), ("y", c_int)]
class RECT(Structure):
    """Four-int rectangle structure used by the callback tests."""
    _fields_ = [("left", c_int), ("top", c_int),
                ("right", c_int), ("bottom", c_int)]
class FunctionTestCase(unittest.TestCase):
    """Exercise foreign-function calls through the ``_ctypes_test`` DLL."""

    def test_mro(self):
        # in Python 2.3, this raises TypeError: MRO conflict among bases classes,
        # in Python 2.2 it works.
        #
        # But in early versions of _ctypes.c, the result of tp_new
        # wasn't checked, and it even crashed Python.
        # Found by Greg Chapman.
        try:
            class X(object, Array):
                _length_ = 5
                _type_ = "i"
        except TypeError:
            pass

        from _ctypes import _Pointer
        try:
            class X(object, _Pointer):
                pass
        except TypeError:
            pass

        from _ctypes import _SimpleCData
        try:
            class X(object, _SimpleCData):
                _type_ = "i"
        except TypeError:
            pass

        try:
            class X(object, Structure):
                _fields_ = []
        except TypeError:
            pass

    def test_wchar_parm(self):
        try:
            c_wchar
        except NameError:
            return
        f = dll._testfunc_i_bhilfd
        f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
        result = f(1, u"x", 3, 4, 5.0, 6.0)
        self.assertEqual(result, 139)
        self.assertEqual(type(result), int)

    def test_wchar_result(self):
        try:
            c_wchar
        except NameError:
            return
        f = dll._testfunc_i_bhilfd
        f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
        f.restype = c_wchar
        result = f(0, 0, 0, 0, 0, 0)
        self.assertEqual(result, u'\x00')

    def test_voidresult(self):
        f = dll._testfunc_v
        f.restype = None
        f.argtypes = [c_int, c_int, POINTER(c_int)]
        result = c_int()
        self.assertEqual(None, f(1, 2, byref(result)))
        self.assertEqual(result.value, 3)

    def test_intresult(self):
        f = dll._testfunc_i_bhilfd
        f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
        f.restype = c_int
        result = f(1, 2, 3, 4, 5.0, 6.0)
        self.assertEqual(result, 21)
        self.assertEqual(type(result), int)

        result = f(-1, -2, -3, -4, -5.0, -6.0)
        self.assertEqual(result, -21)
        self.assertEqual(type(result), int)

        # If we declare the function to return a short,
        # is the high part split off?
        f.restype = c_short
        result = f(1, 2, 3, 4, 5.0, 6.0)
        self.assertEqual(result, 21)
        self.assertEqual(type(result), int)

        result = f(1, 2, 3, 0x10004, 5.0, 6.0)
        self.assertEqual(result, 21)
        self.assertEqual(type(result), int)

        # You cannot assign character format codes as restype any longer
        self.assertRaises(TypeError, setattr, f, "restype", "i")

    def test_floatresult(self):
        f = dll._testfunc_f_bhilfd
        f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
        f.restype = c_float
        result = f(1, 2, 3, 4, 5.0, 6.0)
        self.assertEqual(result, 21)
        self.assertEqual(type(result), float)

        result = f(-1, -2, -3, -4, -5.0, -6.0)
        self.assertEqual(result, -21)
        self.assertEqual(type(result), float)

    def test_doubleresult(self):
        f = dll._testfunc_d_bhilfd
        f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
        f.restype = c_double
        result = f(1, 2, 3, 4, 5.0, 6.0)
        self.assertEqual(result, 21)
        self.assertEqual(type(result), float)

        result = f(-1, -2, -3, -4, -5.0, -6.0)
        self.assertEqual(result, -21)
        self.assertEqual(type(result), float)

    @impl_detail('long double not supported by PyPy', pypy=False)
    def test_longdoubleresult(self):
        f = dll._testfunc_D_bhilfD
        f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_longdouble]
        f.restype = c_longdouble
        result = f(1, 2, 3, 4, 5.0, 6.0)
        self.assertEqual(result, 21)
        self.assertEqual(type(result), float)

        result = f(-1, -2, -3, -4, -5.0, -6.0)
        self.assertEqual(result, -21)
        self.assertEqual(type(result), float)

    def test_longlongresult(self):
        try:
            c_longlong
        except NameError:
            return
        f = dll._testfunc_q_bhilfd
        f.restype = c_longlong
        f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
        result = f(1, 2, 3, 4, 5.0, 6.0)
        self.assertEqual(result, 21)

        f = dll._testfunc_q_bhilfdq
        f.restype = c_longlong
        f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double, c_longlong]
        result = f(1, 2, 3, 4, 5.0, 6.0, 21)
        self.assertEqual(result, 42)

    def test_stringresult(self):
        f = dll._testfunc_p_p
        f.argtypes = None
        f.restype = c_char_p
        result = f("123")
        self.assertEqual(result, "123")

        result = f(None)
        self.assertEqual(result, None)

    def test_pointers(self):
        f = dll._testfunc_p_p
        f.restype = POINTER(c_int)
        f.argtypes = [POINTER(c_int)]

        # This only works if the value c_int(42) passed to the
        # function is still alive while the pointer (the result) is
        # used.
        v = c_int(42)

        self.assertEqual(pointer(v).contents.value, 42)
        result = f(pointer(v))
        self.assertEqual(type(result), POINTER(c_int))
        self.assertEqual(result.contents.value, 42)

        # This one works...
        result = f(pointer(v))
        self.assertEqual(result.contents.value, v.value)

        p = pointer(c_int(99))
        result = f(p)
        self.assertEqual(result.contents.value, 99)

        arg = byref(v)
        result = f(arg)
        self.assertNotEqual(result.contents, v.value)

        self.assertRaises(ArgumentError, f, byref(c_short(22)))

        # It is dangerous, however, because you don't control the lifetime
        # of the pointer:
        result = f(byref(c_int(99)))
        self.assertNotEqual(result.contents, 99)

    def test_convert_errors(self):
        # FIX: renamed from ``test_errors`` — a later method of the same
        # name shadowed this one, so it silently never ran.
        f = dll._testfunc_p_p
        f.restype = c_int

        class X(Structure):
            _fields_ = [("y", c_int)]

        self.assertRaises(TypeError, f, X())  # cannot convert parameter

    ################################################################
    def test_shorts(self):
        f = dll._testfunc_callback_i_if

        args = []
        expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
                    1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]

        def callback(v):
            args.append(v)
            return v

        CallBack = CFUNCTYPE(c_int, c_int)

        cb = CallBack(callback)
        f(2**18, cb)
        self.assertEqual(args, expected)

    ################################################################
    def test_callbacks(self):
        f = dll._testfunc_callback_i_if
        f.restype = c_int

        MyCallback = CFUNCTYPE(c_int, c_int)

        def callback(value):
            #print "called back with", value
            return value

        cb = MyCallback(callback)
        result = f(-10, cb)
        self.assertEqual(result, -18)

        # test with prototype
        f.argtypes = [c_int, MyCallback]
        cb = MyCallback(callback)
        result = f(-10, cb)
        self.assertEqual(result, -18)

        AnotherCallback = WINFUNCTYPE(c_int, c_int, c_int, c_int, c_int)

        # check that the prototype works: we call f with wrong
        # argument types
        cb = AnotherCallback(callback)
        self.assertRaises(ArgumentError, f, -10, cb)

    def test_callbacks_2(self):
        # Can also use simple datatypes as argument type specifiers
        # for the callback function.
        # In this case the call receives an instance of that type
        f = dll._testfunc_callback_i_if
        f.restype = c_int

        MyCallback = CFUNCTYPE(c_int, c_int)

        f.argtypes = [c_int, MyCallback]

        def callback(value):
            #print "called back with", value
            self.assertEqual(type(value), int)
            return value

        cb = MyCallback(callback)
        result = f(-10, cb)
        self.assertEqual(result, -18)

    def test_longlong_callbacks(self):

        f = dll._testfunc_callback_q_qf
        f.restype = c_longlong

        MyCallback = CFUNCTYPE(c_longlong, c_longlong)

        f.argtypes = [c_longlong, MyCallback]

        def callback(value):
            self.assertTrue(isinstance(value, (int, long)))
            return value & 0x7FFFFFFF

        cb = MyCallback(callback)

        self.assertEqual(13577625587, f(1000000000000, cb))

    def test_errors(self):
        # Looking up a missing export / symbol must fail cleanly.
        self.assertRaises(AttributeError, getattr, dll, "_xxx_yyy")
        self.assertRaises(ValueError, c_int.in_dll, dll, "_xxx_yyy")

    def test_byval(self):
        # without prototype
        ptin = POINT(1, 2)
        ptout = POINT()
        # EXPORT int _testfunc_byval(point in, point *pout)
        result = dll._testfunc_byval(ptin, byref(ptout))
        got = result, ptout.x, ptout.y
        expected = 3, 1, 2
        self.assertEqual(got, expected)

        # with prototype
        ptin = POINT(101, 102)
        ptout = POINT()
        dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
        dll._testfunc_byval.restype = c_int
        result = dll._testfunc_byval(ptin, byref(ptout))
        got = result, ptout.x, ptout.y
        expected = 203, 101, 102
        self.assertEqual(got, expected)

    def test_struct_return_2H(self):
        class S2H(Structure):
            _fields_ = [("x", c_short),
                        ("y", c_short)]
        dll.ret_2h_func.restype = S2H
        dll.ret_2h_func.argtypes = [S2H]
        inp = S2H(99, 88)
        s2h = dll.ret_2h_func(inp)
        self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))

    if sys.platform == "win32":
        def test_struct_return_2H_stdcall(self):
            class S2H(Structure):
                _fields_ = [("x", c_short),
                            ("y", c_short)]

            windll.s_ret_2h_func.restype = S2H
            windll.s_ret_2h_func.argtypes = [S2H]
            s2h = windll.s_ret_2h_func(S2H(99, 88))
            self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))

    def test_struct_return_8H(self):
        class S8I(Structure):
            _fields_ = [("a", c_int),
                        ("b", c_int),
                        ("c", c_int),
                        ("d", c_int),
                        ("e", c_int),
                        ("f", c_int),
                        ("g", c_int),
                        ("h", c_int)]
        dll.ret_8i_func.restype = S8I
        dll.ret_8i_func.argtypes = [S8I]
        inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
        s8i = dll.ret_8i_func(inp)
        self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
                         (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))

    if sys.platform == "win32":
        def test_struct_return_8H_stdcall(self):
            class S8I(Structure):
                _fields_ = [("a", c_int),
                            ("b", c_int),
                            ("c", c_int),
                            ("d", c_int),
                            ("e", c_int),
                            ("f", c_int),
                            ("g", c_int),
                            ("h", c_int)]
            windll.s_ret_8i_func.restype = S8I
            windll.s_ret_8i_func.argtypes = [S8I]
            inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
            s8i = windll.s_ret_8i_func(inp)
            self.assertEqual(
                (s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
                (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))

    @xfail
    def test_sf1651235(self):
        # see http://www.python.org/sf/1651235
        proto = CFUNCTYPE(c_int, RECT, POINT)

        def callback(*args):
            return 0

        callback = proto(callback)
        self.assertRaises(ArgumentError, lambda: callback((1, 2, 3, 4), POINT()))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/python
#
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for generate_gyp_py.
It's tough to test the lower-level GetSourceFiles() and GetObjectFiles()
functions, so this focuses on the higher-level functions assuming those two
functions are working as intended (i.e., producing lists of files).
"""
import string
import unittest
import generate_gyp as gg
class ModuleUnittest(unittest.TestCase):
  """Tests for the module-level helpers in generate_gyp."""

  def testGetObjectToSourceMapping(self):
    sources = ['a.c', 'b.asm', 'c.cc']
    self.assertEqual({'a.o': 'a.c', 'b.o': 'b.asm', 'c.o': 'c.cc'},
                     gg.GetObjectToSourceMapping(sources))

  def testGetSourceFileSet(self):
    mapping = {'a.o': 'a.c', 'b.o': 'b.asm', 'c.o': 'c.cc'}
    self.assertEqual(set(['a.c', 'c.cc']),
                     gg.GetSourceFileSet(mapping, ['a.o', 'c.o']))

  def testGetSourceFileSet_NotFound(self):
    # Unknown object files propagate the KeyError rather than being dropped.
    mapping = {'a.o': 'a.c', 'b.o': 'b.asm', 'c.o': 'c.cc'}
    self.assertRaises(KeyError, gg.GetSourceFileSet, mapping, ['d.o'])
class SourceSetUnittest(unittest.TestCase):
  """Covers SourceSet equality, Intersect/Difference algebra, gyp stanza
  generation and the pairwise-disjoint partitioning algorithm."""

  def testEquals(self):
    base = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']), set(['3']))
    same = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']), set(['3']))
    other_sources = gg.SourceSet(set(['c', 'd']), set(['1']), set(['2']),
                                 set(['3']))
    other_archs = gg.SourceSet(set(['a', 'b']), set(['0']), set(['2']),
                               set(['3']))
    other_targets = gg.SourceSet(set(['a', 'b']), set(['1']), set(['0']),
                                 set(['3']))
    other_platforms = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']),
                                   set(['0']))
    self.assertEqual(base, same)
    self.assertNotEqual(base, other_sources)
    self.assertNotEqual(base, other_archs)
    self.assertNotEqual(base, other_targets)
    self.assertNotEqual(base, other_platforms)

  def testIntersect_Exact(self):
    left = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']), set(['5']))
    right = gg.SourceSet(set(['a', 'b']), set(['3']), set(['4']), set(['6']))
    result = left.Intersect(right)
    # Sources intersect; architectures/targets/platforms union.
    self.assertEqual(result.sources, set(['a', 'b']))
    self.assertEqual(result.architectures, set(['1', '3']))
    self.assertEqual(result.targets, set(['2', '4']))
    self.assertEqual(result.platforms, set(['5', '6']))
    self.assertFalse(result.IsEmpty())

  def testIntersect_Disjoint(self):
    left = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']), set(['5']))
    right = gg.SourceSet(set(['c', 'd']), set(['3']), set(['4']), set(['6']))
    result = left.Intersect(right)
    self.assertEqual(result.sources, set())
    self.assertEqual(result.architectures, set(['1', '3']))
    self.assertEqual(result.targets, set(['2', '4']))
    self.assertEqual(result.platforms, set(['5', '6']))
    self.assertTrue(result.IsEmpty())

  def testIntersect_Overlap(self):
    left = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']), set(['5']))
    right = gg.SourceSet(set(['b', 'c']), set(['3']), set(['4']), set(['6']))
    result = left.Intersect(right)
    self.assertEqual(result.sources, set(['b']))
    self.assertEqual(result.architectures, set(['1', '3']))
    self.assertEqual(result.targets, set(['2', '4']))
    self.assertEqual(result.platforms, set(['5', '6']))
    self.assertFalse(result.IsEmpty())

  def testDifference_Exact(self):
    left = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']), set(['3']))
    right = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']), set(['3']))
    result = left.Difference(right)
    # Sources subtract; architectures/targets/platforms intersect.
    self.assertEqual(result.sources, set())
    self.assertEqual(result.architectures, set(['1']))
    self.assertEqual(result.targets, set(['2']))
    self.assertEqual(result.platforms, set(['3']))
    self.assertTrue(result.IsEmpty())

  def testDifference_Disjoint(self):
    left = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']), set(['5']))
    right = gg.SourceSet(set(['c', 'd']), set(['3']), set(['4']), set(['6']))
    result = left.Difference(right)
    self.assertEqual(result.sources, set(['a', 'b']))
    self.assertEqual(result.architectures, set())
    self.assertEqual(result.targets, set())
    self.assertEqual(result.platforms, set())
    self.assertTrue(result.IsEmpty())

  def testDifference_Overlap(self):
    left = gg.SourceSet(set(['a', 'b']), set(['1']), set(['2']), set(['5']))
    right = gg.SourceSet(set(['b', 'c', 'd']), set(['1', '3']),
                         set(['2', '4']), set(['5', '6']))
    result = left.Difference(right)
    self.assertEqual(result.sources, set(['a']))
    self.assertEqual(result.architectures, set(['1']))
    self.assertEqual(result.targets, set(['2']))
    self.assertEqual(result.platforms, set(['5']))
    self.assertFalse(result.IsEmpty())

  def testGenerateGypStanza(self):
    # ia32 should just be ia32. Win should appear as an OS restriction.
    stanza = gg.SourceSet(set(['a', 'b']), set(['ia32']), set(['Chromium']),
                          set(['win'])).GenerateGypStanza()
    string.index(stanza, 'target_arch == "ia32"')
    string.index(stanza, 'OS == "win"')
    # x64 should just be x64. Linux should not appear as an OS restriction.
    stanza = gg.SourceSet(set(['a', 'b']), set(['x64']), set(['Chromium']),
                          set(['linux'])).GenerateGypStanza()
    string.index(stanza, 'target_arch == "x64"')
    self.assertEqual(string.find(stanza, 'OS == "linux"'), -1)
    # arm should just be arm.
    stanza = gg.SourceSet(set(['a', 'b']), set(['arm']), set(['Chromium']),
                          set(['linux'])).GenerateGypStanza()
    string.index(stanza, 'target_arch == "arm"')
    # arm-neon should be arm and flip the arm_neon switch.
    stanza = gg.SourceSet(set(['a', 'b']), set(['arm-neon']),
                          set(['Chromium']),
                          set(['linux'])).GenerateGypStanza()
    string.index(stanza, 'target_arch == "arm" and arm_neon == 1')
    # All architectures and all platforms case.
    stanza = gg.SourceSet(set(['a', 'b']),
                          set(['arm', 'arm-neon', 'x64', 'ia32']),
                          set(['Chromium']),
                          set(['win', 'linux'])).GenerateGypStanza()
    string.index(stanza, '(1)')
    self.assertEqual(string.find(stanza, 'OS == "linux"'), -1)
    self.assertEqual(string.find(stanza, 'OS == "win"'), -1)
    # All targets case.
    stanza = gg.SourceSet(set(['a', 'b']), set(['arm']),
                          set(['Chromium', 'ChromiumOS', 'Chrome',
                               'ChromeOS']),
                          set(['win'])).GenerateGypStanza()
    string.index(stanza, '(1)')

  def assertEqualSets(self, actual, expected):
    # Pairwise membership checks give better failure messages than one
    # equality assertion over the whole collection.
    for candidate in actual:
      self.assertTrue(candidate in expected, msg='Unexpected set: %s' % candidate)
    for wanted in expected:
      self.assertTrue(wanted in actual,
                      msg='Did not find expected set: %s' % wanted)

  def testCreatePairwiseDisjointSets_Pair(self):
    first = gg.SourceSet(set(['common', 'intel']), set(['ia32']),
                         set(['Chromium']), set(['win']))
    second = gg.SourceSet(set(['common', 'intel', 'chrome']), set(['ia32']),
                          set(['Chrome']), set(['win']))
    expected = [
        gg.SourceSet(set(['common', 'intel']), set(['ia32']),
                     set(['Chromium', 'Chrome']), set(['win'])),
        gg.SourceSet(set(['chrome']), set(['ia32']),
                     set(['Chrome']), set(['win'])),
    ]
    self.assertEqualSets(gg.CreatePairwiseDisjointSets([first, second]),
                         expected)

  def testCreatePairwiseDisjointSets_Triplet(self):
    first = gg.SourceSet(set(['common', 'intel']), set(['ia32']),
                         set(['Chromium']), set(['win']))
    second = gg.SourceSet(set(['common', 'intel', 'chrome']), set(['x64']),
                          set(['Chrome']), set(['win']))
    third = gg.SourceSet(set(['common', 'arm']), set(['arm']),
                         set(['Chromium']), set(['win']))
    expected = [
        gg.SourceSet(set(['common']), set(['ia32', 'x64', 'arm']),
                     set(['Chromium', 'Chrome']), set(['win'])),
        gg.SourceSet(set(['intel']), set(['ia32', 'x64']),
                     set(['Chromium', 'Chrome']), set(['win'])),
        gg.SourceSet(set(['chrome']), set(['x64']),
                     set(['Chrome']), set(['win'])),
        gg.SourceSet(set(['arm']), set(['arm']), set(['Chromium']),
                     set(['win'])),
    ]
    self.assertEqualSets(
        gg.CreatePairwiseDisjointSets([first, second, third]), expected)

  def testCreatePairwiseDisjointSets_Multiple(self):
    inputs = [
        gg.SourceSet(set(['common', 'intel']), set(['ia32']),
                     set(['Chromium']), set(['linux'])),
        gg.SourceSet(set(['common', 'intel', 'chrome']), set(['ia32']),
                     set(['Chrome']), set(['linux'])),
        gg.SourceSet(set(['common', 'intel']), set(['x64']),
                     set(['Chromium']), set(['linux'])),
        gg.SourceSet(set(['common', 'intel', 'chrome']), set(['x64']),
                     set(['Chrome']), set(['linux'])),
        gg.SourceSet(set(['common', 'arm']), set(['arm']),
                     set(['Chromium']), set(['linux'])),
        gg.SourceSet(set(['common', 'arm-neon', 'chrome', 'chromeos']),
                     set(['arm-neon']), set(['ChromeOS']), set(['linux'])),
    ]
    expected = [
        gg.SourceSet(set(['common']),
                     set(['ia32', 'x64', 'arm', 'arm-neon']),
                     set(['Chromium', 'Chrome', 'ChromeOS']),
                     set(['linux'])),
        gg.SourceSet(set(['intel']), set(['ia32', 'x64']),
                     set(['Chromium', 'Chrome']), set(['linux'])),
        gg.SourceSet(set(['arm']), set(['arm']), set(['Chromium']),
                     set(['linux'])),
        gg.SourceSet(set(['chrome']), set(['ia32', 'x64', 'arm-neon']),
                     set(['Chrome', 'ChromeOS']), set(['linux'])),
        gg.SourceSet(set(['arm-neon', 'chromeos']), set(['arm-neon']),
                     set(['ChromeOS']), set(['linux'])),
    ]
    self.assertEqualSets(gg.CreatePairwiseDisjointSets(inputs), expected)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| |
import json
import urllib2
import numpy as np
import re
class userStats:
    """Per-user activity counters accumulated while walking a feed."""

    def __init__(self, fb_id, fb_name):
        # identity
        self.fb_ID = fb_id
        self.fb_name = fb_name
        # counters, all starting at zero and incremented by populateData
        self.num_posts = 0
        self.num_comments = 0
        self.likes_on_posts = 0
        self.likes_on_comments = 0
        self.comments_on_posts = 0
        self.num_words_comments = 0
        self.num_words_post = 0
        self.likes_genrated = 0  # likes this user placed on posts only
class fbAccess:
    """Thin wrapper around the Graph API access token.

    pagingRequest retries interactively (via raw_input) until it gets a
    valid JSON response or the user gives up, so it is only suitable for
    console use.
    """
    def __init__(self,AccessToken):
        self.AccessToken = AccessToken
    def pagingRequest(self,paging_url):
        """Fetch *paging_url* and return the decoded JSON feed.

        The access_token query parameter embedded in the URL is replaced
        with the currently stored token before each attempt.  Returns {}
        when the user chooses to abort after an unrecognized error.
        """
        while True:
            # extract access token from the request
            partition1 = paging_url.partition("access_token=")
            partition2 = partition1[2].partition("&")
            # if the URL carries a stale token, splice in the current one
            if(partition2[0]!=self.AccessToken):
                paging_url=partition1[0]+partition1[1]+self.AccessToken+partition2[1]+partition2[2]
            try:
                new_response = urllib2.urlopen(paging_url)
                new_json_string = new_response.read()
                new_feed = json.loads(new_json_string)
                new_response.close()
                return new_feed
            except urllib2.HTTPError as e:
                # API-level failure (e.g. expired token): show the error
                # body and prompt for a replacement token, then retry.
                print e.code
                error_feed = json.loads(e.read())
                print error_feed['error']['message']
                print "Some Problem with Facebook Access:"
                self.AccessToken = raw_input("Try A New Access Token:")
            except urllib2.URLError as e:
                # network down: block until the user says it is back
                wait=raw_input("\nLost Internet connection Press Enter When connection is Back.")
            except ValueError as e:
                # truncated/invalid JSON body: just retry the same URL
                print "Recived invalid JSON Object, querying again."
            except:
                print "Unrecognized Error Occured"
                # give option to exit or continue
                wait=raw_input("Press c to continue any thing else to exit now:")
                if not wait=='c':
                    return {}
def populateData(init_request,timelog_filename,AccessToken):
fb = fbAccess(AccessToken)
print "Querying facebook for initial list of posts",
feed = fb.pagingRequest(init_request)
print "... Data Recived"
post_count=0
contributers = {}
#open a file for appending time info
time_logs = open(timelog_filename+".txt",'w')
while 'data' in feed.keys():
for post in feed['data']:
post_count+=1
post_author_name=post['from']['name']
post_author_ID=post['from']['id']
print "Analysing Post #",post_count,"by:",post_author_name
# Author
if post_author_ID in contributers.keys():
contributers[post_author_ID].num_posts+=1
else:
new_contributer=userStats(post_author_ID,post_author_name)
contributers[post_author_ID] = new_contributer
new_contributer.num_posts+=1
# Time
if 'created_time' in post.keys():
time_logs.write(post['created_time']+'\n')
# Word Count
if 'message' in post.keys():
msg = post['message']
wordList = re.split("\.|\s|#|\*|,",msg)
wordList = filter(None,wordList)
contributers[post_author_ID].num_words_post+=len(wordList)
# Likes
if 'likes' in post.keys() and 'data' in post['likes'].keys():
like_feed=post['likes']
while 'data' in like_feed.keys():
contributers[post_author_ID].likes_on_posts+=len(like_feed['data'])
for liker in like_feed['data']:
liker_name=liker['name']
liker_id=liker['id']
if liker_id in contributers.keys():
contributers[liker_id].likes_genrated+=1
else:
new_liker = userStats(liker_id,liker_name)
contributers[liker_id]=new_liker
new_liker.likes_genrated+=1
# like paging
if 'paging' in like_feed and 'next' in like_feed['paging'].keys():
print "Querying facebook for more likes on this post",
like_feed = fb.pagingRequest(like_feed['paging']['next'])
if like_feed=={}:
return contributers
print "... Data Recived"
else:
like_feed={}
print "No more likes to explore"
else:
print "Post has no likes"
# Comments
if 'comments' in post.keys():
comment_feed = post['comments']
while 'data' in comment_feed.keys():
contributers[post_author_ID].comments_on_posts+=len(comment_feed['data'])
for comment in comment_feed['data']:
# author
commenter_id=comment['from']['id']
if commenter_id in contributers.keys():
contributers[commenter_id].num_comments+=1
else:
new_commenter=userStats(commenter_id,comment['from']['name'])
contributers[commenter_id]=new_commenter
new_commenter.num_comments+=1
# time
if 'created_time' in comment.keys():
time_logs.write(comment['created_time']+'\n')
# word count
if 'message' in comment.keys():
msg = comment['message']
wordList = re.split("\.|\s|#|\*|,",msg)
wordList = filter(None,wordList)
contributers[commenter_id].num_words_comments+=len(wordList)
# likes
if 'like_count' in comment.keys():
contributers[commenter_id].likes_on_comments+=comment['like_count']
# comment paging
if 'paging' in comment_feed and 'next' in comment_feed['paging'].keys():
print "Querying facebook for more comments on this post",
comment_feed = fb.pagingRequest(comment_feed['paging']['next'])
if comment_feed=={}:
return contributers
print "... Data Recived"
else:
comment_feed={}
print "No more comments to explore"
else:
print "Post has no comments"
# post paging
if 'paging' in feed and 'next' in feed['paging'].keys():
print "Querying facebook for more posts",
feed = fb.pagingRequest(feed['paging']['next'])
if feed=={}:
return contributers
print "... Data Recived"
else:
print "No more posts to analyse"
feed={}
time_logs.close()
return contributers
def dumpData(filename, users):
    """Dump per-user statistics into <filename>.csv, one row per user.

    Users whose name cannot be encoded are skipped with a warning.  Each
    row is built in memory first and written in a single call, so a
    failing user never leaves a partial line in the file (previously the
    fields were written one by one and a mid-row UnicodeEncodeError
    corrupted the CSV).
    """
    user_data = open(filename + ".csv", 'w')
    try:
        user_data.write("Name,Posts,Comments,Comments Recived,Likes:Posts,Likes:Comments,Likes:Genrated,Words:Posts,Words:Comments\n")
        for user in users.values():
            try:
                # build the whole row before touching the file
                row = ",".join([str(user.fb_name),
                                str(user.num_posts),
                                str(user.num_comments),
                                str(user.comments_on_posts),
                                str(user.likes_on_posts),
                                str(user.likes_on_comments),
                                str(user.likes_genrated),
                                str(user.num_words_post),
                                str(user.num_words_comments)]) + "\n"
            except UnicodeEncodeError:
                print("Unrecognized characters, ignoring user.")
                continue
            user_data.write(row)
    finally:
        user_data.close()
def installProxy(username,password,host,port):
    """Install a process-wide urllib2 opener that routes HTTPS requests
    through an authenticated proxy.

    All arguments are concatenated into the proxy URL, so *port* must be
    a string, not an int.  Credentials end up embedded in the URL.
    """
    proxyHandler = urllib2.ProxyHandler({'https': 'https://'+username+':'+password+'@'+host+':'+port})
    proxyOpener = urllib2.build_opener(proxyHandler)
    urllib2.install_opener(proxyOpener)
| |
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
import re
from xml.sax.saxutils import escape, unescape
import html5lib
from html5lib import treebuilders, treewalkers, serializer
from html5lib.tokenizer import HTMLTokenizer
from html5lib.constants import tokenTypes
class HTMLSanitizerMixin(object):
    """Sanitization of XHTML+MathML+SVG and of inline style attributes.

    Tokens for elements outside the whitelist are escaped into literal
    text; <script>/<style> elements are renamed to "toberemoved" so a
    post-processing pass (see clean_html below) can strip them entirely.
    """
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
        'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
        'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
        'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none']
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
        'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
        'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
        'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
        'set', 'stop', 'svg', 'switch', 'text', 'tspan', 'use']
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'callback',
        'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
        'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
        'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
        'face', 'for', 'form', 'field', 'frame', 'galleryimg', 'gutter', 'headers',
        'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
        'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
        'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
        'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
        'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
        'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg',
        'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
        'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
        'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
        'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
        'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
        'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
        'width', 'wrap', 'xml:lang']
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
        'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
        'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
        'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
        'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
        'xlink:type', 'xmlns', 'xmlns:xlink']
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
        'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
        'fill-opacity', 'fill-rule', 'font-family', 'font-size',
        'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
        'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
        'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
        'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
        'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
        'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
        'opacity', 'orient', 'origin', 'overline-position',
        'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
        'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
        'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
        'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
        'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
        'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
        'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
        'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
        'transform', 'type', 'u1', 'u2', 'underline-position',
        'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
        'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
        'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
        'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
        'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
        'y1', 'y2', 'zoomAndPan']
    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc',
        'xlink:href', 'xml:base']
    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
        'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
        'mask', 'stroke']
    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
        'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
        'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
        'set', 'use']
    acceptable_css_properties = ['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width']
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow']
    acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity']
    acceptable_protocols = [ 'ed2k', 'ftp', 'http', 'https', 'irc',
        'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
        'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
        'ssh', 'sftp', 'rtsp', 'afs' ]
    remove_tags = ['script', 'style']
    # subclasses may define their own versions of these constants
    allowed_elements = acceptable_elements + mathml_elements + svg_elements
    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
    allowed_css_properties = acceptable_css_properties
    allowed_css_keywords = acceptable_css_keywords
    allowed_svg_properties = acceptable_svg_properties
    allowed_protocols = acceptable_protocols
    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style
    # attributes are parsed, and a restricted set, specified by
    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
    # Attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
    # in ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script&gt; do_nasty_stuff() &lt;/script&gt;
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Sanitize one html5lib token, mutating it in place.

        Returns the (possibly rewritten) token, or None for tokens that
        should be dropped entirely (comments).
        """
        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in tokenTypes.keys():
            token_type = tokenTypes[token_type]
        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            token["name"] = token["name"].lower()
            if token["name"] in self.allowed_elements:
                # Allowed element: keep it but scrub its attributes.
                if "data" in token:
                    # reversed so the first occurrence of a duplicated
                    # attribute wins when building the dict
                    attrs = dict([(name,val) for name,val in
                                  token["data"][::-1]
                                  if name in self.allowed_attributes])
                    for attr in self.attr_val_is_uri:
                        if attr not in attrs:
                            continue
                        # strip control chars that could hide the scheme,
                        # then drop the attribute if the scheme is not
                        # whitelisted
                        val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                               unescape(attrs[attr])).lower()
                        #remove replacement characters from unescaped characters
                        val_unescaped = val_unescaped.replace(u"\ufffd", "")
                        if (re.match("^[a-z0-9][-+.a-z0-9]*:",val_unescaped) and
                            (val_unescaped.split(':')[0] not in
                             self.allowed_protocols)):
                            del attrs[attr]
                    for attr in self.svg_attr_val_allows_ref:
                        if attr in attrs:
                            attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                                 ' ',
                                                 unescape(attrs[attr]))
                    if (token["name"] in self.svg_allow_local_href and
                        'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                            attrs['xlink:href'])):
                        del attrs['xlink:href']
                    if 'style' in attrs:
                        attrs['style'] = self.sanitize_css(attrs['style'])
                    token["data"] = [[name,val] for name,val in attrs.items()]
                return token
            else:
                # Disallowed element: turn the tag itself into escaped
                # character data so it renders as text.
                if token["name"] in self.remove_tags:
                    # mark script/style for complete removal in clean_html
                    token["name"] = "toberemoved"
                if token_type == tokenTypes["EndTag"]:
                    token["data"] = "</%s>" % token["name"]
                elif token["data"]:
                    attrs = ''.join([' %s="%s"' % (k,escape(v)) for k,v in token["data"]])
                    token["data"] = "<%s%s>" % (token["name"],attrs)
                else:
                    token["data"] = "<%s>" % token["name"]
                if token.get("selfClosing"):
                    token["data"]=token["data"][:-1] + "/>"
                if token["type"] in tokenTypes.keys():
                    token["type"] = "Characters"
                else:
                    token["type"] = tokenTypes["Characters"]
                # (a leftover debug print of <style> tokens was removed here)
                return token
        elif token_type == tokenTypes["Comment"]:
            # drop comments entirely
            pass
        else:
            return token
    def sanitize_css(self, style):
        """Return *style* reduced to whitelisted CSS properties/keywords;
        return '' when the declaration cannot be safely parsed."""
        # disallow urls
        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
        # gauntlet: reject anything containing characters that could
        # smuggle expressions past the simple property parser below
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return ''
        clean = []
        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
            if not value: continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background','border','margin',
                                                'padding']:
                # shorthand properties: every keyword must be whitelisted
                # or look like a colour/length literal
                for keyword in value.split():
                    if not keyword in self.acceptable_css_keywords and \
                        not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$",keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')
        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that filters every token through the sanitizer mixin."""

    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False):
        # Case-matching defaults differ from html5lib's usual ones because
        # we only ever output lowercase html anyway.  This solution
        # doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName)

    def __iter__(self):
        for raw_token in HTMLTokenizer.__iter__(self):
            cleaned = self.sanitize_token(raw_token)
            if cleaned:
                yield cleaned
def clean_html(buf):
    """Cleans HTML of dangerous tags and content.

    Parses *buf* through the sanitizing tokenizer, serializes it back to
    a UTF-8 string, then strips out the "toberemoved" placeholder
    elements that sanitize_token substitutes for script/style tags.
    """
    buf = buf.strip()
    if not buf:
        return buf
    html_parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"), tokenizer=HTMLSanitizer)
    dom_tree = html_parser.parseFragment(buf)
    walker = treewalkers.getTreeWalker("dom")
    stream = walker(dom_tree)
    s = serializer.htmlserializer.HTMLSerializer(omit_optional_tags=False,quote_attr_values=True)
    output = s.render(stream, 'utf-8')
    # Repeatedly strip placeholder elements: first whole matched pairs,
    # then any orphaned closing or opening tags.  The loop guards against
    # making no progress (e.g. a malformed remnant) by breaking when an
    # iteration changes nothing.
    while 'toberemoved' in output:
        oldoutput = output
        matches = re.findall(r'<toberemoved.*?>.*?</toberemoved>', output, re.DOTALL)
        for s in matches:
            output = output.replace(s, '')
        matches = re.findall(r'</toberemoved>', output, re.DOTALL)
        for s in matches:
            output = output.replace(s, '')
        matches = re.findall(r'<toberemoved.*?>', output, re.DOTALL)
        for s in matches:
            output = output.replace(s, '')
        if output == oldoutput:
            break
    return output
| |
import copy
import unittest
from mock import patch
from datetime import datetime
import lxml.etree
import requests
import scrapper
def monkey_patch_requests_get():
    """Replace requests.get with a stub serving files from ./fixtures.

    The stub returns an ad-hoc object exposing only the two attributes
    the code under test reads: `content` and `status_code` (always 200).
    """
    def fixture_get(uri, *args, **kwargs):
        with open('./fixtures/%s' % uri) as fh:
            body = fh.read()
        response_cls = type(
            str('mocked_requests'),
            (object,),
            {'content': body, 'status_code': 200},
        )
        return response_cls()
    setattr(requests, 'get', fixture_get)
class BaseTestCase(unittest.TestCase):
    """Shared base: installs the fixtures-backed requests.get stub once
    per test class."""
    @classmethod
    def setUpClass(cls):
        monkey_patch_requests_get()
class TestField(BaseTestCase):
    """Behaviour of the scrapper.Field descriptor."""

    def test_raises_exception(self):
        # A Field must be given a selector.
        with self.assertRaises(ValueError):
            scrapper.Field()

    def test_basic_initialisation(self):
        descriptor = scrapper.Field('h1')
        self.assertEqual(descriptor.selector, 'h1')
        self.assertEqual(descriptor.callback, None)
        self.assertEqual(descriptor._value, None)
        self.assertEqual(descriptor.__get__(None), None)

    def test_returned_value_must_be_exact(self):
        # The stored value is handed back unchanged, not coerced to str.
        descriptor = scrapper.Field('//h1/text()')
        descriptor._value = 1234
        self.assertEqual(str(descriptor.__get__(None)), '1234')
        self.assertEqual(type(descriptor.__get__(None)), int)

    def test_repr(self):
        descriptor = scrapper.Field('//h1/text()')
        self.assertEqual("Field('//h1/text()', None)", repr(descriptor))
class TestItem(BaseTestCase):
    """Behaviour of scrapper.Item: fetching, parsing and field access."""

    def test_deepcopy(self):
        class EntryItem(scrapper.Item):
            title = scrapper.Field('//h1/text()')
        original = EntryItem('single_entry.html')
        duplicate = copy.deepcopy(original)
        self.assertFalse(original is duplicate)
        self.assertFalse(original.title is duplicate.title)

    def test_unknown_selector(self):
        class EntryItem(scrapper.Item):
            title = scrapper.Field('//h12/text()')
        entry = EntryItem('single_entry.html')
        self.assertIsNone(entry.title)

    def test_proper_initialization(self):
        with patch('requests.get') as mock:
            mocked_get = mock.return_value
            mocked_get.status_code = 200
            mocked_get.content = ('<html><body>'
                                  '<h1>Test A1</h1></body></html>')
            entry = scrapper.Item('http://dummy.org')
            self.assertEqual(entry._url, 'http://dummy.org')
            self.assertEqual(entry._response, mocked_get)
            self.assertEqual(
                lxml.etree.tostring(entry._content).decode(),
                '<html><body><h1>Test A1</h1></body></html>',
            )

    def test_simple_selection(self):
        class EntryItem(scrapper.Item):
            title = scrapper.Field('//h1/text()')
        with patch('requests.get') as mock:
            mocked_get = mock.return_value
            mocked_get.status_code = 200
            mocked_get.content = ('<html><body>'
                                  '<h1>Test A1</h1></body></html>')
            entry = EntryItem('http://dummy.org')
            self.assertEqual(entry.title, 'Test A1')

    def test_simple_content(self):
        class EntryItem(scrapper.Item):
            title = scrapper.Field('//div[@class="wrap"]/h1/text()')
            author = scrapper.Field('//div[@class="wrap"]/a/text()')
            # note: 'emialto:' presumably matches a deliberate typo in the
            # fixture file — keep in sync with fixtures/single_entry.html
            author_email = scrapper.Field(
                '//div[@class="wrap"]/a/@href',
                lambda value, content, response: value.replace(
                    'emialto:', '',
                ),
            )
            content = scrapper.Field(
                '//div[@class="wrap"]/div[@class="content"]/text()',
                lambda value, content, response: value.strip(),
            )
        with patch('requests.get') as mock:
            mocked_get = mock.return_value
            mocked_get.status_code = 200
            with open('./fixtures/single_entry.html') as fh:
                mocked_get.content = fh.read()
            entry = EntryItem('http://dummy.org')
            self.assertEqual(entry.title, 'Title')
            self.assertEqual(entry.author, 'Author field')
            self.assertEqual(entry.author_email, 'author@example')
            self.assertEqual(entry.content, 'Lorem ipsum')
            self.assertEqual(
                entry.as_dict(),
                {
                    'author': 'Author field',
                    'author_email': 'author@example',
                    'content': 'Lorem ipsum',
                    'title': 'Title',
                },
            )
class TestPaginationCreation(BaseTestCase):
    """Validation and content-selector iteration of scrapper.Pagination.

    NOTE: this class was originally also named TestPagination; a second
    TestPagination class defined later in this module shadowed it, so
    these tests were silently never collected by unittest.  Renamed so
    they actually run.
    """

    def test_should_throw_exception(self):
        # Pagination needs a concrete item_class derived from Item.
        with self.assertRaises(scrapper.ScrapperException):
            scrapper.Pagination()

        class TestClassPagination(scrapper.Pagination):
            pass
        with self.assertRaises(scrapper.ScrapperException):
            TestClassPagination()

        class TestClassPagination(scrapper.Pagination):
            item_class = scrapper.Item
        with self.assertRaises(scrapper.ScrapperException):
            TestClassPagination()

        class TestClassPagination(scrapper.Pagination):
            item_class = object
        with self.assertRaises(scrapper.ScrapperException):
            TestClassPagination()

    def test_creation(self):
        class TestClassItem(scrapper.Item):
            name = scrapper.Field('//h1/text()')

        class TestClassPagination(scrapper.Pagination):
            item_class = TestClassItem
            content_selector = '//div[@class="entry"]'

        multi_item = TestClassPagination('page_1.html')
        items = [item for item in multi_item]
        items_names = [item.name for item in items]
        self.assertEqual(len(items), 4)
        self.assertEqual(items_names, [
            'Auguste Eichmann',
            'Dominick Von',
            'Scottie Skiles',
            'Almon Tromp',
        ])
class TestPaginationInitialisation(BaseTestCase):
    """Pagination validation and iteration over a paginated index.

    NOTE(review): this class was previously also named ``TestPagination``,
    which shadowed the earlier ``TestPagination`` class at module level, so
    the earlier class's tests were silently never discovered or run.
    Renamed so both test suites execute.
    """

    def test_raises_exception(self):
        """Pagination subclasses without a proper item_class must raise."""
        with self.assertRaises(scrapper.ScrapperException):
            scrapper.Pagination()

        class TestItemSet(scrapper.Pagination):
            item_class = object

        with self.assertRaises(scrapper.ScrapperException):
            TestItemSet()

        # Renamed from the ambiguous local name ``TestPagination``.
        class BadItemPagination(scrapper.Pagination):
            item_class = scrapper.Item

        with self.assertRaises(scrapper.ScrapperException):
            BadItemPagination()

    def test_initialisation(self):
        """A fully configured Pagination follows index links and yields
        one Item per linked page."""
        class TestCrawlerClass(scrapper.Item):
            title = scrapper.Field('//h1/text()')

        class IndexPagination(scrapper.Pagination):
            base_url = ''
            url = 'page_index.html'
            item_class = TestCrawlerClass
            links_selector = '//a/@href'

        item_set = IndexPagination()
        items = list(item_set)
        self.assertEqual(len(items), 3)
        self.assertEqual(
            [item.title for item in items],
            ['Auguste Eichmann', 'Mr. Frankie Olson', 'Luke Bins'],
        )
class TestFetchDataFunction(BaseTestCase):
    """Tests for scrapper.fetch_data: HTTP error handling and rate limiting."""

    def test_raises_exception(self):
        """A non-200 HTTP status must raise ScrapperException."""
        with patch('requests.get') as mock:
            mocked_get = mock.return_value
            mocked_get.status_code = 500
            with self.assertRaises(scrapper.ScrapperException):
                scrapper.fetch_data('http://example.org')

    def test_delay(self):
        """Consecutive fetch_data calls must be spaced by FETCH_DATA_DELAY."""
        old_delay = scrapper.FETCH_DATA_DELAY
        scrapper.FETCH_DATA_DELAY = 1.5
        try:
            with patch('requests.get') as mock:
                mocked_get = mock.return_value
                mocked_get.status_code = 200
                mocked_get.content = ''
                # The first call is never delayed; it only records the time.
                scrapper.fetch_data('http://example.org')
                start = datetime.now()
                scrapper.fetch_data('http://example.org')
                elapsed = (datetime.now() - start).total_seconds()
                self.assertGreaterEqual(elapsed, scrapper.FETCH_DATA_DELAY)
        finally:
            # Restore the module-level delay even if the assertion above
            # fails; otherwise every later test would inherit the 1.5 s
            # throttle (the original version leaked it on failure).
            scrapper.FETCH_DATA_DELAY = old_delay
# Allow running this test module directly: python <this_file>.py
if __name__ == '__main__':
    unittest.main()
# NOTE(review): the lines that followed here ("| Subsets and Splits ...",
# "No community queries yet ...") were non-Python residue left over from a
# web/dataset-viewer extraction; they are commented out so the module parses.