repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
almarklein/pyelastix
|
pyelastix.py
|
_clear_dir
|
python
|
def _clear_dir(dirName):
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
|
Remove a directory and its contents. Ignore any failures.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L174-L186
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Get whether a process with the given pid is currently running.

    Dispatches to the platform-specific implementation.
    """
    on_windows = sys.platform.startswith("win")
    if on_windows:
        return _is_pid_running_on_windows(pid)
    return _is_pid_running_on_unix(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    # Query the win32 API via ctypes: open a handle on the process and
    # ask for its exit code; a special code (_STILL_ACTIVE) means the
    # process is still running.
    import ctypes.wintypes
    kernel32 = ctypes.windll.kernel32
    # 1 == PROCESS_TERMINATE access; enough to get a handle if the pid exists
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False
    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)
    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running.
    # NOTE(review): `is_running` is True when GetExitCodeProcess *failed*,
    # i.e. the code errs on the side of reporting "running" — per the
    # comment above this appears intentional; confirm before changing.
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executables
def _find_executables(name):
""" Try to find an executable.
"""
exe_name = name + '.exe' * sys.platform.startswith('win')
env_path = os.environ.get(name.upper()+ '_PATH', '')
possible_locations = []
def add(*dirs):
for d in dirs:
if d and d not in possible_locations and os.path.isdir(d):
possible_locations.append(d)
# Get list of possible locations
add(env_path)
try:
add(os.path.dirname(os.path.abspath(__file__)))
except NameError: # __file__ may not exist
pass
add(os.path.dirname(sys.executable))
add(os.path.expanduser('~'))
# Platform specific possible locations
if sys.platform.startswith('win'):
add('c:\\program files', os.environ.get('PROGRAMFILES'),
'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
else:
possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
def do_check_version(exe):
try:
return subprocess.check_output([exe, '--version']).decode().strip()
except Exception:
# print('not a good exe', exe)
return False
# If env path is the exe itself ...
if os.path.isfile(env_path):
ver = do_check_version(env_path)
if ver:
return env_path, ver
# First try to find obvious locations
for d in possible_locations:
for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
# Maybe the exe is on the PATH
ver = do_check_version(exe_name)
if ver:
return exe_name, ver
# Try harder
for d in possible_locations:
for sub in reversed(sorted(os.listdir(d))):
if sub.startswith(name):
exe = os.path.join(d, sub, exe_name)
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
return None, None
EXES = []
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.

    Returns a list ``[elastix_path, transformix_path]``. The result is
    cached in the module-level ``EXES`` list, so the filesystem search
    runs at most once per process.
    """
    # Serve from cache if the search has already been done
    if EXES:
        if EXES[0]:
            return EXES
        else:
            raise RuntimeError('No Elastix executable.')
    # Find exe
    elastix, ver = _find_executables('elastix')
    if elastix:
        # Assume transformix lives next to elastix, with the same extension
        base, ext = os.path.splitext(elastix)
        base = os.path.dirname(base)
        transformix = os.path.join(base, 'transformix' + ext)
        # NOTE(review): the transformix path is not verified to exist here
        EXES.extend([elastix, transformix])
        print('Found %s in %r' % (ver, elastix))
        return EXES
    else:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
# %% Code for maintaining the temp dirs
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
    # Make sure it exists
    if not os.path.isdir(tempdir):
        os.makedirs(tempdir)
    # Clean up all directories for which the process no longer exists
    # (per-process dirs are named 'id_<pid>_<threadid>', see below)
    for fname in os.listdir(tempdir):
        dirName = os.path.join(tempdir, fname)
        # Check if is right kind of dir
        if not (os.path.isdir(dirName) and fname.startswith('id_')):
            continue
        # Get pid and check if its running
        try:
            pid = int(fname.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(dirName)
    # Select dir that included process and thread id
    # (current_thread is the modern spelling; currentThread the legacy one)
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(dir):
        os.mkdir(dir)
    return dir
def _clear_temp_dir():
    """ Remove all files from this process/thread's temporary directory.

    Files that cannot be removed are silently skipped; the directory
    itself is left in place.
    """
    tempdir = get_tempdir()
    for entry in os.listdir(tempdir):
        try:
            os.remove(os.path.join(tempdir, entry))
        except Exception:
            pass  # best effort only
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Parse Elastix output lines and display registration progress. """
    def __init__(self):
        self._level = 0
        self.reset()
    def update(self, s):
        """ Feed one line of Elastix output. Tracks the current
        resolution level and shows progress for iteration lines
        (tab-separated, starting with the iteration number).
        """
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)
    def get_int(self, s):
        """ Parse an int from a string; returns 0 when parsing fails. """
        try:
            return int(s)
        except Exception:
            return 0
    def reset(self):
        """ Forget the current message and start a fresh output line. """
        self._message = ''
        print()
    def show_progress(self, iter):
        """ Print the resolution/iteration status, erasing the previous
        message with backspaces first.
        """
        backspaces = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(backspaces + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)

    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).

    Parameters:

    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.

    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir (left-over files from a previous registration)
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image (used to derive dimension/dtype parameters)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise? If so, combine the images into one (ndim+1)-D array and
    # force the stack-transform parameters that groupwise reg requires.
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along that dimension (schedule entry 0 for the
        # stack axis, 2**i for the spatial axes, finest level last)
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (arrays are written to the temp dir)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Get path of trafo param file (written by elastix, read by transformix)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Find deformation field with transformix
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Pull apart deformation fields in multiple images: the last axis of
    # each field holds the x/y/z displacement components
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0]  # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # NOTE(review): this rescaling looks like a leftover tuned to a
    # specific dataset; it silently turns every input into float64.
    # Kept as-is to preserve behavior — confirm whether it is wanted.
    im = im * (1.0 / 3000)
    # MetaImage (mhd) header template; placeholders filled in below
    header_lines = ["ObjectType = Image",
                    "NDims = <ndim>",
                    "BinaryData = True",
                    "BinaryDataByteOrderMSB = False",
                    "CompressedData = False",
                    #"TransformMatrix = <transmatrix>",
                    "Offset = <origin>",
                    "CenterOfRotation = <centrot>",
                    "ElementSpacing = <sampling>",
                    "DimSize = <shape>",
                    "ElementType = <dtype>",
                    "ElementDataFile = <fname>",
                    ""]
    text = '\n'.join(header_lines)
    # Determine file names
    tempdir = get_tempdir()
    raw_name = 'im%i.raw' % id
    raw_path = os.path.join(tempdir, raw_name)
    mhd_path = os.path.join(tempdir, 'im%i.mhd' % id)
    # Sampling and origin fall back to unit spacing at the origin.
    # (The scaling above produced a plain ndarray, so any Image
    # attributes on the input are not visible here.)
    if hasattr(im, 'sampling'):
        sampling = im.sampling
    else:
        sampling = [1 for _ in im.shape]
    if hasattr(im, 'origin'):
        origin = im.origin
    else:
        origin = [0 for _ in im.shape]
    # Express shape/sampling/origin as x-y-z ordered strings
    shape_str = ' '.join([str(s) for s in reversed(im.shape)])
    sampling_str = ' '.join([str(s) for s in reversed(sampling)])
    origin_str = ' '.join([str(s) for s in reversed(origin)])
    # Map the numpy dtype onto the ITK element type name
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: ' + str(im.dtype))
    # Fill in the header template
    replacements = {'<fname>': raw_name,
                    '<ndim>': str(im.ndim),
                    '<shape>': shape_str,
                    '<sampling>': sampling_str,
                    '<origin>': origin_str,
                    '<dtype>': dtype_itk,
                    '<centrot>': ' '.join(['0' for _ in im.shape])}
    if im.ndim == 2:
        replacements['<transmatrix>'] = '1 0 0 1'
    elif im.ndim == 3:
        replacements['<transmatrix>'] = '1 0 0 0 1 0 0 0 1'
    # (no transform matrix for 4D; the template line is commented out anyway)
    for placeholder, value in replacements.items():
        text = text.replace(placeholder, value)
    # Write data file
    with open(raw_path, 'wb') as f:
        f.write(im.data)
    # Write mhd file
    with open(mhd_path, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done, return path of mhd file
    return mhd_path
def _read_image_data( mhd_file):
    """ Read the resulting image data and return it as a numpy array.

    `mhd_file` is a filename relative to the pyelastix temp dir. The
    returned array is an `Image` (ndarray subclass) with `sampling` and
    `origin` attributes parsed from the mhd header (in z-y-x order).
    """
    tempdir = get_tempdir()
    # Load description from mhd file
    # NOTE(review): file handles below rely on GC for closing
    fname = tempdir + '/' + mhd_file
    des = open(fname, 'r').read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    data = open(fname, 'rb').read()
    # Determine dtype from the ITK element type name
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array (flat for now; shaped below)
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse shape stuff to make z-y-x order
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: when the buffer holds more
    # elements than the header shape, assume a trailing component axis
    # (e.g. the per-dimension components of a deformation field)
    N = np.prod(shape)
    if N != a.size:
        extraDim = int( a.size / N )
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
        a = Image(a)
        a.sampling = sampling
        a.origin = origin
    return a
class Image(np.ndarray):
    """ Thin ndarray subclass used to carry `sampling` and `origin`
    metadata attributes alongside image data.
    """
    def __new__(cls, array):
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            # No .view() available (no array metadata on PyPy);
            # just return the original object unchanged.
            return array
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)

    Use `get_default_params()` to get a Parameters struct with sensible
    default values.
    """
    def as_dict(self):
        """ Returns the parameters as a dictionary (a copy). """
        return dict(self.__dict__)
    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)
    def __str__(self):
        # Right-align keys on the longest key name
        width = 0
        for key in self.__dict__:
            width = max(width, len(key))
        # How many chars left for the value (to stay under 80 columns)
        room = 79 - (width + 6)
        text = '<%i parameters>\n' % len(self.__dict__)
        for key in self.__dict__.keys():
            shown = repr(self.__dict__[key])
            if len(shown) > room:
                shown = shown[:room - 3] + '...'
            text += key.rjust(width + 4) + ": %s\n" % (shown)
        return text
    def __add__(self, other):
        # Values from `other` win on duplicate keys
        combined = Parameters()
        combined.__dict__.update(self.__dict__)
        combined.__dict__.update(other.__dict__)
        return combined
def _get_fixed_params(im):
    """ Parameters that the user has no influence on. Mostly chosen
    based on the input images. Returns an empty Parameters struct when
    `im` is not a numpy array (e.g. a file path).
    """
    p = Parameters()
    if not isinstance(im, np.ndarray):
        return p
    # Dimensionality follows the input array
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write result, so I can verify
    p.WriteResultImage = True
    # How to write the result: pixel type derived from the input dtype
    itk_name = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_name.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.
    """
    p = Parameters()
    # Internal format used during the registration process
    p.FixedInternalImagePixelType = "float"
    p.MovingInternalImagePixelType = "float"
    # Image direction
    p.UseDirectionCosines = True
    # In almost all cases you'd want multi resolution
    p.Registration = 'MultiResolutionRegistration'
    # Pyramid options
    # *RecursiveImagePyramid downsamples the images
    # *SmoothingImagePyramid does not downsample
    p.FixedImagePyramid = "FixedRecursiveImagePyramid"
    p.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Whether transforms are combined by composition or by addition.
    # It does not influence the results very much.
    p.HowToCombineTransforms = "Compose"
    # For out of range pixels
    p.DefaultPixelValue = 0
    # Interpolator used during interpolation and its order
    # 1 means linear interpolation, 3 means cubic.
    p.Interpolator = "BSplineInterpolator"
    p.BSplineInterpolationOrder = 1
    # Interpolator used during interpolation of final level, and its order
    p.ResampleInterpolator = "FinalBSplineInterpolator"
    p.FinalBSplineInterpolationOrder = 3
    # According to the manual, there is currently only one resampler
    p.Resampler = "DefaultResampler"
    # Done
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')

    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.

    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:

    * Transform (str):
        Can be 'BSplineTransform', 'EulerTransform', or
        'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        When using the BSplineTransform, the final spacing of the grid.
        This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
        When using a rigid or affine transform. Scales the affine matrix
        elements compared to the translations, to make sure they are in
        the same range. In general, it's best to use automatic scales
        estimation.
    * AutomaticTransformInitialization (bool):
        When using a rigid or affine transform. Automatically guess an
        initial translation by aligning the geometric centers of the
        fixed and moving.
    * NumberOfResolutions (int):
        Most registration algorithms adopt a multiresolution approach
        to direct the solution towards a global optimum and to speed
        up the process. This parameter specifies the number of scales
        to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
        Maximum number of iterations in each resolution level.
        200-2000 works usually fine for nonrigid registration.
        The more, the better, but the longer computation time.
        This is an important parameter! (default 500).
    """
    # Init
    p = Parameters()
    type = type.upper()  # accept any case; NB: intentionally shadows builtin
    # ===== Metric to use =====
    p.Metric = 'AdvancedMattesMutualInformation'
    # Number of grey level bins in each resolution level,
    # for the mutual information. 16 or 32 usually works fine.
    # sets default value for NumberOf[Fixed/Moving]HistogramBins
    p.NumberOfHistogramBins = 32
    # Taking samples for mutual information
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True
    # ====== Transform to use ======
    # The number of levels in the image pyramid
    p.NumberOfResolutions = 4
    # NOTE(review): an unrecognized `type` leaves p.Transform unset
    if type in ['B', 'BSPLINE', 'B-SPLINE']:
        # Bspline transform
        p.Transform = 'BSplineTransform'
        # The final grid spacing (at the smallest level)
        p.FinalGridSpacingInPhysicalUnits = 16
    if type in ['RIGID', 'EULER', 'AFFINE']:
        # Affine or Euler transform
        if type in ['RIGID', 'EULER']:
            p.Transform = 'EulerTransform'
        else:
            p.Transform = 'AffineTransform'
        # Scales the affine matrix elements compared to the translations,
        # to make sure they are in the same range. In general, it's best to
        # use automatic scales estimation.
        p.AutomaticScalesEstimation = True
        # Automatically guess an initial translation by aligning the
        # geometric centers of the fixed and moving.
        p.AutomaticTransformInitialization = True
    # ===== Optimizer to use =====
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Maximum number of iterations in each resolution level:
    # 200-2000 works usually fine for nonrigid registration.
    # The more, the better, but the longer computation time.
    # This is an important parameter!
    p.MaximumNumberOfIterations = 500
    # The step size of the optimizer, in mm. By default the voxel size is used.
    # which usually works well. In case of unusual high-resolution images
    # (eg histology) it is necessary to increase this value a bit, to the size
    # of the "smallest visible structure" in the image:
    #p.MaximumStepLength = 1.0 Default uses voxel spaceing
    # Another optional parameter for the AdaptiveStochasticGradientDescent
    #p.SigmoidInitialTime = 4.0
    # ===== Also interesting parameters =====
    #p.FinalGridSpacingInVoxels = 16
    #p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #p.ErodeMask = "false"
    # Done
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
    * Combine parameters from different sources
    * Perform checks to prevent non-compatible parameters
    * Extend parameters that need a list with one element per dimension
    """
    # Later sources win on duplicates: fixed < advanced < user-supplied
    combined = _get_fixed_params(im1) + get_advanced_params()
    combined = combined + params
    params = combined.as_dict()
    # Grid-spacing parameters need one value per image dimension
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in params and not isinstance(params[key], (list, tuple)):
                params[key] = [params[key]] * im1.ndim
    # Voxel-based spacing takes precedence over physical-unit spacing
    if 'FinalGridSpacingInVoxels' in params:
        params.pop('FinalGridSpacingInPhysicalUnits', None)
    return params
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.

    Each entry becomes a line '(Key value ...)'; booleans are written
    as quoted "true"/"false", strings are quoted, numbers are bare.
    Returns the path of the written file (params.txt in the temp dir).
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # Check bool *before* int: isinstance(True, int) is True, and the
        # previous membership test (`val in [True, False]`) also matched
        # the ints 0 and 1, writing them quoted like booleans.
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        else:
            # Fail loudly instead of silently emitting 'None'
            raise ValueError('Cannot convert parameter value %r' % (val,))
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text
    with open(path, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
get_tempdir
|
python
|
def get_tempdir():
tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
# Make sure it exists
if not os.path.isdir(tempdir):
os.makedirs(tempdir)
# Clean up all directories for which the process no longer exists
for fname in os.listdir(tempdir):
dirName = os.path.join(tempdir, fname)
# Check if is right kind of dir
if not (os.path.isdir(dirName) and fname.startswith('id_')):
continue
# Get pid and check if its running
try:
pid = int(fname.split('_')[1])
except Exception:
continue
if not _is_pid_running(pid):
_clear_dir(dirName)
# Select dir that included process and thread id
tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
else threading.currentThread())
dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
if not os.path.isdir(dir):
os.mkdir(dir)
return dir
|
Get the temporary directory where pyelastix stores its temporary
files. The directory is specific to the current process and the
calling thread. Generally, the user does not need this; directories
are automatically cleaned up. Though Elastix log files are also
written here.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L189-L222
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
"""Get whether a process with the given pid is currently running.
"""
if sys.platform.startswith("win"):
return _is_pid_running_on_windows(pid)
else:
return _is_pid_running_on_unix(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
import ctypes.wintypes
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
if handle == 0:
return False
# If the process exited recently, a pid may still exist for the handle.
# So, check if we can get the exit code.
exit_code = ctypes.wintypes.DWORD()
is_running = (
kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
kernel32.CloseHandle(handle)
# See if we couldn't get the exit code or the exit code indicates that the
# process is still running.
return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
""" Try to find an executable.
"""
exe_name = name + '.exe' * sys.platform.startswith('win')
env_path = os.environ.get(name.upper()+ '_PATH', '')
possible_locations = []
def add(*dirs):
for d in dirs:
if d and d not in possible_locations and os.path.isdir(d):
possible_locations.append(d)
# Get list of possible locations
add(env_path)
try:
add(os.path.dirname(os.path.abspath(__file__)))
except NameError: # __file__ may not exist
pass
add(os.path.dirname(sys.executable))
add(os.path.expanduser('~'))
# Platform specific possible locations
if sys.platform.startswith('win'):
add('c:\\program files', os.environ.get('PROGRAMFILES'),
'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
else:
possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
def do_check_version(exe):
try:
return subprocess.check_output([exe, '--version']).decode().strip()
except Exception:
# print('not a good exe', exe)
return False
# If env path is the exe itself ...
if os.path.isfile(env_path):
ver = do_check_version(env_path)
if ver:
return env_path, ver
# First try to find obvious locations
for d in possible_locations:
for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
# Maybe the exe is on the PATH
ver = do_check_version(exe_name)
if ver:
return exe_name, ver
# Try harder
for d in possible_locations:
for sub in reversed(sorted(os.listdir(d))):
if sub.startswith(name):
exe = os.path.join(d, sub, exe_name)
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
return None, None
EXES = []
def get_elastix_exes():
""" Get the executables for elastix and transformix. Raises an error
if they cannot be found.
"""
if EXES:
if EXES[0]:
return EXES
else:
raise RuntimeError('No Elastix executable.')
# Find exe
elastix, ver = _find_executables('elastix')
if elastix:
base, ext = os.path.splitext(elastix)
base = os.path.dirname(base)
transformix = os.path.join(base, 'transformix' + ext)
EXES.extend([elastix, transformix])
print('Found %s in %r' % (ver, elastix))
return EXES
else:
raise RuntimeError('Could not find Elastix executable. Download '
'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
'looks for the exe in a series of common locations. '
'Set ELASTIX_PATH if necessary.')
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    root = os.path.join(tempfile.gettempdir(), 'pyelastix')
    if not os.path.isdir(root):
        os.makedirs(root)
    # Garbage-collect directories left behind by processes that have exited
    for entry in os.listdir(root):
        path = os.path.join(root, entry)
        if not (entry.startswith('id_') and os.path.isdir(path)):
            continue
        # The directory name encodes the owning pid: id_<pid>_<tid>
        try:
            pid = int(entry.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(path)
    # The directory for this caller encodes both process id and thread id
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    mydir = os.path.join(root, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(mydir):
        os.mkdir(mydir)
    return mydir
def _clear_temp_dir():
    """ Clear the temporary directory.
    """
    tempdir = get_tempdir()
    # Best-effort: ignore files that cannot be removed (e.g. still open)
    for entry in os.listdir(tempdir):
        try:
            os.remove(os.path.join(tempdir, entry))
        except Exception:
            pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
# Module-level lookup tables: numpy dtype name -> ITK type, and the reverse.
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Tiny console progress reporter that parses Elastix stdout lines. """

    def __init__(self):
        self._level = 0
        self.reset()

    def update(self, s):
        # A "Resolution: N" line marks the start of a new pyramid level
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        # Iteration lines are tab-separated; the first field is the number
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)

    def get_int(self, s):
        # Parse an int, returning 0 when the text is not a number
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        self._message = ''
        print()

    def show_progress(self, iter):
        # Erase the previous message with backspaces, then print the new one
        backspaces = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(backspaces + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)
    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).
    Parameters:
    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.
    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir (results of any previous run in this process/thread)
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image: used only to derive parameters (dims, dtype)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise? (signalled by im2 being None and im1 being a list)
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images
        # (stacked along a new first axis)
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters required for groupwise (stack) registration
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along that dimension: schedule of 0 for the stack
        # axis and 2**i for the spatial axes, per resolution level
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (arrays are written to the temp dir)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Get path of trafo param file (Elastix writes it next to the results)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register
    if True:
        # Compile command to execute
        # NOTE(review): command is a list while _system3 runs it with
        # shell=True -- verify the arguments are passed through correctly
        # on this platform.
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Find deformation field (transformix applies the found transform)
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Pull apart deformation fields in multiple images: the last axis
    # holds the vector components; split it into one array per dimension
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0] # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # Capture shape, sampling and origin *before* any conversion: making a
    # contiguous copy would drop the optional .sampling/.origin attributes.
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]
    # Write the data as-is. (An earlier revision scaled by 1.0/3000 here,
    # which silently corrupted the values and promoted integer arrays to
    # float64, so the written ElementType no longer matched the data.)
    # A contiguous buffer is required for the raw write below.
    im = np.ascontiguousarray(im)
    # Create text (mhd header template)
    lines = [ "ObjectType = Image",
              "NDims = <ndim>",
              "BinaryData = True",
              "BinaryDataByteOrderMSB = False",
              "CompressedData = False",
              #"TransformMatrix = <transmatrix>",
              "Offset = <origin>",
              "CenterOfRotation = <centrot>",
              "ElementSpacing = <sampling>",
              "DimSize = <shape>",
              "ElementType = <dtype>",
              "ElementDataFile = <fname>",
              "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get data type
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
    # Set mhd text
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    if im.ndim==2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim==3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim==4:
        pass # ???
    # Write data file (context managers close deterministically)
    with open(fname_raw, 'wb') as f:
        f.write(im.data)
    # Write mhd file
    with open(fname_mhd, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data(mhd_file):
    """ Read the resulting image data and return it as a numpy array.

    The .mhd header is parsed for the data filename, element type, shape,
    sampling and origin; the raw data is then loaded and reshaped. Returns
    an Image (ndarray subclass) with .sampling and .origin set (z-y-x
    order, reversed from the header's x-y-z order).
    """
    tempdir = get_tempdir()
    # Load description from mhd file (use context managers so the file
    # handles are closed deterministically, not left to the GC)
    fname = tempdir + '/' + mhd_file
    with open(fname, 'r') as f:
        des = f.read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    with open(fname, 'rb') as f:
        data = f.read()
    # Determine dtype
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array from the raw bytes
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse shape stuff to make z-y-x order
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: if there are more elements than
    # the spatial shape accounts for, the remainder is a component axis
    N = np.prod(shape)
    if N != a.size:
        extraDim = int( a.size / N )
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    a.shape = shape
    # Attach world metadata
    a = Image(a)
    a.sampling = sampling
    a.origin = origin
    return a
class Image(np.ndarray):
    """ ndarray subclass used to carry .sampling and .origin metadata. """

    def __new__(cls, array):
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            # Just return the original; no metadata on the array in Pypy!
            return array
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)
    Use `get_default_params()` to get a Parameters struct with sensible
    default values.
    """

    def as_dict(self):
        """ Returns the parameters as a dictionary.
        """
        # Return a copy so callers cannot mutate our attributes
        return dict(self.__dict__)

    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)

    def __str__(self):
        # Column width: length of the longest parameter name
        width = max([len(key) for key in self.__dict__] + [0])
        # Chars available for the value (keep lines under 80 chars)
        avail = 79 - (width + 6)
        lines = ['<%i parameters>' % len(self.__dict__)]
        for key in self.__dict__.keys():
            valuestr = repr(self.__dict__[key])
            if len(valuestr) > avail:
                valuestr = valuestr[:avail-3] + '...'
            lines.append(key.rjust(width+4) + ": %s" % (valuestr))
        return '\n'.join(lines) + '\n'

    def __add__(self, other):
        # Later operand wins on conflicting keys
        combined = Parameters()
        combined.__dict__.update(self.__dict__)
        combined.__dict__.update(other.__dict__)
        return combined
def _get_fixed_params(im):
    """ Parameters that the user has no influence on. Mostly chosen
    bases on the input images.
    """
    p = Parameters()
    # For non-array input (a filename) nothing can be derived
    if not isinstance(im, np.ndarray):
        return p
    # Dimension of the inputs
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write result, so I can verify
    p.WriteResultImage = True
    # How to write the result: pixel type derived from the input dtype
    itk_type = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_type.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.

    Returns a `Parameters` instance; combine with other parameter sets
    via addition (later operand wins).
    """
    p = Parameters()
    # Internal format used during the registration process
    p.FixedInternalImagePixelType = "float"
    p.MovingInternalImagePixelType = "float"
    # Image direction
    p.UseDirectionCosines = True
    # In almost all cases you'd want multi resolution
    p.Registration = 'MultiResolutionRegistration'
    # Pyramid options
    # *RecursiveImagePyramid downsamples the images
    # *SmoothingImagePyramid does not downsample
    p.FixedImagePyramid = "FixedRecursiveImagePyramid"
    p.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Whether transforms are combined by composition or by addition.
    # It does not influence the results very much.
    p.HowToCombineTransforms = "Compose"
    # For out of range pixels
    p.DefaultPixelValue = 0
    # Interpolator used during interpolation and its order
    # 1 means linear interpolation, 3 means cubic.
    p.Interpolator = "BSplineInterpolator"
    p.BSplineInterpolationOrder = 1
    # Interpolator used during interpolation of final level, and its order
    p.ResampleInterpolator = "FinalBSplineInterpolator"
    p.FinalBSplineInterpolationOrder = 3
    # According to the manual, there is currently only one resampler
    p.Resampler = "DefaultResampler"
    # Done
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')
    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.
    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:
    * Transform (str):
        Can be 'BSplineTransform', 'EulerTransform', or
        'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        When using the BSplineTransform, the final spacing of the grid.
        This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
        When using a rigid or affine transform. Scales the affine matrix
        elements compared to the translations, to make sure they are in
        the same range. In general, it's best to use automatic scales
        estimation.
    * AutomaticTransformInitialization (bool):
        When using a rigid or affine transform. Automatically guess an
        initial translation by aligning the geometric centers of the
        fixed and moving.
    * NumberOfResolutions (int):
        Most registration algorithms adopt a multiresolution approach
        to direct the solution towards a global optimum and to speed
        up the process. This parameter specifies the number of scales
        to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
        Maximum number of iterations in each resolution level.
        200-2000 works usually fine for nonrigid registration.
        The more, the better, but the longer computation time.
        This is an important parameter! (default 500).
    """
    # Init
    p = Parameters()
    # Accept lowercase/mixed-case type names
    type = type.upper()
    # ===== Metric to use =====
    p.Metric = 'AdvancedMattesMutualInformation'
    # Number of grey level bins in each resolution level,
    # for the mutual information. 16 or 32 usually works fine.
    # sets default value for NumberOf[Fixed/Moving]HistogramBins
    p.NumberOfHistogramBins = 32
    # Taking samples for mutual information
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True
    # ====== Transform to use ======
    # The number of levels in the image pyramid
    p.NumberOfResolutions = 4
    if type in ['B', 'BSPLINE', 'B-SPLINE']:
        # Bspline transform
        p.Transform = 'BSplineTransform'
        # The final grid spacing (at the smallest level)
        p.FinalGridSpacingInPhysicalUnits = 16
    if type in ['RIGID', 'EULER', 'AFFINE']:
        # Affine or Euler transform
        if type in ['RIGID', 'EULER']:
            p.Transform = 'EulerTransform'
        else:
            p.Transform = 'AffineTransform'
        # Scales the affine matrix elements compared to the translations,
        # to make sure they are in the same range. In general, it's best to
        # use automatic scales estimation.
        p.AutomaticScalesEstimation = True
        # Automatically guess an initial translation by aligning the
        # geometric centers of the fixed and moving.
        p.AutomaticTransformInitialization = True
    # ===== Optimizer to use =====
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Maximum number of iterations in each resolution level:
    # 200-2000 works usually fine for nonrigid registration.
    # The more, the better, but the longer computation time.
    # This is an important parameter!
    p.MaximumNumberOfIterations = 500
    # The step size of the optimizer, in mm. By default the voxel size is used.
    # which usually works well. In case of unusual high-resolution images
    # (eg histology) it is necessary to increase this value a bit, to the size
    # of the "smallest visible structure" in the image:
    #p.MaximumStepLength = 1.0  # Default uses voxel spacing
    # Another optional parameter for the AdaptiveStochasticGradientDescent
    #p.SigmoidInitialTime = 4.0
    # ===== Also interesting parameters =====
    #p.FinalGridSpacingInVoxels = 16
    #p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #p.ErodeMask = "false"
    # Done
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
    * Combine parameters from different sources
    * Perform checks to prevent non-compatible parameters
    * Extend parameters that need a list with one element per dimension
    """
    # Merge: fixed params, then advanced defaults, then the user's params
    # (later sources win on conflicting keys)
    merged = _get_fixed_params(im1) + get_advanced_params() + params
    params = merged.as_dict()
    # Grid-spacing parameters need one value per image dimension
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in params.keys() and not isinstance(params[key],
                                                       (list, tuple)):
                params[key] = [params[key]] * im1.ndim
    # The two grid-spacing flavours are mutually exclusive; voxels wins
    if 'FinalGridSpacingInVoxels' in params:
        params.pop('FinalGridSpacingInPhysicalUnits', None)
    return params
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.

    Returns the path of the written params.txt file.
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # Test for bool *before* int: bool is a subclass of int, and the
        # previous check (`val in [True, False]`) also matched the
        # integers 1 and 0, writing them as "true"/"false".
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (lists become space-separated)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text (context manager closes the file deterministically)
    with open(path, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
_clear_temp_dir
|
python
|
def _clear_temp_dir():
tempdir = get_tempdir()
for fname in os.listdir(tempdir):
try:
os.remove( os.path.join(tempdir, fname) )
except Exception:
pass
|
Clear the temporary directory.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L225-L233
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Get whether a process with the given pid is currently running.
    """
    # Dispatch to the platform-specific implementation
    if not sys.platform.startswith("win"):
        return _is_pid_running_on_unix(pid)
    return _is_pid_running_on_windows(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    """Check for pid existence via the Win32 process API (Windows only)."""
    import ctypes.wintypes
    kernel32 = ctypes.windll.kernel32
    # OpenProcess with PROCESS_TERMINATE (1) access; 0 handle means no
    # such process (or no access, treated as not running)
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False
    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)
    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running.
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executables
def _find_executables(name):
    """ Try to find an executable.

    Returns (path, version_string) when a working executable is found,
    else (None, None). Candidates are validated by running them with
    '--version'.
    """
    # On Windows the binary carries an .exe suffix
    exe_name = name + '.exe' * sys.platform.startswith('win')
    # An env var like ELASTIX_PATH can point to a dir or to the exe itself
    env_path = os.environ.get(name.upper()+ '_PATH', '')
    possible_locations = []
    def add(*dirs):
        # Collect unique, existing directories, preserving order
        for d in dirs:
            if d and d not in possible_locations and os.path.isdir(d):
                possible_locations.append(d)
    # Get list of possible locations
    add(env_path)
    try:
        add(os.path.dirname(os.path.abspath(__file__)))
    except NameError: # __file__ may not exist
        pass
    add(os.path.dirname(sys.executable))
    add(os.path.expanduser('~'))
    # Platform specific possible locations
    if sys.platform.startswith('win'):
        add('c:\\program files', os.environ.get('PROGRAMFILES'),
            'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
    else:
        possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
    def do_check_version(exe):
        # Returns the version string if the exe runs, else False
        try:
            return subprocess.check_output([exe, '--version']).decode().strip()
        except Exception:
            # print('not a good exe', exe)
            return False
    # If env path is the exe itself ...
    if os.path.isfile(env_path):
        ver = do_check_version(env_path)
        if ver:
            return env_path, ver
    # First try to find obvious locations (dir itself or a subdir named
    # after the tool)
    for d in possible_locations:
        for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
            if os.path.isfile(exe):
                ver = do_check_version(exe)
                if ver:
                    return exe, ver
    # Maybe the exe is on the PATH
    ver = do_check_version(exe_name)
    if ver:
        return exe_name, ver
    # Try harder: any subdir whose name starts with the tool name,
    # newest (highest-sorting) first
    for d in possible_locations:
        for sub in reversed(sorted(os.listdir(d))):
            if sub.startswith(name):
                exe = os.path.join(d, sub, exe_name)
                if os.path.isfile(exe):
                    ver = do_check_version(exe)
                    if ver:
                        return exe, ver
    return None, None
# Cached [elastix_exe, transformix_exe]; filled lazily by get_elastix_exes().
EXES = []
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.
    """
    # Serve the cached result when a previous lookup already ran
    if EXES:
        if not EXES[0]:
            raise RuntimeError('No Elastix executable.')
        return EXES
    # Locate the elastix binary; transformix is assumed to live next to it
    elastix, ver = _find_executables('elastix')
    if not elastix:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
    root, ext = os.path.splitext(elastix)
    exe_dir = os.path.dirname(root)
    EXES.extend([elastix, os.path.join(exe_dir, 'transformix' + ext)])
    print('Found %s in %r' % (ver, elastix))
    return EXES
# %% Code for maintaining the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    root = os.path.join(tempfile.gettempdir(), 'pyelastix')
    if not os.path.isdir(root):
        os.makedirs(root)
    # Garbage-collect directories left behind by processes that have exited
    for entry in os.listdir(root):
        path = os.path.join(root, entry)
        if not (entry.startswith('id_') and os.path.isdir(path)):
            continue
        # The directory name encodes the owning pid: id_<pid>_<tid>
        try:
            pid = int(entry.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(path)
    # The directory for this caller encodes both process id and thread id
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    mydir = os.path.join(root, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(mydir):
        os.mkdir(mydir)
    return mydir
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
# Module-level lookup tables: numpy dtype name -> ITK type, and the reverse.
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Tiny console progress reporter that parses Elastix stdout lines. """

    def __init__(self):
        self._level = 0
        self.reset()

    def update(self, s):
        # A "Resolution: N" line marks the start of a new pyramid level
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        # Iteration lines are tab-separated; the first field is the number
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)

    def get_int(self, s):
        # Parse an int, returning 0 when the text is not a number
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        self._message = ''
        print()

    def show_progress(self, iter):
        # Erase the previous message with backspaces, then print the new one
        backspaces = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(backspaces + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)

    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).

    Parameters:

    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.

    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.

    Raises RuntimeError if the elastix/transformix run fails or its
    output cannot be read back.
    """
    # Clear dir (the temp dir is per process/thread, see get_tempdir())
    tempdir = get_tempdir()
    _clear_temp_dir()

    # Reference image, used to derive dimension/dtype-dependent params
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]

    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()

    # Groupwise? (signalled by im2 being None and im1 being a list)
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images:
        # stack them along a new first axis
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters for a groupwise (stack) registration
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along the stack dimension: one schedule entry per
        # dimension per resolution level, 0 for the stack axis
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples

    # Get paths of input images (ndarrays are written to the temp dir)
    path_im1, path_im2 = _get_image_paths(im1, im2)

    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)

    # Get path of trafo param file (written by the elastix executable)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')

    # Register
    if True:

        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)

        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)

    # Find deformation field
    if True:

        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)

        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)

    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]

    # Pull apart deformation fields in multiple images: the last axis of
    # the field array holds one component per spatial dimension
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)

    if im2 is not None:
        fields = fields[0]  # For pairwise reg, return 1 field, not a list

    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.

    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file. The array's dtype determines the ElementType written
    to the header.
    """
    # Get shape, sampling and origin BEFORE any array conversion: ndarray
    # operations return plain arrays, which drops the Image metadata.
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]

    # Bugfix: an undocumented ``im = im * (1.0/3000)`` used to rescale the
    # data here, which silently changed the intensity scale of the
    # registered result and promoted every input to float64. Write the
    # data unmodified; only ensure a C-contiguous buffer for the raw write.
    im = np.ascontiguousarray(im)

    # Create text (template for the MetaImage header)
    lines = [ "ObjectType = Image",
        "NDims = <ndim>",
        "BinaryData = True",
        "BinaryDataByteOrderMSB = False",
        "CompressedData = False",
        #"TransformMatrix = <transmatrix>",
        "Offset = <origin>",
        "CenterOfRotation = <centrot>",
        "ElementSpacing = <sampling>",
        "DimSize = <shape>",
        "ElementType = <dtype>",
        "ElementDataFile = <fname>",
        "" ]
    text = '\n'.join(lines)

    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)

    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])

    # Get data type
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))

    # Set mhd text
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    if im.ndim == 2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim == 3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim == 4:
        pass  # ???

    # Write data file
    f = open(fname_raw, 'wb')
    try:
        f.write(im.data)
    finally:
        f.close()

    # Write mhd file
    f = open(fname_mhd, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()

    # Done, return path of mhd file
    return fname_mhd
def _read_image_data( mhd_file):
    """ Read the resulting image data and return it as a numpy array.

    Parses the mhd header file (in this process' temp dir) to find the
    raw data file, dtype, shape, sampling and origin, then loads the raw
    data. Returns an `Image` (ndarray subclass) with `sampling` and
    `origin` attributes set in z-y-x order.

    Raises RuntimeError on an unknown ElementType or a shape mismatch.
    """
    tempdir = get_tempdir()

    # Load description from mhd file
    fname = tempdir + '/' + mhd_file
    des = open(fname, 'r').read()

    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    data = open(fname, 'rb').read()

    # Determine dtype
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)

    # Create numpy array (frombuffer yields a read-only view on the bytes)
    a = np.frombuffer(data, dtype=dtype)

    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]

    # Reverse shape stuff to make z-y-x order
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]

    # Take vectors/colours into account: if the buffer holds more elements
    # than voxels, append an extra (fastest-changing) dimension
    N = np.prod(shape)
    if N != a.size:
        extraDim = int( a.size / N )
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)

    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
        a = Image(a)
        a.sampling = sampling
        a.origin = origin
    return a
class Image(np.ndarray):
    """ Thin ndarray subclass used to carry `sampling` and `origin`
    metadata alongside the pixel data.
    """

    def __new__(cls, array):
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            # No metadata on the array in Pypy; return the original
            return array
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)

    Use `get_default_params()` to get a Parameters struct with sensible
    default values.
    """

    def as_dict(self):
        """ Returns the parameters as a dictionary.
        """
        return dict(self.__dict__)

    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)

    def __str__(self):
        # Widest parameter name, for right-aligned output
        width = 0
        for name in self.__dict__:
            if len(name) > width:
                width = len(name)
        # Chars left for the value, to keep lines under 80 chars
        room = 79 - (width + 6)
        # One line per parameter, values truncated to fit
        parts = ['<%i parameters>' % len(self.__dict__)]
        for name in self.__dict__.keys():
            shown = repr(self.__dict__[name])
            if len(shown) > room:
                shown = shown[:room - 3] + '...'
            parts.append(name.rjust(width + 4) + ': %s' % shown)
        return '\n'.join(parts) + '\n'

    def __add__(self, other):
        combined = Parameters()
        combined.__dict__.update(self.__dict__)
        combined.__dict__.update(other.__dict__)
        return combined
def _get_fixed_params(im):
    """ Parameters dictated by the input image itself (dimensionality
    and result pixel type); the user has no influence on these.
    """
    p = Parameters()
    if not isinstance(im, np.ndarray):
        return p
    # Dimension of the inputs
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write the result image, so it can be loaded back and verified
    p.WriteResultImage = True
    # Result pixel type derived from the numpy dtype: 'MET_FLOAT' -> 'float'
    itk_name = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_name.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.
    """
    p = Parameters()
    # Pixel format used internally during the registration process
    p.FixedInternalImagePixelType = "float"
    p.MovingInternalImagePixelType = "float"
    # Take the image direction cosines into account
    p.UseDirectionCosines = True
    # Multi-resolution registration is almost always what you want
    p.Registration = 'MultiResolutionRegistration'
    # Pyramid options: *Recursive* pyramids downsample the images,
    # *Smoothing* pyramids do not
    p.FixedImagePyramid = "FixedRecursiveImagePyramid"
    p.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Combine transforms by composition rather than addition;
    # it does not influence the results very much
    p.HowToCombineTransforms = "Compose"
    # Value used for pixels that map outside the moving image
    p.DefaultPixelValue = 0
    # Interpolation used during optimization (order 1 = linear, 3 = cubic)
    p.Interpolator = "BSplineInterpolator"
    p.BSplineInterpolationOrder = 1
    # Interpolation used for the final resampling step
    p.ResampleInterpolator = "FinalBSplineInterpolator"
    p.FinalBSplineInterpolationOrder = 3
    # According to the manual, there is currently only one resampler
    p.Resampler = "DefaultResampler"
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')

    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.

    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:

    * Transform (str):
        Can be 'BSplineTransform', 'EulerTransform', or
        'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        When using the BSplineTransform, the final spacing of the grid.
        This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
        When using a rigid or affine transform. Scales the affine matrix
        elements compared to the translations, to make sure they are in
        the same range. In general, it's best to use automatic scales
        estimation.
    * AutomaticTransformInitialization (bool):
        When using a rigid or affine transform. Automatically guess an
        initial translation by aligning the geometric centers of the
        fixed and moving.
    * NumberOfResolutions (int):
        Most registration algorithms adopt a multiresolution approach
        to direct the solution towards a global optimum and to speed
        up the process. This parameter specifies the number of scales
        to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
        Maximum number of iterations in each resolution level.
        200-2000 works usually fine for nonrigid registration.
        The more, the better, but the longer computation time.
        This is an important parameter! (default 500).
    """
    # Init
    p = Parameters()
    # NOTE: an unrecognized `type` matches neither branch below and
    # leaves p.Transform unset
    type = type.upper()

    # ===== Metric to use =====
    p.Metric = 'AdvancedMattesMutualInformation'
    # Number of grey level bins in each resolution level,
    # for the mutual information. 16 or 32 usually works fine.
    # sets default value for NumberOf[Fixed/Moving]HistogramBins
    p.NumberOfHistogramBins = 32
    # Taking samples for mutual information
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True

    # ====== Transform to use ======
    # The number of levels in the image pyramid
    p.NumberOfResolutions = 4

    if type in ['B', 'BSPLINE', 'B-SPLINE']:
        # Bspline transform
        p.Transform = 'BSplineTransform'
        # The final grid spacing (at the smallest level)
        p.FinalGridSpacingInPhysicalUnits = 16

    if type in ['RIGID', 'EULER', 'AFFINE']:
        # Affine or Euler transform
        if type in ['RIGID', 'EULER']:
            p.Transform = 'EulerTransform'
        else:
            p.Transform = 'AffineTransform'
        # Scales the affine matrix elements compared to the translations,
        # to make sure they are in the same range. In general, it's best to
        # use automatic scales estimation.
        p.AutomaticScalesEstimation = True
        # Automatically guess an initial translation by aligning the
        # geometric centers of the fixed and moving.
        p.AutomaticTransformInitialization = True

    # ===== Optimizer to use =====
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Maximum number of iterations in each resolution level:
    # 200-2000 works usually fine for nonrigid registration.
    # The more, the better, but the longer computation time.
    # This is an important parameter!
    p.MaximumNumberOfIterations = 500
    # The step size of the optimizer, in mm. By default the voxel size is used.
    # which usually works well. In case of unusual high-resolution images
    # (eg histology) it is necessary to increase this value a bit, to the size
    # of the "smallest visible structure" in the image:
    #p.MaximumStepLength = 1.0  (default uses voxel spacing)

    # Another optional parameter for the AdaptiveStochasticGradientDescent
    #p.SigmoidInitialTime = 4.0

    # ===== Also interesting parameters =====
    #p.FinalGridSpacingInVoxels = 16
    #p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #p.ErodeMask = "false"

    # Done
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
      * Combine parameters from different sources
      * Perform checks to prevent non-compatible parameters
      * Extend parameters that need a list with one element per dimension
    """
    # Layer the parameter sets; user-given values win
    merged = _get_fixed_params(im1) + get_advanced_params()
    merged = merged + params
    params = merged.as_dict()

    # Grid-spacing values must be given once per image dimension
    if isinstance(im1, np.ndarray):
        seq_types = (list, tuple)
        for name in ('FinalGridSpacingInPhysicalUnits',
                     'FinalGridSpacingInVoxels'):
            if name in params.keys() and not isinstance(params[name], seq_types):
                params[name] = [params[name]] * im1.ndim

    # The voxel-based spacing takes precedence over the physical one
    if 'FinalGridSpacingInVoxels' in params:
        params.pop('FinalGridSpacingInPhysicalUnits', None)

    return params
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.
    Returns the path of the written file.
    """
    # Get path (inside this process' temp dir)
    path = os.path.join(get_tempdir(), 'params.txt')

    # Define helper function
    def valToStr(val):
        # Bugfix: this used to test ``val in [True, False]``, which is an
        # equality test and therefore matched any value *equal* to 0 or 1
        # (e.g. the ints 0/1 or the float 1.0). Such numeric values were
        # then written as "false"/"true", producing an invalid parameter
        # file. Test the actual type instead (bool before int, since bool
        # is a subclass of int).
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val

    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (lists become space-separated)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'

    # Write text
    f = open(path, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()

    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
_get_image_paths
|
python
|
def _get_image_paths(im1, im2):
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
|
If the images are paths to files, checks whether the files exist
and returns the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L236-L267
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30

# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running. (Windows API constant STILL_ACTIVE.)
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Get whether a process with the given pid is currently running.
    Dispatches to the platform-specific implementation.
    """
    on_windows = sys.platform.startswith("win")
    if on_windows:
        return _is_pid_running_on_windows(pid)
    return _is_pid_running_on_unix(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    """Windows implementation: open a process handle for the pid and ask
    the kernel for its exit code.
    """
    import ctypes.wintypes

    kernel32 = ctypes.windll.kernel32
    # Access flag 1 (presumably PROCESS_TERMINATE -- TODO confirm) is
    # enough to obtain a handle; failure means no such process.
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False

    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)

    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running.
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
    """ Try to find an executable.

    Searches a series of likely locations (a ``<NAME>_PATH`` environment
    variable, this module's directory, the Python executable's directory,
    the user's home dir, and OS-specific program dirs) and verifies each
    candidate by running it with ``--version``.

    Returns a tuple (exe_path, version_string), or (None, None) when no
    working executable is found.
    """
    exe_name = name + '.exe' * sys.platform.startswith('win')
    env_path = os.environ.get(name.upper()+ '_PATH', '')

    possible_locations = []
    def add(*dirs):
        # Collect existing, not-yet-listed directories
        for d in dirs:
            if d and d not in possible_locations and os.path.isdir(d):
                possible_locations.append(d)

    # Get list of possible locations
    add(env_path)
    try:
        add(os.path.dirname(os.path.abspath(__file__)))
    except NameError:  # __file__ may not exist
        pass
    add(os.path.dirname(sys.executable))
    add(os.path.expanduser('~'))

    # Platform specific possible locations
    if sys.platform.startswith('win'):
        add('c:\\program files', os.environ.get('PROGRAMFILES'),
            'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
    else:
        possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])

    def do_check_version(exe):
        # Returns the version string, or False if the exe cannot be run
        try:
            return subprocess.check_output([exe, '--version']).decode().strip()
        except Exception:
            # print('not a good exe', exe)
            return False

    # If env path is the exe itself ...
    if os.path.isfile(env_path):
        ver = do_check_version(env_path)
        if ver:
            return env_path, ver

    # First try to find obvious locations: <dir>/<exe> and <dir>/<name>/<exe>
    for d in possible_locations:
        for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
            if os.path.isfile(exe):
                ver = do_check_version(exe)
                if ver:
                    return exe, ver

    # Maybe the exe is on the PATH
    ver = do_check_version(exe_name)
    if ver:
        return exe_name, ver

    # Try harder: look inside subdirs whose name starts with `name`
    # (e.g. versioned install dirs), newest-sorting first
    for d in possible_locations:
        for sub in reversed(sorted(os.listdir(d))):
            if sub.startswith(name):
                exe = os.path.join(d, sub, exe_name)
                if os.path.isfile(exe):
                    ver = do_check_version(exe)
                    if ver:
                        return exe, ver

    return None, None
EXES = []
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.
    """
    # Return the cached result if we searched before
    if EXES:
        if EXES[0]:
            return EXES
        raise RuntimeError('No Elastix executable.')
    # Locate the elastix executable; transformix lives next to it
    elastix, ver = _find_executables('elastix')
    if not elastix:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
    base, ext = os.path.splitext(elastix)
    base = os.path.dirname(base)
    transformix = os.path.join(base, 'transformix' + ext)
    EXES.extend([elastix, transformix])
    print('Found %s in %r' % (ver, elastix))
    return EXES
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')

    # Make sure it exists
    if not os.path.isdir(tempdir):
        os.makedirs(tempdir)

    # Clean up all directories for which the process no longer exists
    # (we only own 'id_<pid>_<threadid>' style dirs)
    for fname in os.listdir(tempdir):
        dirName = os.path.join(tempdir, fname)
        # Check if is right kind of dir
        if not (os.path.isdir(dirName) and fname.startswith('id_')):
            continue
        # Get pid and check if its running
        try:
            pid = int(fname.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(dirName)

    # Select dir that included process and thread id
    # (currentThread fallback is for very old Python versions)
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(dir):
        os.mkdir(dir)
    return dir
def _clear_temp_dir():
    """ Remove all files from this process' temporary directory,
    skipping any file that cannot be removed.
    """
    tempdir = get_tempdir()
    for entry in os.listdir(tempdir):
        try:
            os.remove(os.path.join(tempdir, entry))
        except Exception:
            pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
    """ Execute the given command in a subprocess and wait for it to finish.
    A thread is run that prints output of the process if verbose is True.

    Raises RuntimeError when the user interrupts (Ctrl-C) or when the
    process exits with a nonzero return code.
    """
    # Init flag; read by the polling thread so it stops after an interrupt
    interrupted = False

    # Create progress
    if verbose > 0:
        progress = Progress()

    stdout = []
    def poll_process(p):
        # Runs in a daemon thread: forwards/aggregates the child's output
        while not interrupted:
            msg = p.stdout.readline().decode()
            if msg:
                stdout.append(msg)
                if 'error' in msg.lower():
                    # Error lines are always printed, regardless of verbosity
                    print(msg.rstrip())
                    if verbose == 1:
                        progress.reset()
                elif verbose > 1:
                    print(msg.rstrip())
                elif verbose == 1:
                    progress.update(msg)
            else:
                break
            time.sleep(0.01)
        #print("thread exit")

    # Start process that runs the command
    # NOTE(review): cmd is a list but shell=True is passed; on POSIX that
    # makes only cmd[0] the shell command string (remaining items become
    # shell positional args). Looks suspicious -- confirm intended
    # behavior before changing.
    p = subprocess.Popen(cmd, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    # Keep reading stdout from it
    # thread.start_new_thread(poll_process, (p,))  Python 2.x
    my_thread = threading.Thread(target=poll_process, args=(p,))
    my_thread.setDaemon(True)  # do not keep the interpreter alive for it
    my_thread.start()

    # Wait here
    try:
        while p.poll() is None:
            time.sleep(0.01)
    except KeyboardInterrupt:
        # Set flag
        interrupted = True
        # Kill subprocess
        pid = p.pid
        if hasattr(os,'kill'):
            import signal
            os.kill(pid, signal.SIGKILL)
        elif sys.platform.startswith('win'):
            kernel32 = ctypes.windll.kernel32
            handle = kernel32.OpenProcess(1, 0, pid)
            kernel32.TerminateProcess(handle, 0)
            #os.system("TASKKILL /PID " + str(pid) + " /F")

    # All good?
    if interrupted:
        raise RuntimeError('Registration process interrupted by the user.')
    if p.returncode:
        stdout.append(p.stdout.read().decode())
        print(''.join(stdout))
        raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Minimal console progress reporter. Parses lines of Elastix stdout
    and rewrites a single status line showing the current resolution
    level and iteration number.
    """

    def __init__(self):
        self._level = 0
        self.reset()

    def update(self, s):
        # A line like "Resolution: 2" announces a new pyramid level
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        # Iteration lines are tab-separated, starting with the iteration nr
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)

    def get_int(self, s):
        # Parse an int; return 0 if s is not a valid integer
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        self._message = ''
        print()

    def show_progress(self, iter):
        # Backspace over the previous message, then print the new status
        backspaces = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(backspaces + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)

    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).

    Parameters:

    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.

    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.

    Raises RuntimeError if the elastix/transformix run fails or its
    output cannot be read back.
    """
    # Clear dir (the temp dir is per process/thread, see get_tempdir())
    tempdir = get_tempdir()
    _clear_temp_dir()

    # Reference image, used to derive dimension/dtype-dependent params
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]

    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()

    # Groupwise? (signalled by im2 being None and im1 being a list)
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images:
        # stack them along a new first axis
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters for a groupwise (stack) registration
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along the stack dimension: one schedule entry per
        # dimension per resolution level, 0 for the stack axis
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples

    # Get paths of input images (ndarrays are written to the temp dir)
    path_im1, path_im2 = _get_image_paths(im1, im2)

    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)

    # Get path of trafo param file (written by the elastix executable)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')

    # Register
    if True:

        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)

        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)

    # Find deformation field
    if True:

        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)

        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)

    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]

    # Pull apart deformation fields in multiple images: the last axis of
    # the field array holds one component per spatial dimension
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)

    if im2 is not None:
        fields = fields[0]  # For pairwise reg, return 1 field, not a list

    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # NOTE: the array must be written unmodified. The previous rescaling
    # (im * (1.0/3000)) silently changed the registration input, promoted
    # any dtype to float64, and dropped the sampling/origin attributes of
    # Image instances (ndarray arithmetic returns a plain array), so the
    # hasattr checks below never saw them.
    # Create mhd header template; placeholders are filled in below
    lines = [ "ObjectType = Image",
        "NDims = <ndim>",
        "BinaryData = True",
        "BinaryDataByteOrderMSB = False",
        "CompressedData = False",
        #"TransformMatrix = <transmatrix>",
        "Offset = <origin>",
        "CenterOfRotation = <centrot>",
        "ElementSpacing = <sampling>",
        "DimSize = <shape>",
        "ElementType = <dtype>",
        "ElementDataFile = <fname>",
        "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Get shape, sampling and origin (unit spacing / zero origin fallback)
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]
    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get ITK data type matching the numpy dtype
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
    # Fill in the mhd header
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    if im.ndim==2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim==3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim==4:
        pass # ???
    # Write raw data file (requires a C-contiguous array)
    with open(fname_raw, 'wb') as f:
        f.write(im.data)
    # Write mhd header file
    with open(fname_mhd, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data( mhd_file):
    """ Read the resulting image data and return it as a numpy array.

    `mhd_file` is a filename relative to this process' temp dir (see
    `get_tempdir()`). Returns an `Image` (ndarray subclass) carrying
    `sampling` and `origin` attributes in z-y-x order. Raises
    RuntimeError for an unknown ElementType or a shape/size mismatch.
    """
    tempdir = get_tempdir()
    # Load description from mhd file
    fname = tempdir + '/' + mhd_file
    des = open(fname, 'r').read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    data = open(fname, 'rb').read()
    # Determine dtype from the ITK type name
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array (read-only view on the raw bytes)
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse shape stuff to make z-y-x order
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: if there are more elements than
    # the spatial shape holds, the remainder becomes a trailing component dim
    N = np.prod(shape)
    if N != a.size:
        extraDim = int( a.size / N )
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
    a = Image(a)
    a.sampling = sampling
    a.origin = origin
    return a
class Image(np.ndarray):
    """ ndarray subclass used to attach `sampling` and `origin` metadata
    to arrays produced by `_read_image_data()`.
    """
    def __new__(cls, array):
        try:
            ob = array.view(cls)
        except AttributeError: # pragma: no cover
            # Just return the original; no metadata on the array in Pypy!
            return array
        return ob
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)
    Use `get_default_params()` to get a Parameters struct with sensible
    default values.

    Parameters are stored as plain instance attributes.
    """
    def as_dict(self):
        """ Returns the parameters as a dictionary.
        """
        tmp = {}
        tmp.update(self.__dict__)
        return tmp
    def __repr__(self):
        # Short single-line summary
        return '<Parameters instance with %i parameters>' % len(self.__dict__)
    def __str__(self):
        # Multi-line listing, values right-aligned to the longest key.
        # Get alignment value
        c = 0
        for key in self.__dict__:
            c = max(c, len(key))
        # How many chars left (to print on less than 80 lines)
        charsLeft = 79 - (c+6)
        s = '<%i parameters>\n' % len(self.__dict__)
        for key in self.__dict__.keys():
            valuestr = repr(self.__dict__[key])
            if len(valuestr) > charsLeft:
                valuestr = valuestr[:charsLeft-3] + '...'
            s += key.rjust(c+4) + ": %s\n" % (valuestr)
        return s
    def __add__(self, other):
        # The right-hand operand wins for duplicate keys
        p = Parameters()
        p.__dict__.update(self.__dict__)
        p.__dict__.update(other.__dict__)
        return p
def _get_fixed_params(im):
    """ Parameters that the user has no influence on. Mostly chosen
    bases on the input images.
    """
    p = Parameters()
    # Non-array input (e.g. a filename): nothing can be derived here
    if not isinstance(im, np.ndarray):
        return p
    # Both images are assumed to have the dimensionality of the input
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write the result image so it can be read back afterwards
    p.WriteResultImage = True
    # Derive output pixel type from the input dtype: 'MET_FLOAT' -> 'float'
    itk_name = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_name.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.

    These are combined with `_get_fixed_params()` and the user-supplied
    parameters in `_compile_params()`; user values take precedence.
    """
    p = Parameters()
    # Internal format used during the registration process
    p.FixedInternalImagePixelType = "float"
    p.MovingInternalImagePixelType = "float"
    # Image direction
    p.UseDirectionCosines = True
    # In almost all cases you'd want multi resolution
    p.Registration = 'MultiResolutionRegistration'
    # Pyramid options
    # *RecursiveImagePyramid downsamples the images
    # *SmoothingImagePyramid does not downsample
    p.FixedImagePyramid = "FixedRecursiveImagePyramid"
    p.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Whether transforms are combined by composition or by addition.
    # It does not influence the results very much.
    p.HowToCombineTransforms = "Compose"
    # For out of range pixels
    p.DefaultPixelValue = 0
    # Interpolator used during interpolation and its order
    # 1 means linear interpolation, 3 means cubic.
    p.Interpolator = "BSplineInterpolator"
    p.BSplineInterpolationOrder = 1
    # Interpolator used during interpolation of final level, and its order
    p.ResampleInterpolator = "FinalBSplineInterpolator"
    p.FinalBSplineInterpolationOrder = 3
    # According to the manual, there is currently only one resampler
    p.Resampler = "DefaultResampler"
    # Done
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')
    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.
    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:
    * Transform (str):
        Can be 'BSplineTransform', 'EulerTransform', or
        'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        When using the BSplineTransform, the final spacing of the grid.
        This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
        When using a rigid or affine transform. Scales the affine matrix
        elements compared to the translations, to make sure they are in
        the same range. In general, it's best to use automatic scales
        estimation.
    * AutomaticTransformInitialization (bool):
        When using a rigid or affine transform. Automatically guess an
        initial translation by aligning the geometric centers of the
        fixed and moving.
    * NumberOfResolutions (int):
        Most registration algorithms adopt a multiresolution approach
        to direct the solution towards a global optimum and to speed
        up the process. This parameter specifies the number of scales
        to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
        Maximum number of iterations in each resolution level.
        200-2000 works usually fine for nonrigid registration.
        The more, the better, but the longer computation time.
        This is an important parameter! (default 500).
    """
    # Init
    p = Parameters()
    # `type` is matched case-insensitively against several aliases below
    type = type.upper()
    # ===== Metric to use =====
    p.Metric = 'AdvancedMattesMutualInformation'
    # Number of grey level bins in each resolution level,
    # for the mutual information. 16 or 32 usually works fine.
    # sets default value for NumberOf[Fixed/Moving]HistogramBins
    p.NumberOfHistogramBins = 32
    # Taking samples for mutual information
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True
    # ====== Transform to use ======
    # The number of levels in the image pyramid
    p.NumberOfResolutions = 4
    if type in ['B', 'BSPLINE', 'B-SPLINE']:
        # Bspline transform
        p.Transform = 'BSplineTransform'
        # The final grid spacing (at the smallest level)
        p.FinalGridSpacingInPhysicalUnits = 16
    if type in ['RIGID', 'EULER', 'AFFINE']:
        # Affine or Euler transform
        if type in ['RIGID', 'EULER']:
            p.Transform = 'EulerTransform'
        else:
            p.Transform = 'AffineTransform'
        # Scales the affine matrix elements compared to the translations,
        # to make sure they are in the same range. In general, it's best to
        # use automatic scales estimation.
        p.AutomaticScalesEstimation = True
        # Automatically guess an initial translation by aligning the
        # geometric centers of the fixed and moving.
        p.AutomaticTransformInitialization = True
    # ===== Optimizer to use =====
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Maximum number of iterations in each resolution level:
    # 200-2000 works usually fine for nonrigid registration.
    # The more, the better, but the longer computation time.
    # This is an important parameter!
    p.MaximumNumberOfIterations = 500
    # The step size of the optimizer, in mm. By default the voxel size is used.
    # which usually works well. In case of unusual high-resolution images
    # (eg histology) it is necessary to increase this value a bit, to the size
    # of the "smallest visible structure" in the image:
    #p.MaximumStepLength = 1.0 Default uses voxel spaceing
    # Another optional parameter for the AdaptiveStochasticGradientDescent
    #p.SigmoidInitialTime = 4.0
    # ===== Also interesting parameters =====
    #p.FinalGridSpacingInVoxels = 16
    #p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #p.ErodeMask = "false"
    # Done
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
    * Combine parameters from different sources
    * Perform checks to prevent non-compatible parameters
    * Extend parameters that need a list with one element per dimension
    """
    # Precedence (low to high): fixed < advanced < user-supplied params
    combined = _get_fixed_params(im1) + get_advanced_params() + params
    params = combined.as_dict()
    # Per-dimension parameters given as a scalar are repeated for each dim
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in params and not isinstance(params[key], (list, tuple)):
                params[key] = [params[key]] * im1.ndim
    # Voxel spacing takes precedence over physical-unit spacing
    if 'FinalGridSpacingInVoxels' in params:
        if 'FinalGridSpacingInPhysicalUnits' in params:
            params.pop('FinalGridSpacingInPhysicalUnits')
    return params
def _write_parameter_file(params):
    """ Write the parameter file in the format that elaxtix likes.

    Returns the path of the written file (in this process' temp dir).
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # Test bool *before* int. isinstance(True, int) is True, and the
        # previous membership test (val in [True, False]) also matched the
        # integers 0 and 1 (since 1 == True), emitting them quoted like
        # strings instead of as plain numbers.
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            # Elastix expects floats to contain a decimal point
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (lists become space-separated)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text
    with open(path, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
_system3
|
python
|
def _system3(cmd, verbose=False):
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
|
Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L272-L337
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Get whether a process with the given pid is currently running.
    """
    # Dispatch to the platform-specific implementation
    if sys.platform.startswith("win"):
        checker = _is_pid_running_on_windows
    else:
        checker = _is_pid_running_on_unix
    return checker(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    """Check pid liveness on Windows via the kernel32 API.
    """
    import ctypes.wintypes
    kernel32 = ctypes.windll.kernel32
    # OpenProcess with PROCESS_TERMINATE (1) access; 0 handle means no process
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False
    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)
    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running.
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
    """ Try to find an executable.

    Searches the NAME_PATH environment variable (as a file or a
    directory), common install locations, and the system PATH.
    Candidates are validated by running them with '--version'.
    Returns (path, version_string), or (None, None) if not found.
    """
    exe_name = name + '.exe' * sys.platform.startswith('win')
    env_path = os.environ.get(name.upper()+ '_PATH', '')
    possible_locations = []
    def add(*dirs):
        # Collect existing directories, without duplicates
        for d in dirs:
            if d and d not in possible_locations and os.path.isdir(d):
                possible_locations.append(d)
    # Get list of possible locations
    add(env_path)
    try:
        add(os.path.dirname(os.path.abspath(__file__)))
    except NameError: # __file__ may not exist
        pass
    add(os.path.dirname(sys.executable))
    add(os.path.expanduser('~'))
    # Platform specific possible locations
    if sys.platform.startswith('win'):
        add('c:\\program files', os.environ.get('PROGRAMFILES'),
            'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
    else:
        possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
    def do_check_version(exe):
        # Returns the version string, or False if the exe cannot be run
        try:
            return subprocess.check_output([exe, '--version']).decode().strip()
        except Exception:
            # print('not a good exe', exe)
            return False
    # If env path is the exe itself ...
    if os.path.isfile(env_path):
        ver = do_check_version(env_path)
        if ver:
            return env_path, ver
    # First try to find obvious locations
    for d in possible_locations:
        for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
            if os.path.isfile(exe):
                ver = do_check_version(exe)
                if ver:
                    return exe, ver
    # Maybe the exe is on the PATH
    ver = do_check_version(exe_name)
    if ver:
        return exe_name, ver
    # Try harder: look for versioned subdirectories (newest name first)
    for d in possible_locations:
        for sub in reversed(sorted(os.listdir(d))):
            if sub.startswith(name):
                exe = os.path.join(d, sub, exe_name)
                if os.path.isfile(exe):
                    ver = do_check_version(exe)
                    if ver:
                        return exe, ver
    return None, None
EXES = []
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.

    The result is cached in the module-level EXES list, so the search
    (and the 'Found ...' message) happens only once per process.
    """
    if EXES:
        if EXES[0]:
            return EXES
        else:
            raise RuntimeError('No Elastix executable.')
    # Find exe
    elastix, ver = _find_executables('elastix')
    if elastix:
        # Assume transformix lives next to elastix, with the same extension
        base, ext = os.path.splitext(elastix)
        base = os.path.dirname(base)
        transformix = os.path.join(base, 'transformix' + ext)
        EXES.extend([elastix, transformix])
        print('Found %s in %r' % (ver, elastix))
        return EXES
    else:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
    # Make sure it exists
    if not os.path.isdir(tempdir):
        os.makedirs(tempdir)
    # Clean up all directories for which the process no longer exists
    for fname in os.listdir(tempdir):
        dirName = os.path.join(tempdir, fname)
        # Check if is right kind of dir (named 'id_<pid>_<tid>')
        if not (os.path.isdir(dirName) and fname.startswith('id_')):
            continue
        # Get pid and check if its running
        try:
            pid = int(fname.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(dirName)
    # Select dir that included process and thread id
    # NOTE(review): this uses id() of the thread object, not Thread.ident;
    # it is only used to make the name unique per thread.
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(dir):
        os.mkdir(dir)
    return dir
def _clear_temp_dir():
    """ Clear the temporary directory.

    Deletes the files in this process/thread's temp dir; failures to
    remove individual files are silently ignored.
    """
    tempdir = get_tempdir()
    for entry in os.listdir(tempdir):
        entry_path = os.path.join(tempdir, entry)
        try:
            os.remove(entry_path)
        except Exception:
            pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Minimal console progress reporter fed with Elastix output lines
    (used by `_system3` at verbose level 1).
    """
    def __init__(self):
        # Current resolution level, parsed from 'Resolution:' lines
        self._level = 0
        self.reset()
    def update(self, s):
        """ Process one output line from Elastix. """
        # Detect resolution
        if s.startswith('Resolution:'):
            self._level = self.get_int( s.split(':')[1] )
        # Check if nr (iteration lines are tab-separated, iter number first)
        if '\t' in s:
            iter = self.get_int( s.split('\t',1)[0] )
            if iter:
                self.show_progress(iter)
    def get_int(self, s):
        """ Parse an int from a string; return 0 on failure. """
        nr = 0
        try:
            nr = int(s)
        except Exception:
            pass
        return nr
    def reset(self):
        """ Forget the previous message and start a new output line. """
        self._message = ''
        print()
    def show_progress(self, iter):
        """ Print the current (resolution, iteration) status. """
        # Remove previous message
        # NOTE(review): print() appends a newline, so the backspaces likely
        # do not erase the previous line on most terminals — confirm intent.
        rem = '\b' * (len(self._message)+1)
        # Create message, and print
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(rem + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)
    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).
    Parameters:
    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.
    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image (used to derive dtype/ndim dependent parameters)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise?
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images
        # (stacked along a new first axis)
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters specific to groupwise (stack) registration
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along that dimenson
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (arrays are written to the temp dir)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Get path of trafo param file (written by elastix, used by transformix)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Find deformation field
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Pull apart deformation fields in multiple images
    # (last axis holds the vector components, one per spatial dim)
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0]  # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # NOTE: the array must be written unmodified. The previous rescaling
    # (im * (1.0/3000)) silently changed the registration input, promoted
    # any dtype to float64, and dropped the sampling/origin attributes of
    # Image instances (ndarray arithmetic returns a plain array), so the
    # hasattr checks below never saw them.
    # Create mhd header template; placeholders are filled in below
    lines = [ "ObjectType = Image",
        "NDims = <ndim>",
        "BinaryData = True",
        "BinaryDataByteOrderMSB = False",
        "CompressedData = False",
        #"TransformMatrix = <transmatrix>",
        "Offset = <origin>",
        "CenterOfRotation = <centrot>",
        "ElementSpacing = <sampling>",
        "DimSize = <shape>",
        "ElementType = <dtype>",
        "ElementDataFile = <fname>",
        "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Get shape, sampling and origin (unit spacing / zero origin fallback)
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]
    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get ITK data type matching the numpy dtype
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
    # Fill in the mhd header
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    if im.ndim==2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim==3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim==4:
        pass # ???
    # Write raw data file (requires a C-contiguous array)
    with open(fname_raw, 'wb') as f:
        f.write(im.data)
    # Write mhd header file
    with open(fname_mhd, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data( mhd_file):
    """ Read the resulting image data and return it as a numpy array.

    `mhd_file` is a filename relative to this process' temp dir (see
    `get_tempdir()`). Returns an `Image` (ndarray subclass) carrying
    `sampling` and `origin` attributes in z-y-x order. Raises
    RuntimeError for an unknown ElementType or a shape/size mismatch.
    """
    tempdir = get_tempdir()
    # Load description from mhd file
    fname = tempdir + '/' + mhd_file
    des = open(fname, 'r').read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    data = open(fname, 'rb').read()
    # Determine dtype from the ITK type name
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array (read-only view on the raw bytes)
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse shape stuff to make z-y-x order
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: if there are more elements than
    # the spatial shape holds, the remainder becomes a trailing component dim
    N = np.prod(shape)
    if N != a.size:
        extraDim = int( a.size / N )
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
    a = Image(a)
    a.sampling = sampling
    a.origin = origin
    return a
class Image(np.ndarray):
    """ ndarray subclass used to attach `sampling` and `origin` metadata
    to arrays produced by `_read_image_data()`.
    """
    def __new__(cls, array):
        try:
            ob = array.view(cls)
        except AttributeError: # pragma: no cover
            # Just return the original; no metadata on the array in Pypy!
            return array
        return ob
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)
    Use `get_default_params()` to get a Parameters struct with sensible
    default values.

    Parameters are stored as plain instance attributes.
    """
    def as_dict(self):
        """ Returns the parameters as a dictionary.
        """
        tmp = {}
        tmp.update(self.__dict__)
        return tmp
    def __repr__(self):
        # Short single-line summary
        return '<Parameters instance with %i parameters>' % len(self.__dict__)
    def __str__(self):
        # Multi-line listing, values right-aligned to the longest key.
        # Get alignment value
        c = 0
        for key in self.__dict__:
            c = max(c, len(key))
        # How many chars left (to print on less than 80 lines)
        charsLeft = 79 - (c+6)
        s = '<%i parameters>\n' % len(self.__dict__)
        for key in self.__dict__.keys():
            valuestr = repr(self.__dict__[key])
            if len(valuestr) > charsLeft:
                valuestr = valuestr[:charsLeft-3] + '...'
            s += key.rjust(c+4) + ": %s\n" % (valuestr)
        return s
    def __add__(self, other):
        # The right-hand operand wins for duplicate keys
        p = Parameters()
        p.__dict__.update(self.__dict__)
        p.__dict__.update(other.__dict__)
        return p
def _get_fixed_params(im):
    """ Parameters that the user has no influence on. Mostly chosen
    based on the input images. Returns an empty Parameters struct when
    `im` is not a numpy array (e.g. a filename).
    """
    p = Parameters()
    if not isinstance(im, np.ndarray):
        return p
    # Dimension of the inputs
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write result, so I can verify
    p.WriteResultImage = True
    # How to write the result
    # The ITK type name (e.g. 'MET_SHORT') is reduced to elastix's
    # lowercase pixel-type name (e.g. 'short').
    tmp = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = tmp.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    # Done
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about. Returns a `Parameters` instance that can be
    combined with others via `+`.
    """
    p = Parameters()
    # Internal format used during the registration process
    p.FixedInternalImagePixelType = "float"
    p.MovingInternalImagePixelType = "float"
    # Image direction
    p.UseDirectionCosines = True
    # In almost all cases you'd want multi resolution
    p.Registration = 'MultiResolutionRegistration'
    # Pyramid options
    # *RecursiveImagePyramid downsamples the images
    # *SmoothingImagePyramid does not downsample
    p.FixedImagePyramid = "FixedRecursiveImagePyramid"
    p.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Whether transforms are combined by composition or by addition.
    # It does not influence the results very much.
    p.HowToCombineTransforms = "Compose"
    # For out of range pixels
    p.DefaultPixelValue = 0
    # Interpolator used during interpolation and its order
    # 1 means linear interpolation, 3 means cubic.
    p.Interpolator = "BSplineInterpolator"
    p.BSplineInterpolationOrder = 1
    # Interpolator used during interpolation of final level, and its order
    p.ResampleInterpolator = "FinalBSplineInterpolator"
    p.FinalBSplineInterpolationOrder = 3
    # According to the manual, there is currently only one resampler
    p.Resampler = "DefaultResampler"
    # Done
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')
    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.
    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:
    * Transform (str):
        Can be 'BSplineTransform', 'EulerTransform', or
        'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        When using the BSplineTransform, the final spacing of the grid.
        This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
        When using a rigid or affine transform. Scales the affine matrix
        elements compared to the translations, to make sure they are in
        the same range. In general, it's best to use automatic scales
        estimation.
    * AutomaticTransformInitialization (bool):
        When using a rigid or affine transform. Automatically guess an
        initial translation by aligning the geometric centers of the
        fixed and moving.
    * NumberOfResolutions (int):
        Most registration algorithms adopt a multiresolution approach
        to direct the solution towards a global optimum and to speed
        up the process. This parameter specifies the number of scales
        to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
        Maximum number of iterations in each resolution level.
        200-2000 works usually fine for nonrigid registration.
        The more, the better, but the longer computation time.
        This is an important parameter! (default 500).
    """
    # Init
    p = Parameters()
    type = type.upper()
    # ===== Metric to use =====
    p.Metric = 'AdvancedMattesMutualInformation'
    # Number of grey level bins in each resolution level,
    # for the mutual information. 16 or 32 usually works fine.
    # sets default value for NumberOf[Fixed/Moving]HistogramBins
    p.NumberOfHistogramBins = 32
    # Taking samples for mutual information
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True
    # ====== Transform to use ======
    # The number of levels in the image pyramid
    p.NumberOfResolutions = 4
    if type in ['B', 'BSPLINE', 'B-SPLINE']:
        # Bspline transform
        p.Transform = 'BSplineTransform'
        # The final grid spacing (at the smallest level)
        p.FinalGridSpacingInPhysicalUnits = 16
    if type in ['RIGID', 'EULER', 'AFFINE']:
        # Affine or Euler transform
        if type in ['RIGID', 'EULER']:
            p.Transform = 'EulerTransform'
        else:
            p.Transform = 'AffineTransform'
        # Scales the affine matrix elements compared to the translations,
        # to make sure they are in the same range. In general, it's best to
        # use automatic scales estimation.
        p.AutomaticScalesEstimation = True
        # Automatically guess an initial translation by aligning the
        # geometric centers of the fixed and moving.
        p.AutomaticTransformInitialization = True
    # ===== Optimizer to use =====
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Maximum number of iterations in each resolution level:
    # 200-2000 works usually fine for nonrigid registration.
    # The more, the better, but the longer computation time.
    # This is an important parameter!
    p.MaximumNumberOfIterations = 500
    # The step size of the optimizer, in mm. By default the voxel size is used,
    # which usually works well. In case of unusual high-resolution images
    # (eg histology) it is necessary to increase this value a bit, to the size
    # of the "smallest visible structure" in the image:
    #p.MaximumStepLength = 1.0  Default uses voxel spacing
    # Another optional parameter for the AdaptiveStochasticGradientDescent
    #p.SigmoidInitialTime = 4.0
    # ===== Also interesting parameters =====
    #p.FinalGridSpacingInVoxels = 16
    #p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #p.ErodeMask = "false"
    # Done
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
    * Combine parameters from different sources
    * Perform checks to prevent non-compatible parameters
    * Extend parameters that need a list with one element per dimension
    """
    # Compile parameters
    # (user-supplied params are added last, so they take precedence)
    p = _get_fixed_params(im1) + get_advanced_params()
    p = p + params
    params = p.as_dict()
    # Check parameter dimensions
    # Elastix wants one grid-spacing value per image dimension; scalar
    # values are replicated here.
    if isinstance(im1, np.ndarray):
        lt = (list, tuple)
        for key in [ 'FinalGridSpacingInPhysicalUnits',
                     'FinalGridSpacingInVoxels' ]:
            if key in params.keys() and not isinstance(params[key], lt):
                params[key] = [params[key]] * im1.ndim
    # Check parameter removal
    # The two grid-spacing flavours are mutually exclusive; the voxel
    # variant wins when both are present.
    if 'FinalGridSpacingInVoxels' in params:
        if 'FinalGridSpacingInPhysicalUnits' in params:
            params.pop('FinalGridSpacingInPhysicalUnits')
    # Done
    return params
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.
    Returns the path of the written file (in the pyelastix temp dir).
    Raises ValueError for parameter values of unsupported types.
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # Check bool BEFORE int: isinstance(True, int) is True in Python,
        # and the previous test ``val in [True, False]`` also matched the
        # integers 1 and 0, serializing e.g. BSplineInterpolationOrder=1
        # as "true" instead of 1.
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'  # elastix expects floats to contain a dot
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        else:
            # Fail loudly instead of returning None (which used to produce
            # a confusing TypeError in the join below).
            raise ValueError('Cannot serialize parameter value %r' % (val,))
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text (context manager guarantees the handle is closed)
    with open(path, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
_get_dtype_maps
|
python
|
def _get_dtype_maps():
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
|
Get dictionaries to map numpy data types to ITK types and the
other way around.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L340-L359
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Return whether a process with the given pid is currently running."""
    # Dispatch to the platform-specific implementation.
    if not sys.platform.startswith("win"):
        return _is_pid_running_on_unix(pid)
    return _is_pid_running_on_windows(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    """Check whether `pid` is alive using the Win32 API via ctypes."""
    import ctypes.wintypes
    kernel32 = ctypes.windll.kernel32
    # Access flag 1 is presumably PROCESS_TERMINATE -- TODO confirm; it is
    # enough to obtain a handle. OpenProcess returns 0 for unknown pids.
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False
    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)
    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running.
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
""" Try to find an executable.
"""
exe_name = name + '.exe' * sys.platform.startswith('win')
env_path = os.environ.get(name.upper()+ '_PATH', '')
possible_locations = []
def add(*dirs):
for d in dirs:
if d and d not in possible_locations and os.path.isdir(d):
possible_locations.append(d)
# Get list of possible locations
add(env_path)
try:
add(os.path.dirname(os.path.abspath(__file__)))
except NameError: # __file__ may not exist
pass
add(os.path.dirname(sys.executable))
add(os.path.expanduser('~'))
# Platform specific possible locations
if sys.platform.startswith('win'):
add('c:\\program files', os.environ.get('PROGRAMFILES'),
'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
else:
possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
def do_check_version(exe):
try:
return subprocess.check_output([exe, '--version']).decode().strip()
except Exception:
# print('not a good exe', exe)
return False
# If env path is the exe itself ...
if os.path.isfile(env_path):
ver = do_check_version(env_path)
if ver:
return env_path, ver
# First try to find obvious locations
for d in possible_locations:
for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
# Maybe the exe is on the PATH
ver = do_check_version(exe_name)
if ver:
return exe_name, ver
# Try harder
for d in possible_locations:
for sub in reversed(sorted(os.listdir(d))):
if sub.startswith(name):
exe = os.path.join(d, sub, exe_name)
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
return None, None
EXES = []
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.
    The result is cached in the module-level EXES list, so the search
    (and the 'Found ...' print) happens at most once per process.
    """
    if EXES:
        if EXES[0]:
            return EXES
        else:
            raise RuntimeError('No Elastix executable.')
    # Find exe
    elastix, ver = _find_executables('elastix')
    if elastix:
        # transformix is assumed to live next to the elastix executable,
        # with the same extension (.exe on Windows, none elsewhere).
        base, ext = os.path.splitext(elastix)
        base = os.path.dirname(base)
        transformix = os.path.join(base, 'transformix' + ext)
        EXES.extend([elastix, transformix])
        print('Found %s in %r' % (ver, elastix))
        return EXES
    else:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
    # Make sure it exists
    if not os.path.isdir(tempdir):
        os.makedirs(tempdir)
    # Clean up all directories for which the process no longer exists
    # (subdirs are named 'id_<pid>_<threadid>', see below)
    for fname in os.listdir(tempdir):
        dirName = os.path.join(tempdir, fname)
        # Check if is right kind of dir
        if not (os.path.isdir(dirName) and fname.startswith('id_')):
            continue
        # Get pid and check if its running
        try:
            pid = int(fname.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(dirName)
    # Select dir that included process and thread id
    # (current_thread preferred; currentThread is the deprecated alias)
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(dir):
        os.mkdir(dir)
    return dir
def _clear_temp_dir():
    """ Remove all files from this process' pyelastix temp directory. """
    tempdir = get_tempdir()
    for entry in os.listdir(tempdir):
        try:
            os.remove(os.path.join(tempdir, entry))
        except Exception:
            pass  # best effort; leftover files are harmless
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
    """ Execute the given command in a subprocess and wait for it to finish.
    A thread is run that prints output of the process if verbose is True.
    Raises RuntimeError on Ctrl-C or on a nonzero exit code.
    """
    # Init flag
    interrupted = False
    # Create progress
    if verbose > 0:
        progress = Progress()
    stdout = []
    def poll_process(p):
        # Runs in a daemon thread: collect (and optionally print) the
        # child's output line by line until EOF or interruption.
        while not interrupted:
            msg = p.stdout.readline().decode()
            if msg:
                stdout.append(msg)
                # Lines containing 'error' are always printed
                if 'error' in msg.lower():
                    print(msg.rstrip())
                    if verbose == 1:
                        progress.reset()
                elif verbose > 1:
                    print(msg.rstrip())
                elif verbose == 1:
                    progress.update(msg)
            else:
                break
            time.sleep(0.01)
        #print("thread exit")
    # Start process that runs the command
    # NOTE(review): shell=True combined with a list cmd is platform
    # dependent -- confirm this behaves as intended on POSIX.
    p = subprocess.Popen(cmd, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Keep reading stdout from it
    # thread.start_new_thread(poll_process, (p,)) Python 2.x
    my_thread = threading.Thread(target=poll_process, args=(p,))
    my_thread.setDaemon(True)  # NOTE(review): deprecated; prefer .daemon = True
    my_thread.start()
    # Wait here (poll loop keeps the main thread responsive to Ctrl-C)
    try:
        while p.poll() is None:
            time.sleep(0.01)
    except KeyboardInterrupt:
        # Set flag
        interrupted = True
        # Kill subprocess
        pid = p.pid
        if hasattr(os,'kill'):
            import signal
            # NOTE(review): os.kill exists on Windows too (3.2+), but
            # signal.SIGKILL does not -- this branch may raise there.
            os.kill(pid, signal.SIGKILL)
        elif sys.platform.startswith('win'):
            kernel32 = ctypes.windll.kernel32
            handle = kernel32.OpenProcess(1, 0, pid)
            kernel32.TerminateProcess(handle, 0)
            #os.system("TASKKILL /PID " + str(pid) + " /F")
    # All good?
    if interrupted:
        raise RuntimeError('Registration process interrupted by the user.')
    if p.returncode:
        stdout.append(p.stdout.read().decode())
        print(''.join(stdout))
        raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Tracks and displays registration progress, parsed from lines of
    Elastix console output (pyramid level and iteration number).
    """
    def __init__(self):
        self._level = 0
        self.reset()
    def update(self, s):
        """ Process one line of Elastix output. """
        # A line like "Resolution: 2" announces the current pyramid level
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        # Iteration lines are tab-separated, starting with the iter number
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)
    def get_int(self, s):
        """ Parse an int from a string; return 0 when parsing fails. """
        try:
            return int(s)
        except Exception:
            return 0
    def reset(self):
        """ Forget the current message and start a fresh output line. """
        self._message = ''
        print()
    def show_progress(self, iter):
        """ Print a progress message, backspacing over the previous one. """
        backspaces = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(backspaces + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)
    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).
    Parameters:
    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.
    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image (for deriving dimension/dtype parameters)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise?
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images
        # (stacked along a new first axis)
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters required for Elastix' groupwise mode
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along that dimenson
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (arrays are written to disk here)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Get path of trafo param file (produced by the elastix run below)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register (the `if True:` only scopes this subprocess invocation)
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Find deformation field by running transformix on the result
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Pull apart deformation fields in multiple images
    # (last axis holds the vector components; split into per-axis arrays)
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0] # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # NOTE: a stray ``im = im * (1.0/3000)`` used to rescale the data here.
    # That silently corrupted the intensities AND promoted the array to
    # float64, so the ElementType written below no longer matched the
    # caller's dtype (on which ResultImagePixelType is based). The data
    # must be written unmodified.
    # Create text
    lines = [ "ObjectType = Image",
              "NDims = <ndim>",
              "BinaryData = True",
              "BinaryDataByteOrderMSB = False",
              "CompressedData = False",
              #"TransformMatrix = <transmatrix>",
              "Offset = <origin>",
              "CenterOfRotation = <centrot>",
              "ElementSpacing = <sampling>",
              "DimSize = <shape>",
              "ElementType = <dtype>",
              "ElementDataFile = <fname>",
              "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Get shape, sampling and origin (fall back to unit sampling at 0)
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]
    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get data type
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: ' + str(im.dtype))
    # Set mhd text
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    if im.ndim==2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim==3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim==4:
        pass # ???
    # Write data file (tobytes makes a C-contiguous copy when needed,
    # whereas ``im.data`` failed for non-contiguous arrays)
    with open(fname_raw, 'wb') as f:
        f.write(im.tobytes())
    # Write mhd file
    with open(fname_mhd, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data(mhd_file):
    """ Read the resulting image data and return it as a numpy array
    (an Image instance carrying `sampling` and `origin` attributes).
    """
    tempdir = get_tempdir()
    # Load description from mhd file (files are now closed explicitly;
    # the old code leaked both handles)
    fname = tempdir + '/' + mhd_file
    with open(fname, 'r') as f:
        des = f.read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    with open(fname, 'rb') as f:
        data = f.read()
    # Determine dtype
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array (frombuffer: no copy is made)
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse shape stuff to make z-y-x order (mhd stores x-y-z)
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: extra fastest-varying dimension
    N = np.prod(shape)
    if N != a.size:
        extraDim = int(a.size / N)
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    a.shape = shape
    # Attach metadata via the Image subclass
    a = Image(a)
    a.sampling = sampling
    a.origin = origin
    return a
class Image(np.ndarray):
    """ ndarray subclass that can carry `sampling` and `origin` metadata. """
    def __new__(cls, array):
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            # Pypy: plain arrays have no .view; return the input unchanged
            # (it then carries no metadata).
            return array
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Parameter sets can be merged with `+`; on a
    name clash the right-hand operand wins. Use `get_default_params()`
    to obtain a struct with sensible default values.
    """
    def as_dict(self):
        """ Return the parameters as a (shallow copy) dictionary. """
        return dict(self.__dict__)
    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)
    def __str__(self):
        params = self.__dict__
        # Width of the longest parameter name, for right-alignment
        width = max([len(k) for k in params], default=0)
        # Room left for the value so lines stay under 80 columns
        room = 79 - (width + 6)
        lines = ['<%i parameters>' % len(params)]
        for name, value in params.items():
            text = repr(value)
            if len(text) > room:
                text = text[:room-3] + '...'
            lines.append(name.rjust(width + 4) + ": " + text)
        return '\n'.join(lines) + '\n'
    def __add__(self, other):
        merged = Parameters()
        merged.__dict__.update(self.__dict__)
        merged.__dict__.update(other.__dict__)
        return merged
def _get_fixed_params(im):
    """ Parameters that the user has no influence on. Mostly chosen
    based on the input images.
    """
    p = Parameters()
    # Non-array input (e.g. a filename): nothing can be derived here
    if not isinstance(im, np.ndarray):
        return p
    # Both images share the dimensionality of the input
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write the result image, so it can be loaded and verified
    p.WriteResultImage = True
    # Result pixel type follows the input dtype: the ITK name
    # (e.g. 'MET_SHORT') becomes elastix's lowercase name ('short')
    itk_name = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_name.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.
    """
    p = Parameters()
    # Work in float internally, regardless of the input pixel type
    p.FixedInternalImagePixelType = "float"
    p.MovingInternalImagePixelType = "float"
    # Respect image orientation information
    p.UseDirectionCosines = True
    # Multi-resolution registration is almost always what you want
    p.Registration = 'MultiResolutionRegistration'
    # Recursive pyramids downsample the images; Smoothing pyramids do not
    p.FixedImagePyramid = "FixedRecursiveImagePyramid"
    p.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Combine transforms by composition rather than addition
    # (it barely influences the results)
    p.HowToCombineTransforms = "Compose"
    # Value used for pixels that map outside the moving image
    p.DefaultPixelValue = 0
    # Interpolation during optimization: linear (order 1) is fast enough
    p.Interpolator = "BSplineInterpolator"
    p.BSplineInterpolationOrder = 1
    # Interpolation for the final resampling: cubic (order 3) for quality
    p.ResampleInterpolator = "FinalBSplineInterpolator"
    p.FinalBSplineInterpolationOrder = 3
    # Per the Elastix manual there is currently only one resampler
    p.Resampler = "DefaultResampler"
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')
    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.
    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:
    * Transform (str):
        Can be 'BSplineTransform', 'EulerTransform', or
        'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        When using the BSplineTransform, the final spacing of the grid.
        This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
        When using a rigid or affine transform. Scales the affine matrix
        elements compared to the translations, to make sure they are in
        the same range. In general, it's best to use automatic scales
        estimation.
    * AutomaticTransformInitialization (bool):
        When using a rigid or affine transform. Automatically guess an
        initial translation by aligning the geometric centers of the
        fixed and moving.
    * NumberOfResolutions (int):
        Most registration algorithms adopt a multiresolution approach
        to direct the solution towards a global optimum and to speed
        up the process. This parameter specifies the number of scales
        to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
        Maximum number of iterations in each resolution level.
        200-2000 works usually fine for nonrigid registration.
        The more, the better, but the longer computation time.
        This is an important parameter! (default 500).
    """
    # Init
    p = Parameters()
    type = type.upper()
    # ===== Metric to use =====
    p.Metric = 'AdvancedMattesMutualInformation'
    # Number of grey level bins in each resolution level,
    # for the mutual information. 16 or 32 usually works fine.
    # sets default value for NumberOf[Fixed/Moving]HistogramBins
    p.NumberOfHistogramBins = 32
    # Taking samples for mutual information
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True
    # ====== Transform to use ======
    # The number of levels in the image pyramid
    p.NumberOfResolutions = 4
    if type in ['B', 'BSPLINE', 'B-SPLINE']:
        # Bspline transform
        p.Transform = 'BSplineTransform'
        # The final grid spacing (at the smallest level)
        p.FinalGridSpacingInPhysicalUnits = 16
    if type in ['RIGID', 'EULER', 'AFFINE']:
        # Affine or Euler transform
        if type in ['RIGID', 'EULER']:
            p.Transform = 'EulerTransform'
        else:
            p.Transform = 'AffineTransform'
        # Scales the affine matrix elements compared to the translations,
        # to make sure they are in the same range. In general, it's best to
        # use automatic scales estimation.
        p.AutomaticScalesEstimation = True
        # Automatically guess an initial translation by aligning the
        # geometric centers of the fixed and moving.
        p.AutomaticTransformInitialization = True
    # ===== Optimizer to use =====
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Maximum number of iterations in each resolution level:
    # 200-2000 works usually fine for nonrigid registration.
    # The more, the better, but the longer computation time.
    # This is an important parameter!
    p.MaximumNumberOfIterations = 500
    # The step size of the optimizer, in mm. By default the voxel size is used,
    # which usually works well. In case of unusual high-resolution images
    # (eg histology) it is necessary to increase this value a bit, to the size
    # of the "smallest visible structure" in the image:
    #p.MaximumStepLength = 1.0  Default uses voxel spacing
    # Another optional parameter for the AdaptiveStochasticGradientDescent
    #p.SigmoidInitialTime = 4.0
    # ===== Also interesting parameters =====
    #p.FinalGridSpacingInVoxels = 16
    #p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #p.ErodeMask = "false"
    # Done
    return p
def _compile_params(params, im1):
    """ Build the final parameter dict for a registration.

    Layers the user-supplied ``params`` on top of the fixed
    (image-derived) parameters and the advanced defaults, expands
    scalar grid spacings to one value per dimension, and drops
    mutually-exclusive keys.
    """
    # Layer the parameter sets; later sets take precedence
    combined = _get_fixed_params(im1) + get_advanced_params() + params
    result = combined.as_dict()

    # Grid spacings must be given per image dimension; expand scalars
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in result and not isinstance(result[key], (list, tuple)):
                result[key] = [result[key]] * im1.ndim

    # The two grid-spacing flavors are mutually exclusive; voxels wins
    if 'FinalGridSpacingInVoxels' in result and \
            'FinalGridSpacingInPhysicalUnits' in result:
        result.pop('FinalGridSpacingInPhysicalUnits')

    return result
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.

    Each parameter becomes a line ``(Key value ...)``: booleans are
    written as quoted "true"/"false", floats always carry a decimal
    point, strings are double-quoted, and list/tuple values are
    space-separated. Returns the path of the written file.

    Raises ValueError for values that cannot be serialized.
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')

    # Define helper function
    def valToStr(val):
        # BUGFIX: check bool *by type* and before int. The old test
        # `val in [True, False]` also matched the ints 1 and 0
        # (bool is a subclass of int and `1 == True`), so integer
        # parameter values 0/1 were written as quoted strings.
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'  # Elastix needs the decimal point to see a float
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        else:
            # Previously returned None, which crashed later with a
            # cryptic TypeError on the string join; fail loudly instead.
            raise ValueError('Cannot serialize parameter value: %r' % (val,))

    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'

    # Write text
    f = open(path, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()

    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
register
|
python
|
def register(im1, im2, params, exact_params=False, verbose=1):
# Clear dir
tempdir = get_tempdir()
_clear_temp_dir()
# Reference image
refIm = im1
if isinstance(im1, (tuple,list)):
refIm = im1[0]
# Check parameters
if not exact_params:
params = _compile_params(params, refIm)
if isinstance(params, Parameters):
params = params.as_dict()
# Groupwise?
if im2 is None:
# todo: also allow using a constraint on the "last dimension"
if not isinstance(im1, (tuple,list)):
raise ValueError('im2 is None, but im1 is not a list.')
#
ims = im1
ndim = ims[0].ndim
# Create new image that is a combination of all images
N = len(ims)
new_shape = (N,) + ims[0].shape
im1 = np.zeros(new_shape, ims[0].dtype)
for i in range(N):
im1[i] = ims[i]
# Set parameters
#params['UseCyclicTransform'] = True # to be chosen by user
params['FixedImageDimension'] = im1.ndim
params['MovingImageDimension'] = im1.ndim
params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
params['Metric'] = 'VarianceOverLastDimensionMetric'
params['Transform'] = 'BSplineStackTransform'
params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
params['SampleLastDimensionRandomly'] = True
params['NumSamplesLastDimension'] = 5
params['SubtractMean'] = True
# No smoothing along that dimenson
pyramidsamples = []
for i in range(params['NumberOfResolutions']):
pyramidsamples.extend( [0]+[2**i]*ndim )
pyramidsamples.reverse()
params['ImagePyramidSchedule'] = pyramidsamples
# Get paths of input images
path_im1, path_im2 = _get_image_paths(im1, im2)
# Determine path of parameter file and write params
path_params = _write_parameter_file(params)
# Get path of trafo param file
path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
# Register
if True:
# Compile command to execute
command = [get_elastix_exes()[0],
'-m', path_im1, '-f', path_im2,
'-out', tempdir, '-p', path_params]
if verbose:
print("Calling Elastix to register images ...")
_system3(command, verbose)
# Try and load result
try:
a = _read_image_data('result.0.mhd')
except IOError as why:
tmp = "An error occured during registration: " + str(why)
raise RuntimeError(tmp)
# Find deformation field
if True:
# Compile command to execute
command = [get_elastix_exes()[1],
'-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
_system3(command, verbose)
# Try and load result
try:
b = _read_image_data('deformationField.mhd')
except IOError as why:
tmp = "An error occured during transformation: " + str(why)
raise RuntimeError(tmp)
# Get deformation fields (for each image)
if im2 is None:
fields = [b[i] for i in range(b.shape[0])]
else:
fields = [b]
# Pull apart deformation fields in multiple images
for i in range(len(fields)):
field = fields[i]
if field.ndim == 2:
field = [field[:,d] for d in range(1)]
elif field.ndim == 3:
field = [field[:,:,d] for d in range(2)]
elif field.ndim == 4:
field = [field[:,:,:,d] for d in range(3)]
elif field.ndim == 5:
field = [field[:,:,:,:,d] for d in range(4)]
fields[i] = tuple(field)
if im2 is not None:
fields = fields[0] # For pairwise reg, return 1 field, not a list
# Clean and return
_clear_temp_dir()
return a, fields
|
register(im1, im2, params, exact_params=False, verbose=1)
Perform the registration of `im1` to `im2`, using the given
parameters. Returns `(im1_deformed, field)`, where `field` is a
tuple with arrays describing the deformation for each dimension
(x-y-z order, in world units).
Parameters:
* im1 (ndarray or file location):
The moving image (the one to deform).
* im2 (ndarray or file location):
The static (reference) image.
* params (dict or Parameters):
The parameters of the registration. Default parameters can be
obtained using the `get_default_params()` method. Note that any
parameter known to Elastix can be added to the parameter
struct, which enables tuning the registration in great detail.
See `get_default_params()` and the Elastix docs for more info.
* exact_params (bool):
If True, use the exact given parameters. If False (default)
will process the parameters, checking for incompatible
parameters, extending values to lists if a value needs to be
given for each dimension.
* verbose (int):
Verbosity level. If 0, will not print any progress. If 1, will
print the progress only. If 2, will print the full output
produced by the Elastix executable. Note that error messages
produced by Elastix will be printed regardless of the verbose
level.
If `im1` is a list of images, performs a groupwise registration.
In this case the resulting `field` is a list of fields, each
indicating the deformation to the "average" image.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L403-L554
|
[
"def get_elastix_exes():\n \"\"\" Get the executables for elastix and transformix. Raises an error\n if they cannot be found.\n \"\"\"\n if EXES:\n if EXES[0]:\n return EXES\n else:\n raise RuntimeError('No Elastix executable.')\n\n # Find exe\n elastix, ver = _find_executables('elastix')\n if elastix:\n base, ext = os.path.splitext(elastix)\n base = os.path.dirname(base)\n transformix = os.path.join(base, 'transformix' + ext)\n EXES.extend([elastix, transformix])\n print('Found %s in %r' % (ver, elastix))\n return EXES\n else:\n raise RuntimeError('Could not find Elastix executable. Download '\n 'Elastix from http://elastix.isi.uu.nl/. Pyelastix '\n 'looks for the exe in a series of common locations. '\n 'Set ELASTIX_PATH if necessary.')\n",
"def get_tempdir():\n \"\"\" Get the temporary directory where pyelastix stores its temporary\n files. The directory is specific to the current process and the\n calling thread. Generally, the user does not need this; directories\n are automatically cleaned up. Though Elastix log files are also\n written here.\n \"\"\"\n tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')\n\n # Make sure it exists\n if not os.path.isdir(tempdir):\n os.makedirs(tempdir)\n\n # Clean up all directories for which the process no longer exists\n for fname in os.listdir(tempdir):\n dirName = os.path.join(tempdir, fname)\n # Check if is right kind of dir\n if not (os.path.isdir(dirName) and fname.startswith('id_')):\n continue\n # Get pid and check if its running\n try:\n pid = int(fname.split('_')[1])\n except Exception:\n continue\n if not _is_pid_running(pid):\n _clear_dir(dirName)\n\n # Select dir that included process and thread id\n tid = id(threading.current_thread() if hasattr(threading, 'current_thread')\n else threading.currentThread())\n dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))\n if not os.path.isdir(dir):\n os.mkdir(dir)\n return dir\n",
"def _clear_temp_dir():\n \"\"\" Clear the temporary directory.\n \"\"\"\n tempdir = get_tempdir()\n for fname in os.listdir(tempdir):\n try:\n os.remove( os.path.join(tempdir, fname) )\n except Exception:\n pass\n",
"def _get_image_paths(im1, im2):\n \"\"\" If the images are paths to a file, checks whether the file exist\n and return the paths. If the images are numpy arrays, writes them\n to disk and returns the paths of the new files.\n \"\"\"\n\n paths = []\n for im in [im1, im2]:\n if im is None:\n # Groupwise registration: only one image (ndim+1 dimensions)\n paths.append(paths[0])\n continue\n\n if isinstance(im, str):\n # Given a location\n if os.path.isfile(im1):\n paths.append(im)\n else:\n raise ValueError('Image location does not exist.')\n\n elif isinstance(im, np.ndarray):\n # Given a numpy array\n id = len(paths)+1\n p = _write_image_data(im, id)\n paths.append(p)\n\n else:\n # Given something else ...\n raise ValueError('Invalid input image.')\n\n # Done\n return tuple(paths)\n",
"def _system3(cmd, verbose=False):\n \"\"\" Execute the given command in a subprocess and wait for it to finish.\n A thread is run that prints output of the process if verbose is True.\n \"\"\"\n\n # Init flag\n interrupted = False\n\n # Create progress\n if verbose > 0:\n progress = Progress()\n\n stdout = []\n def poll_process(p):\n while not interrupted:\n msg = p.stdout.readline().decode()\n if msg:\n stdout.append(msg)\n if 'error' in msg.lower():\n print(msg.rstrip())\n if verbose == 1:\n progress.reset()\n elif verbose > 1:\n print(msg.rstrip())\n elif verbose == 1:\n progress.update(msg)\n else:\n break\n time.sleep(0.01)\n #print(\"thread exit\")\n\n # Start process that runs the command\n p = subprocess.Popen(cmd, shell=True, \n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n # Keep reading stdout from it\n # thread.start_new_thread(poll_process, (p,)) Python 2.x\n my_thread = threading.Thread(target=poll_process, args=(p,))\n my_thread.setDaemon(True)\n my_thread.start()\n\n # Wait here\n try:\n while p.poll() is None:\n time.sleep(0.01)\n except KeyboardInterrupt:\n # Set flag\n interrupted = True\n # Kill subprocess\n pid = p.pid\n if hasattr(os,'kill'):\n import signal\n os.kill(pid, signal.SIGKILL)\n elif sys.platform.startswith('win'):\n kernel32 = ctypes.windll.kernel32\n handle = kernel32.OpenProcess(1, 0, pid)\n kernel32.TerminateProcess(handle, 0)\n #os.system(\"TASKKILL /PID \" + str(pid) + \" /F\")\n\n # All good?\n if interrupted:\n raise RuntimeError('Registration process interrupted by the user.')\n if p.returncode:\n stdout.append(p.stdout.read().decode())\n print(''.join(stdout))\n raise RuntimeError('An error occured during the registration.')\n",
"def _compile_params(params, im1):\n \"\"\" Compile the params dictionary:\n * Combine parameters from different sources\n * Perform checks to prevent non-compatible parameters\n * Extend parameters that need a list with one element per dimension\n \"\"\"\n\n # Compile parameters\n p = _get_fixed_params(im1) + get_advanced_params()\n p = p + params\n params = p.as_dict()\n\n # Check parameter dimensions\n if isinstance(im1, np.ndarray):\n lt = (list, tuple)\n for key in [ 'FinalGridSpacingInPhysicalUnits',\n 'FinalGridSpacingInVoxels' ]:\n if key in params.keys() and not isinstance(params[key], lt):\n params[key] = [params[key]] * im1.ndim\n\n # Check parameter removal\n if 'FinalGridSpacingInVoxels' in params:\n if 'FinalGridSpacingInPhysicalUnits' in params:\n params.pop('FinalGridSpacingInPhysicalUnits')\n\n # Done\n return params\n",
"def _write_parameter_file(params):\n \"\"\" Write the parameter file in the format that elaxtix likes.\n \"\"\"\n\n # Get path\n path = os.path.join(get_tempdir(), 'params.txt')\n\n # Define helper function\n def valToStr(val):\n if val in [True, False]:\n return '\"%s\"' % str(val).lower()\n elif isinstance(val, int):\n return str(val)\n elif isinstance(val, float):\n tmp = str(val)\n if not '.' in tmp:\n tmp += '.0'\n return tmp\n elif isinstance(val, str):\n return '\"%s\"' % val\n\n # Compile text\n text = ''\n for key in params:\n val = params[key]\n # Make a string of the values\n if isinstance(val, (list, tuple)):\n vals = [valToStr(v) for v in val]\n val_ = ' '.join(vals)\n else:\n val_ = valToStr(val)\n # Create line and add\n line = '(%s %s)' % (key, val_)\n text += line + '\\n'\n\n # Write text\n f = open(path, 'wb')\n try:\n f.write(text.encode('utf-8'))\n finally:\n f.close()\n\n # Done\n return path\n",
"def _read_image_data( mhd_file):\n \"\"\" Read the resulting image data and return it as a numpy array.\n \"\"\"\n tempdir = get_tempdir()\n\n # Load description from mhd file\n fname = tempdir + '/' + mhd_file\n des = open(fname, 'r').read()\n\n # Get data filename and load raw data\n match = re.findall('ElementDataFile = (.+?)\\n', des)\n fname = tempdir + '/' + match[0]\n data = open(fname, 'rb').read()\n\n # Determine dtype\n match = re.findall('ElementType = (.+?)\\n', des)\n dtype_itk = match[0].upper().strip()\n dtype = DTYPE_ITK2NP.get(dtype_itk, None)\n if dtype is None:\n raise RuntimeError('Unknown ElementType: ' + dtype_itk)\n\n # Create numpy array\n a = np.frombuffer(data, dtype=dtype)\n\n # Determine shape, sampling and origin of the data\n match = re.findall('DimSize = (.+?)\\n', des)\n shape = [int(i) for i in match[0].split(' ')]\n #\n match = re.findall('ElementSpacing = (.+?)\\n', des)\n sampling = [float(i) for i in match[0].split(' ')]\n #\n match = re.findall('Offset = (.+?)\\n', des)\n origin = [float(i) for i in match[0].split(' ')]\n\n # Reverse shape stuff to make z-y-x order\n shape = [s for s in reversed(shape)]\n sampling = [s for s in reversed(sampling)]\n origin = [s for s in reversed(origin)]\n\n # Take vectors/colours into account\n N = np.prod(shape)\n if N != a.size:\n extraDim = int( a.size / N )\n shape = tuple(shape) + (extraDim,)\n sampling = tuple(sampling) + (1.0,)\n origin = tuple(origin) + (0,)\n\n # Check shape\n N = np.prod(shape)\n if N != a.size:\n raise RuntimeError('Cannot apply shape to data.')\n else:\n a.shape = shape\n a = Image(a)\n a.sampling = sampling\n a.origin = origin\n return a\n"
] |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259  # Windows STILL_ACTIVE pseudo exit code
def _is_pid_running(pid):
    """Return whether a process with the given pid is currently running.

    Dispatches to the platform-specific implementation.
    """
    checker = (_is_pid_running_on_windows
               if sys.platform.startswith("win")
               else _is_pid_running_on_unix)
    return checker(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    """Return whether *pid* belongs to a live process (Windows only)."""
    import ctypes.wintypes
    kernel32 = ctypes.windll.kernel32
    # OpenProcess(desiredAccess=1, inheritHandle=0, pid) returns 0 when the
    # process does not exist (or cannot be opened at all).
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False
    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    # NOTE: GetExitCodeProcess returns 0 on *failure*, so is_running ends up
    # True when we could not obtain an exit code at all (assume alive).
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)
    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running.
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
""" Try to find an executable.
"""
exe_name = name + '.exe' * sys.platform.startswith('win')
env_path = os.environ.get(name.upper()+ '_PATH', '')
possible_locations = []
def add(*dirs):
for d in dirs:
if d and d not in possible_locations and os.path.isdir(d):
possible_locations.append(d)
# Get list of possible locations
add(env_path)
try:
add(os.path.dirname(os.path.abspath(__file__)))
except NameError: # __file__ may not exist
pass
add(os.path.dirname(sys.executable))
add(os.path.expanduser('~'))
# Platform specific possible locations
if sys.platform.startswith('win'):
add('c:\\program files', os.environ.get('PROGRAMFILES'),
'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
else:
possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
def do_check_version(exe):
try:
return subprocess.check_output([exe, '--version']).decode().strip()
except Exception:
# print('not a good exe', exe)
return False
# If env path is the exe itself ...
if os.path.isfile(env_path):
ver = do_check_version(env_path)
if ver:
return env_path, ver
# First try to find obvious locations
for d in possible_locations:
for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
# Maybe the exe is on the PATH
ver = do_check_version(exe_name)
if ver:
return exe_name, ver
# Try harder
for d in possible_locations:
for sub in reversed(sorted(os.listdir(d))):
if sub.startswith(name):
exe = os.path.join(d, sub, exe_name)
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
return None, None
EXES = []  # cached [elastix_path, transformix_path]; filled by get_elastix_exes
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.

    The first successful lookup is cached in the module-level EXES
    list; subsequent calls return the cached paths.
    """
    if EXES:
        if EXES[0]:
            return EXES
        else:
            # Defensive: cached first entry is falsy -- treat as no exe
            raise RuntimeError('No Elastix executable.')
    # Find exe
    elastix, ver = _find_executables('elastix')
    if elastix:
        # transformix is assumed to live next to elastix, with the
        # same filename extension
        base, ext = os.path.splitext(elastix)
        base = os.path.dirname(base)
        transformix = os.path.join(base, 'transformix' + ext)
        EXES.extend([elastix, transformix])
        print('Found %s in %r' % (ver, elastix))
        return EXES
    else:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
# %% Code for maintaining the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    root = os.path.join(tempfile.gettempdir(), 'pyelastix')
    if not os.path.isdir(root):
        os.makedirs(root)

    # Garbage-collect subdirectories owned by processes that have exited
    for entry in os.listdir(root):
        candidate = os.path.join(root, entry)
        if not entry.startswith('id_') or not os.path.isdir(candidate):
            continue
        # The owner pid is encoded in the directory name: id_<pid>_<tid>
        try:
            owner_pid = int(entry.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(owner_pid):
            _clear_dir(candidate)

    # Our own directory is keyed on process id and thread id
    get_thread = getattr(threading, 'current_thread', None)
    if get_thread is None:  # very old Python only has currentThread
        get_thread = threading.currentThread
    tid = id(get_thread())
    mydir = os.path.join(root, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(mydir):
        os.mkdir(mydir)
    return mydir
def _clear_temp_dir():
    """ Delete all files in this process/thread's temporary directory.

    Files that cannot be removed are silently left in place; the
    directory itself is kept.
    """
    mydir = get_tempdir()
    for entry in os.listdir(mydir):
        path = os.path.join(mydir, entry)
        try:
            os.remove(path)
        except Exception:
            pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
    """ Execute the given command in a subprocess and wait for it to finish.
    A thread is run that prints output of the process if verbose is True.

    Raises RuntimeError when the user interrupts (Ctrl-C) or when the
    process exits with a nonzero return code.
    """
    # Init flag; the poller thread below reads this to know when to stop
    interrupted = False
    # Create progress
    if verbose > 0:
        progress = Progress()
    stdout = []
    def poll_process(p):
        # Runs in a daemon thread: collect (and optionally print) the
        # process output line by line until the stream ends.
        while not interrupted:
            msg = p.stdout.readline().decode()
            if msg:
                stdout.append(msg)
                # Lines that look like errors are always surfaced
                if 'error' in msg.lower():
                    print(msg.rstrip())
                    if verbose == 1:
                        progress.reset()
                elif verbose > 1:
                    print(msg.rstrip())
                elif verbose == 1:
                    progress.update(msg)
            else:
                break  # EOF: the process closed its stdout
            time.sleep(0.01)
        #print("thread exit")
    # Start process that runs the command
    # NOTE(review): cmd is a list but shell=True is passed; on POSIX
    # that makes only cmd[0] the command line and the rest shell args.
    # Confirm this behaves as intended on the supported platforms.
    p = subprocess.Popen(cmd, shell=True, 
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Keep reading stdout from it
    # thread.start_new_thread(poll_process, (p,)) Python 2.x
    my_thread = threading.Thread(target=poll_process, args=(p,))
    # NOTE(review): setDaemon() is deprecated on modern Python in favor
    # of the .daemon attribute; kept for old-version compatibility.
    my_thread.setDaemon(True)
    my_thread.start()
    # Wait here
    try:
        while p.poll() is None:
            time.sleep(0.01)
    except KeyboardInterrupt:
        # Set flag (the poller thread sees this via its closure)
        interrupted = True
        # Kill subprocess
        pid = p.pid
        if hasattr(os,'kill'):
            import signal
            os.kill(pid, signal.SIGKILL)
        elif sys.platform.startswith('win'):
            kernel32 = ctypes.windll.kernel32
            handle = kernel32.OpenProcess(1, 0, pid)
            kernel32.TerminateProcess(handle, 0)
            #os.system("TASKKILL /PID " + str(pid) + " /F")
    # All good?
    if interrupted:
        raise RuntimeError('Registration process interrupted by the user.')
    if p.returncode:
        # On failure, dump everything that was captured (plus any rest)
        stdout.append(p.stdout.read().decode())
        print(''.join(stdout))
        raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
# Module-level dtype lookup tables (numpy name <-> ITK element type)
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Parses Elastix stdout lines and prints a one-line progress
    display (current resolution level and iteration number).
    """

    def __init__(self):
        # Resolution level most recently announced by Elastix
        self._level = 0
        self.reset()

    def update(self, s):
        """ Inspect one line of Elastix output and update the display. """
        # A line like "Resolution: 2" announces a new pyramid level
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        # Iteration lines are tab-separated; first field is the iter nr
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)

    def get_int(self, s):
        """ Parse *s* as an int, returning 0 when parsing fails. """
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        """ Forget the current message and start on a fresh line. """
        self._message = ''
        print()

    def show_progress(self, iter):
        """ Overwrite the previous progress message with a new one. """
        # Backspace characters erase what was printed before
        rem = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(rem + self._message)
# %% The Elastix registration function
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)
    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).
    Parameters:
    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.
    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image: used to derive dimension/dtype dependent params
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise? (im2 is None signals groupwise registration of a list)
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images
        # (stacked along a new first axis)
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters that switch Elastix into groupwise mode
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along that dimension (the stack axis gets 0)
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (arrays are written to the temp dir)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Get path of trafo param file (written by the elastix executable)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Find deformation field (run transformix on the found transform)
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Pull apart deformation fields in multiple images: the last axis
    # holds the vector components, one per spatial dimension
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0]  # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # NOTE(review): legacy intensity scaling by 1/3000; multiplying by a
    # float also promotes integer arrays to float, which changes the
    # dtype recorded in the mhd header. Confirm this is still intended.
    im = im* (1.0/3000)
    # Create text: mhd header template with <placeholders> filled below
    lines = [ "ObjectType = Image",
              "NDims = <ndim>",
              "BinaryData = True",
              "BinaryDataByteOrderMSB = False",
              "CompressedData = False",
              #"TransformMatrix = <transmatrix>",
              "Offset = <origin>",
              "CenterOfRotation = <centrot>",
              "ElementSpacing = <sampling>",
              "DimSize = <shape>",
              "ElementType = <dtype>",
              "ElementDataFile = <fname>",
              "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Get shape, sampling and origin (default to unit spacing at zero)
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]
    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get data type
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
    # Set mhd text: substitute the placeholders
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    # <transmatrix> is only present in the (commented-out) template line
    if im.ndim==2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim==3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim==4:
        pass # ???
    # Write data file
    f = open(fname_raw, 'wb')
    try:
        f.write(im.data)
    finally:
        f.close()
    # Write mhd file
    f = open(fname_mhd, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data( mhd_file):
    """ Read the resulting image data and return it as a numpy array.

    Parses the mhd header in the temp dir to find the raw data file,
    dtype, shape, spacing and origin, and returns an `Image` (ndarray
    subclass) carrying `sampling` and `origin` attributes (z-y-x
    order). Raises RuntimeError for unknown element types or when the
    data size does not match the declared shape.
    """
    tempdir = get_tempdir()
    # Load description from mhd file
    fname = tempdir + '/' + mhd_file
    des = open(fname, 'r').read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    data = open(fname, 'rb').read()
    # Determine dtype (map ITK element type to a numpy dtype name)
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array
    # NOTE(review): frombuffer over bytes yields a read-only array;
    # callers that want to modify the result must copy it first.
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse shape stuff to make z-y-x order
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: if the raw data holds more
    # elements than voxels, the surplus is an extra (component) axis
    N = np.prod(shape)
    if N != a.size:
        extraDim = int( a.size / N )
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
    a = Image(a)
    a.sampling = sampling
    a.origin = origin
    return a
class Image(np.ndarray):
def __new__(cls, array):
try:
ob = array.view(cls)
except AttributeError: # pragma: no cover
# Just return the original; no metadata on the array in Pypy!
return array
return ob
# %% Code related to parameters
class Parameters:
""" Struct object to represent the parameters for the Elastix
registration toolkit. Sets of parameters can be combined by
addition. (When adding `p1 + p2`, any parameters present in both
objects will take the value that the parameter has in `p2`.)
Use `get_default_params()` to get a Parameters struct with sensible
default values.
"""
def as_dict(self):
""" Returns the parameters as a dictionary.
"""
tmp = {}
tmp.update(self.__dict__)
return tmp
def __repr__(self):
return '<Parameters instance with %i parameters>' % len(self.__dict__)
def __str__(self):
# Get alignment value
c = 0
for key in self.__dict__:
c = max(c, len(key))
# How many chars left (to print on less than 80 lines)
charsLeft = 79 - (c+6)
s = '<%i parameters>\n' % len(self.__dict__)
for key in self.__dict__.keys():
valuestr = repr(self.__dict__[key])
if len(valuestr) > charsLeft:
valuestr = valuestr[:charsLeft-3] + '...'
s += key.rjust(c+4) + ": %s\n" % (valuestr)
return s
def __add__(self, other):
p = Parameters()
p.__dict__.update(self.__dict__)
p.__dict__.update(other.__dict__)
return p
def _get_fixed_params(im):
""" Parameters that the user has no influence on. Mostly chosen
bases on the input images.
"""
p = Parameters()
if not isinstance(im, np.ndarray):
return p
# Dimension of the inputs
p.FixedImageDimension = im.ndim
p.MovingImageDimension = im.ndim
# Always write result, so I can verify
p.WriteResultImage = True
# How to write the result
tmp = DTYPE_NP2ITK[im.dtype.name]
p.ResultImagePixelType = tmp.split('_')[-1].lower()
p.ResultImageFormat = "mhd"
# Done
return p
def get_advanced_params():
""" Get `Parameters` struct with parameters that most users do not
want to think about.
"""
p = Parameters()
# Internal format used during the registration process
p.FixedInternalImagePixelType = "float"
p.MovingInternalImagePixelType = "float"
# Image direction
p.UseDirectionCosines = True
# In almost all cases you'd want multi resolution
p.Registration = 'MultiResolutionRegistration'
# Pyramid options
# *RecursiveImagePyramid downsamples the images
# *SmoothingImagePyramid does not downsample
p.FixedImagePyramid = "FixedRecursiveImagePyramid"
p.MovingImagePyramid = "MovingRecursiveImagePyramid"
# Whether transforms are combined by composition or by addition.
# It does not influence the results very much.
p.HowToCombineTransforms = "Compose"
# For out of range pixels
p.DefaultPixelValue = 0
# Interpolator used during interpolation and its order
# 1 means linear interpolation, 3 means cubic.
p.Interpolator = "BSplineInterpolator"
p.BSplineInterpolationOrder = 1
# Interpolator used during interpolation of final level, and its order
p.ResampleInterpolator = "FinalBSplineInterpolator"
p.FinalBSplineInterpolationOrder = 3
# According to the manual, there is currently only one resampler
p.Resampler = "DefaultResampler"
# Done
return p
def get_default_params(type='BSPLINE'):
""" get_default_params(type='BSPLINE')
Get `Parameters` struct with parameters that users may want to tweak.
The given `type` specifies the type of allowed transform, and can
be 'RIGID', 'AFFINE', 'BSPLINE'.
For detail on what parameters are available and how they should be used,
we refer to the Elastix documentation. Here is a description of the
most common parameters:
* Transform (str):
Can be 'BSplineTransform', 'EulerTransform', or
'AffineTransform'. The transformation to apply. Chosen based on `type`.
* FinalGridSpacingInPhysicalUnits (int):
When using the BSplineTransform, the final spacing of the grid.
This controls the smoothness of the final deformation.
* AutomaticScalesEstimation (bool):
When using a rigid or affine transform. Scales the affine matrix
elements compared to the translations, to make sure they are in
the same range. In general, it's best to use automatic scales
estimation.
* AutomaticTransformInitialization (bool):
When using a rigid or affine transform. Automatically guess an
initial translation by aligning the geometric centers of the
fixed and moving.
* NumberOfResolutions (int):
Most registration algorithms adopt a multiresolution approach
to direct the solution towards a global optimum and to speed
up the process. This parameter specifies the number of scales
to apply the registration at. (default 4)
* MaximumNumberOfIterations (int):
Maximum number of iterations in each resolution level.
200-2000 works usually fine for nonrigid registration.
The more, the better, but the longer computation time.
This is an important parameter! (default 500).
"""
# Init
p = Parameters()
type = type.upper()
# ===== Metric to use =====
p.Metric = 'AdvancedMattesMutualInformation'
# Number of grey level bins in each resolution level,
# for the mutual information. 16 or 32 usually works fine.
# sets default value for NumberOf[Fixed/Moving]HistogramBins
p.NumberOfHistogramBins = 32
# Taking samples for mutual information
p.ImageSampler = 'RandomCoordinate'
p.NumberOfSpatialSamples = 2048
p.NewSamplesEveryIteration = True
# ====== Transform to use ======
# The number of levels in the image pyramid
p.NumberOfResolutions = 4
if type in ['B', 'BSPLINE', 'B-SPLINE']:
# Bspline transform
p.Transform = 'BSplineTransform'
# The final grid spacing (at the smallest level)
p.FinalGridSpacingInPhysicalUnits = 16
if type in ['RIGID', 'EULER', 'AFFINE']:
# Affine or Euler transform
if type in ['RIGID', 'EULER']:
p.Transform = 'EulerTransform'
else:
p.Transform = 'AffineTransform'
# Scales the affine matrix elements compared to the translations,
# to make sure they are in the same range. In general, it's best to
# use automatic scales estimation.
p.AutomaticScalesEstimation = True
# Automatically guess an initial translation by aligning the
# geometric centers of the fixed and moving.
p.AutomaticTransformInitialization = True
# ===== Optimizer to use =====
p.Optimizer = 'AdaptiveStochasticGradientDescent'
# Maximum number of iterations in each resolution level:
# 200-2000 works usually fine for nonrigid registration.
# The more, the better, but the longer computation time.
# This is an important parameter!
p.MaximumNumberOfIterations = 500
# The step size of the optimizer, in mm. By default the voxel size is used.
# which usually works well. In case of unusual high-resolution images
# (eg histology) it is necessary to increase this value a bit, to the size
# of the "smallest visible structure" in the image:
#p.MaximumStepLength = 1.0 Default uses voxel spaceing
# Another optional parameter for the AdaptiveStochasticGradientDescent
#p.SigmoidInitialTime = 4.0
# ===== Also interesting parameters =====
#p.FinalGridSpacingInVoxels = 16
#p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
#p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
#p.ErodeMask = "false"
# Done
return p
def _compile_params(params, im1):
""" Compile the params dictionary:
* Combine parameters from different sources
* Perform checks to prevent non-compatible parameters
* Extend parameters that need a list with one element per dimension
"""
# Compile parameters
p = _get_fixed_params(im1) + get_advanced_params()
p = p + params
params = p.as_dict()
# Check parameter dimensions
if isinstance(im1, np.ndarray):
lt = (list, tuple)
for key in [ 'FinalGridSpacingInPhysicalUnits',
'FinalGridSpacingInVoxels' ]:
if key in params.keys() and not isinstance(params[key], lt):
params[key] = [params[key]] * im1.ndim
# Check parameter removal
if 'FinalGridSpacingInVoxels' in params:
if 'FinalGridSpacingInPhysicalUnits' in params:
params.pop('FinalGridSpacingInPhysicalUnits')
# Done
return params
def _write_parameter_file(params):
""" Write the parameter file in the format that elaxtix likes.
"""
# Get path
path = os.path.join(get_tempdir(), 'params.txt')
# Define helper function
def valToStr(val):
if val in [True, False]:
return '"%s"' % str(val).lower()
elif isinstance(val, int):
return str(val)
elif isinstance(val, float):
tmp = str(val)
if not '.' in tmp:
tmp += '.0'
return tmp
elif isinstance(val, str):
return '"%s"' % val
# Compile text
text = ''
for key in params:
val = params[key]
# Make a string of the values
if isinstance(val, (list, tuple)):
vals = [valToStr(v) for v in val]
val_ = ' '.join(vals)
else:
val_ = valToStr(val)
# Create line and add
line = '(%s %s)' % (key, val_)
text += line + '\n'
# Write text
f = open(path, 'wb')
try:
f.write(text.encode('utf-8'))
finally:
f.close()
# Done
return path
|
almarklein/pyelastix
|
pyelastix.py
|
_write_image_data
|
python
|
def _write_image_data(im, id):
im = im* (1.0/3000)
# Create text
lines = [ "ObjectType = Image",
"NDims = <ndim>",
"BinaryData = True",
"BinaryDataByteOrderMSB = False",
"CompressedData = False",
#"TransformMatrix = <transmatrix>",
"Offset = <origin>",
"CenterOfRotation = <centrot>",
"ElementSpacing = <sampling>",
"DimSize = <shape>",
"ElementType = <dtype>",
"ElementDataFile = <fname>",
"" ]
text = '\n'.join(lines)
# Determine file names
tempdir = get_tempdir()
fname_raw_ = 'im%i.raw' % id
fname_raw = os.path.join(tempdir, fname_raw_)
fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
# Get shape, sampling and origin
shape = im.shape
if hasattr(im, 'sampling'): sampling = im.sampling
else: sampling = [1 for s in im.shape]
if hasattr(im, 'origin'): origin = im.origin
else: origin = [0 for s in im.shape]
# Make all shape stuff in x-y-z order and make it string
shape = ' '.join([str(s) for s in reversed(shape)])
sampling = ' '.join([str(s) for s in reversed(sampling)])
origin = ' '.join([str(s) for s in reversed(origin)])
# Get data type
dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
if dtype_itk is None:
raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
# Set mhd text
text = text.replace('<fname>', fname_raw_)
text = text.replace('<ndim>', str(im.ndim))
text = text.replace('<shape>', shape)
text = text.replace('<sampling>', sampling)
text = text.replace('<origin>', origin)
text = text.replace('<dtype>', dtype_itk)
text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
if im.ndim==2:
text = text.replace('<transmatrix>', '1 0 0 1')
elif im.ndim==3:
text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
elif im.ndim==4:
pass # ???
# Write data file
f = open(fname_raw, 'wb')
try:
f.write(im.data)
finally:
f.close()
# Write mhd file
f = open(fname_mhd, 'wb')
try:
f.write(text.encode('utf-8'))
finally:
f.close()
# Done, return path of mhd file
return fname_mhd
|
Write a numpy array to disk in the form of a .raw and .mhd file.
The id is the image sequence number (1 or 2). Returns the path of
the mhd file.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L557-L632
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
"""Get whether a process with the given pid is currently running.
"""
if sys.platform.startswith("win"):
return _is_pid_running_on_windows(pid)
else:
return _is_pid_running_on_unix(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
import ctypes.wintypes
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
if handle == 0:
return False
# If the process exited recently, a pid may still exist for the handle.
# So, check if we can get the exit code.
exit_code = ctypes.wintypes.DWORD()
is_running = (
kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
kernel32.CloseHandle(handle)
# See if we couldn't get the exit code or the exit code indicates that the
# process is still running.
return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
""" Try to find an executable.
"""
exe_name = name + '.exe' * sys.platform.startswith('win')
env_path = os.environ.get(name.upper()+ '_PATH', '')
possible_locations = []
def add(*dirs):
for d in dirs:
if d and d not in possible_locations and os.path.isdir(d):
possible_locations.append(d)
# Get list of possible locations
add(env_path)
try:
add(os.path.dirname(os.path.abspath(__file__)))
except NameError: # __file__ may not exist
pass
add(os.path.dirname(sys.executable))
add(os.path.expanduser('~'))
# Platform specific possible locations
if sys.platform.startswith('win'):
add('c:\\program files', os.environ.get('PROGRAMFILES'),
'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
else:
possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
def do_check_version(exe):
try:
return subprocess.check_output([exe, '--version']).decode().strip()
except Exception:
# print('not a good exe', exe)
return False
# If env path is the exe itself ...
if os.path.isfile(env_path):
ver = do_check_version(env_path)
if ver:
return env_path, ver
# First try to find obvious locations
for d in possible_locations:
for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
# Maybe the exe is on the PATH
ver = do_check_version(exe_name)
if ver:
return exe_name, ver
# Try harder
for d in possible_locations:
for sub in reversed(sorted(os.listdir(d))):
if sub.startswith(name):
exe = os.path.join(d, sub, exe_name)
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
return None, None
EXES = []
def get_elastix_exes():
""" Get the executables for elastix and transformix. Raises an error
if they cannot be found.
"""
if EXES:
if EXES[0]:
return EXES
else:
raise RuntimeError('No Elastix executable.')
# Find exe
elastix, ver = _find_executables('elastix')
if elastix:
base, ext = os.path.splitext(elastix)
base = os.path.dirname(base)
transformix = os.path.join(base, 'transformix' + ext)
EXES.extend([elastix, transformix])
print('Found %s in %r' % (ver, elastix))
return EXES
else:
raise RuntimeError('Could not find Elastix executable. Download '
'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
'looks for the exe in a series of common locations. '
'Set ELASTIX_PATH if necessary.')
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
""" Get the temporary directory where pyelastix stores its temporary
files. The directory is specific to the current process and the
calling thread. Generally, the user does not need this; directories
are automatically cleaned up. Though Elastix log files are also
written here.
"""
tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
# Make sure it exists
if not os.path.isdir(tempdir):
os.makedirs(tempdir)
# Clean up all directories for which the process no longer exists
for fname in os.listdir(tempdir):
dirName = os.path.join(tempdir, fname)
# Check if is right kind of dir
if not (os.path.isdir(dirName) and fname.startswith('id_')):
continue
# Get pid and check if its running
try:
pid = int(fname.split('_')[1])
except Exception:
continue
if not _is_pid_running(pid):
_clear_dir(dirName)
# Select dir that included process and thread id
tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
else threading.currentThread())
dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
if not os.path.isdir(dir):
os.mkdir(dir)
return dir
def _clear_temp_dir():
""" Clear the temporary directory.
"""
tempdir = get_tempdir()
for fname in os.listdir(tempdir):
try:
os.remove( os.path.join(tempdir, fname) )
except Exception:
pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
def __init__(self):
self._level = 0
self.reset()
def update(self, s):
# Detect resolution
if s.startswith('Resolution:'):
self._level = self.get_int( s.split(':')[1] )
# Check if nr
if '\t' in s:
iter = self.get_int( s.split('\t',1)[0] )
if iter:
self.show_progress(iter)
def get_int(self, s):
nr = 0
try:
nr = int(s)
except Exception:
pass
return nr
def reset(self):
self._message = ''
print()
def show_progress(self, iter):
# Remove previous message
rem = '\b' * (len(self._message)+1)
# Create message, and print
self._message = 'resolution %i, iter %i' % (self._level, iter)
print(rem + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
""" register(im1, im2, params, exact_params=False, verbose=1)
Perform the registration of `im1` to `im2`, using the given
parameters. Returns `(im1_deformed, field)`, where `field` is a
tuple with arrays describing the deformation for each dimension
(x-y-z order, in world units).
Parameters:
* im1 (ndarray or file location):
The moving image (the one to deform).
* im2 (ndarray or file location):
The static (reference) image.
* params (dict or Parameters):
The parameters of the registration. Default parameters can be
obtained using the `get_default_params()` method. Note that any
parameter known to Elastix can be added to the parameter
struct, which enables tuning the registration in great detail.
See `get_default_params()` and the Elastix docs for more info.
* exact_params (bool):
If True, use the exact given parameters. If False (default)
will process the parameters, checking for incompatible
parameters, extending values to lists if a value needs to be
given for each dimension.
* verbose (int):
Verbosity level. If 0, will not print any progress. If 1, will
print the progress only. If 2, will print the full output
produced by the Elastix executable. Note that error messages
produced by Elastix will be printed regardless of the verbose
level.
If `im1` is a list of images, performs a groupwise registration.
In this case the resulting `field` is a list of fields, each
indicating the deformation to the "average" image.
"""
# Clear dir
tempdir = get_tempdir()
_clear_temp_dir()
# Reference image
refIm = im1
if isinstance(im1, (tuple,list)):
refIm = im1[0]
# Check parameters
if not exact_params:
params = _compile_params(params, refIm)
if isinstance(params, Parameters):
params = params.as_dict()
# Groupwise?
if im2 is None:
# todo: also allow using a constraint on the "last dimension"
if not isinstance(im1, (tuple,list)):
raise ValueError('im2 is None, but im1 is not a list.')
#
ims = im1
ndim = ims[0].ndim
# Create new image that is a combination of all images
N = len(ims)
new_shape = (N,) + ims[0].shape
im1 = np.zeros(new_shape, ims[0].dtype)
for i in range(N):
im1[i] = ims[i]
# Set parameters
#params['UseCyclicTransform'] = True # to be chosen by user
params['FixedImageDimension'] = im1.ndim
params['MovingImageDimension'] = im1.ndim
params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
params['Metric'] = 'VarianceOverLastDimensionMetric'
params['Transform'] = 'BSplineStackTransform'
params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
params['SampleLastDimensionRandomly'] = True
params['NumSamplesLastDimension'] = 5
params['SubtractMean'] = True
# No smoothing along that dimenson
pyramidsamples = []
for i in range(params['NumberOfResolutions']):
pyramidsamples.extend( [0]+[2**i]*ndim )
pyramidsamples.reverse()
params['ImagePyramidSchedule'] = pyramidsamples
# Get paths of input images
path_im1, path_im2 = _get_image_paths(im1, im2)
# Determine path of parameter file and write params
path_params = _write_parameter_file(params)
# Get path of trafo param file
path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
# Register
if True:
# Compile command to execute
command = [get_elastix_exes()[0],
'-m', path_im1, '-f', path_im2,
'-out', tempdir, '-p', path_params]
if verbose:
print("Calling Elastix to register images ...")
_system3(command, verbose)
# Try and load result
try:
a = _read_image_data('result.0.mhd')
except IOError as why:
tmp = "An error occured during registration: " + str(why)
raise RuntimeError(tmp)
# Find deformation field
if True:
# Compile command to execute
command = [get_elastix_exes()[1],
'-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
_system3(command, verbose)
# Try and load result
try:
b = _read_image_data('deformationField.mhd')
except IOError as why:
tmp = "An error occured during transformation: " + str(why)
raise RuntimeError(tmp)
# Get deformation fields (for each image)
if im2 is None:
fields = [b[i] for i in range(b.shape[0])]
else:
fields = [b]
# Pull apart deformation fields in multiple images
for i in range(len(fields)):
field = fields[i]
if field.ndim == 2:
field = [field[:,d] for d in range(1)]
elif field.ndim == 3:
field = [field[:,:,d] for d in range(2)]
elif field.ndim == 4:
field = [field[:,:,:,d] for d in range(3)]
elif field.ndim == 5:
field = [field[:,:,:,:,d] for d in range(4)]
fields[i] = tuple(field)
if im2 is not None:
fields = fields[0] # For pairwise reg, return 1 field, not a list
# Clean and return
_clear_temp_dir()
return a, fields
def _write_image_data(im, id):
""" Write a numpy array to disk in the form of a .raw and .mhd file.
The id is the image sequence number (1 or 2). Returns the path of
the mhd file.
"""
im = im* (1.0/3000)
# Create text
lines = [ "ObjectType = Image",
"NDims = <ndim>",
"BinaryData = True",
"BinaryDataByteOrderMSB = False",
"CompressedData = False",
#"TransformMatrix = <transmatrix>",
"Offset = <origin>",
"CenterOfRotation = <centrot>",
"ElementSpacing = <sampling>",
"DimSize = <shape>",
"ElementType = <dtype>",
"ElementDataFile = <fname>",
"" ]
text = '\n'.join(lines)
# Determine file names
tempdir = get_tempdir()
fname_raw_ = 'im%i.raw' % id
fname_raw = os.path.join(tempdir, fname_raw_)
fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
# Get shape, sampling and origin
shape = im.shape
if hasattr(im, 'sampling'): sampling = im.sampling
else: sampling = [1 for s in im.shape]
if hasattr(im, 'origin'): origin = im.origin
else: origin = [0 for s in im.shape]
# Make all shape stuff in x-y-z order and make it string
shape = ' '.join([str(s) for s in reversed(shape)])
sampling = ' '.join([str(s) for s in reversed(sampling)])
origin = ' '.join([str(s) for s in reversed(origin)])
# Get data type
dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
if dtype_itk is None:
raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
# Set mhd text
text = text.replace('<fname>', fname_raw_)
text = text.replace('<ndim>', str(im.ndim))
text = text.replace('<shape>', shape)
text = text.replace('<sampling>', sampling)
text = text.replace('<origin>', origin)
text = text.replace('<dtype>', dtype_itk)
text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
if im.ndim==2:
text = text.replace('<transmatrix>', '1 0 0 1')
elif im.ndim==3:
text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
elif im.ndim==4:
pass # ???
# Write data file
f = open(fname_raw, 'wb')
try:
f.write(im.data)
finally:
f.close()
# Write mhd file
f = open(fname_mhd, 'wb')
try:
f.write(text.encode('utf-8'))
finally:
f.close()
# Done, return path of mhd file
return fname_mhd
def _read_image_data( mhd_file):
""" Read the resulting image data and return it as a numpy array.
"""
tempdir = get_tempdir()
# Load description from mhd file
fname = tempdir + '/' + mhd_file
des = open(fname, 'r').read()
# Get data filename and load raw data
match = re.findall('ElementDataFile = (.+?)\n', des)
fname = tempdir + '/' + match[0]
data = open(fname, 'rb').read()
# Determine dtype
match = re.findall('ElementType = (.+?)\n', des)
dtype_itk = match[0].upper().strip()
dtype = DTYPE_ITK2NP.get(dtype_itk, None)
if dtype is None:
raise RuntimeError('Unknown ElementType: ' + dtype_itk)
# Create numpy array
a = np.frombuffer(data, dtype=dtype)
# Determine shape, sampling and origin of the data
match = re.findall('DimSize = (.+?)\n', des)
shape = [int(i) for i in match[0].split(' ')]
#
match = re.findall('ElementSpacing = (.+?)\n', des)
sampling = [float(i) for i in match[0].split(' ')]
#
match = re.findall('Offset = (.+?)\n', des)
origin = [float(i) for i in match[0].split(' ')]
# Reverse shape stuff to make z-y-x order
shape = [s for s in reversed(shape)]
sampling = [s for s in reversed(sampling)]
origin = [s for s in reversed(origin)]
# Take vectors/colours into account
N = np.prod(shape)
if N != a.size:
extraDim = int( a.size / N )
shape = tuple(shape) + (extraDim,)
sampling = tuple(sampling) + (1.0,)
origin = tuple(origin) + (0,)
# Check shape
N = np.prod(shape)
if N != a.size:
raise RuntimeError('Cannot apply shape to data.')
else:
a.shape = shape
a = Image(a)
a.sampling = sampling
a.origin = origin
return a
class Image(np.ndarray):
def __new__(cls, array):
try:
ob = array.view(cls)
except AttributeError: # pragma: no cover
# Just return the original; no metadata on the array in Pypy!
return array
return ob
# %% Code related to parameters
class Parameters:
""" Struct object to represent the parameters for the Elastix
registration toolkit. Sets of parameters can be combined by
addition. (When adding `p1 + p2`, any parameters present in both
objects will take the value that the parameter has in `p2`.)
Use `get_default_params()` to get a Parameters struct with sensible
default values.
"""
def as_dict(self):
""" Returns the parameters as a dictionary.
"""
tmp = {}
tmp.update(self.__dict__)
return tmp
def __repr__(self):
return '<Parameters instance with %i parameters>' % len(self.__dict__)
def __str__(self):
# Get alignment value
c = 0
for key in self.__dict__:
c = max(c, len(key))
# How many chars left (to print on less than 80 lines)
charsLeft = 79 - (c+6)
s = '<%i parameters>\n' % len(self.__dict__)
for key in self.__dict__.keys():
valuestr = repr(self.__dict__[key])
if len(valuestr) > charsLeft:
valuestr = valuestr[:charsLeft-3] + '...'
s += key.rjust(c+4) + ": %s\n" % (valuestr)
return s
def __add__(self, other):
p = Parameters()
p.__dict__.update(self.__dict__)
p.__dict__.update(other.__dict__)
return p
def _get_fixed_params(im):
""" Parameters that the user has no influence on. Mostly chosen
bases on the input images.
"""
p = Parameters()
if not isinstance(im, np.ndarray):
return p
# Dimension of the inputs
p.FixedImageDimension = im.ndim
p.MovingImageDimension = im.ndim
# Always write result, so I can verify
p.WriteResultImage = True
# How to write the result
tmp = DTYPE_NP2ITK[im.dtype.name]
p.ResultImagePixelType = tmp.split('_')[-1].lower()
p.ResultImageFormat = "mhd"
# Done
return p
def get_advanced_params():
""" Get `Parameters` struct with parameters that most users do not
want to think about.
"""
p = Parameters()
# Internal format used during the registration process
p.FixedInternalImagePixelType = "float"
p.MovingInternalImagePixelType = "float"
# Image direction
p.UseDirectionCosines = True
# In almost all cases you'd want multi resolution
p.Registration = 'MultiResolutionRegistration'
# Pyramid options
# *RecursiveImagePyramid downsamples the images
# *SmoothingImagePyramid does not downsample
p.FixedImagePyramid = "FixedRecursiveImagePyramid"
p.MovingImagePyramid = "MovingRecursiveImagePyramid"
# Whether transforms are combined by composition or by addition.
# It does not influence the results very much.
p.HowToCombineTransforms = "Compose"
# For out of range pixels
p.DefaultPixelValue = 0
# Interpolator used during interpolation and its order
# 1 means linear interpolation, 3 means cubic.
p.Interpolator = "BSplineInterpolator"
p.BSplineInterpolationOrder = 1
# Interpolator used during interpolation of final level, and its order
p.ResampleInterpolator = "FinalBSplineInterpolator"
p.FinalBSplineInterpolationOrder = 3
# According to the manual, there is currently only one resampler
p.Resampler = "DefaultResampler"
# Done
return p
def get_default_params(type='BSPLINE'):
    """Return a `Parameters` struct with the commonly tweaked settings.

    The `type` argument selects the transform class and may be 'RIGID',
    'AFFINE' or 'BSPLINE' (case-insensitive; the aliases 'EULER', 'B'
    and 'B-SPLINE' are accepted too).

    See the Elastix documentation for everything that can be set; the
    most relevant parameters are:

    * Transform (str): 'BSplineTransform', 'EulerTransform' or
      'AffineTransform'; chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int): final grid spacing when
      using the BSplineTransform; controls the smoothness of the final
      deformation.
    * AutomaticScalesEstimation (bool): for rigid/affine transforms,
      scale the matrix elements relative to the translations so they
      share a range; automatic estimation is generally best.
    * AutomaticTransformInitialization (bool): for rigid/affine
      transforms, guess an initial translation by aligning the
      geometric centers of the fixed and moving image.
    * NumberOfResolutions (int): number of scales in the
      multiresolution pyramid (default 4).
    * MaximumNumberOfIterations (int): iterations per resolution level;
      200-2000 usually works fine for nonrigid registration -- more is
      better but slower. Important parameter (default 500).
    """
    params = Parameters()
    kind = type.upper()
    # --- Metric ---
    params.Metric = 'AdvancedMattesMutualInformation'
    # Grey-level bins per resolution level for the mutual information;
    # 16 or 32 usually works fine (sets the default for
    # NumberOf[Fixed/Moving]HistogramBins)
    params.NumberOfHistogramBins = 32
    # Sampling strategy for the mutual information
    params.ImageSampler = 'RandomCoordinate'
    params.NumberOfSpatialSamples = 2048
    params.NewSamplesEveryIteration = True
    # --- Transform ---
    # Levels in the image pyramid
    params.NumberOfResolutions = 4
    if kind in ('B', 'BSPLINE', 'B-SPLINE'):
        params.Transform = 'BSplineTransform'
        # Final grid spacing (at the smallest level)
        params.FinalGridSpacingInPhysicalUnits = 16
    if kind in ('RIGID', 'EULER', 'AFFINE'):
        if kind in ('RIGID', 'EULER'):
            params.Transform = 'EulerTransform'
        else:
            params.Transform = 'AffineTransform'
        # Scale matrix elements vs translations into a common range
        params.AutomaticScalesEstimation = True
        # Initialize the translation from the geometric image centers
        params.AutomaticTransformInitialization = True
    # --- Optimizer ---
    params.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Iterations per resolution level; an important parameter.
    # (MaximumStepLength defaults to the voxel spacing; increase it for
    # unusually high-resolution images such as histology.)
    params.MaximumNumberOfIterations = 500
    # Other parameters worth knowing about:
    #   FinalGridSpacingInVoxels, GridSpacingSchedule,
    #   ImagePyramidSchedule, ErodeMask, SigmoidInitialTime
    return params
def _compile_params(params, im1):
    """Merge user params with the fixed/advanced defaults and check them.

    Combines the parameter sources (user values win), expands scalar
    grid spacings to one value per image dimension, and drops
    incompatible parameter combinations. Returns a plain dict.
    """
    combined = _get_fixed_params(im1) + get_advanced_params() + params
    result = combined.as_dict()
    # Grid spacing must be given per dimension; expand scalar values
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in result and not isinstance(result[key], (list, tuple)):
                result[key] = [result[key]] * im1.ndim
    # The two grid-spacing flavours are mutually exclusive; the
    # voxel-based one takes precedence
    if 'FinalGridSpacingInVoxels' in result:
        if 'FinalGridSpacingInPhysicalUnits' in result:
            result.pop('FinalGridSpacingInPhysicalUnits')
    return result
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.

    Returns the path of the written file (params.txt in the temp dir).
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # Check bool *before* int: isinstance(True, int) is True in
        # Python, and the previous check (`val in [True, False]`) also
        # matched the integers 0 and 1 (since 0 == False and 1 == True),
        # so e.g. BSplineInterpolationOrder = 1 was written as "true".
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            # Elastix expects floats to carry a decimal point
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (lists become space-separated)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text
    with open(path, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
_read_image_data
|
python
|
def _read_image_data(mhd_file):
    """Read the resulting image data and return it as a numpy array.

    Parses the .mhd header written by elastix/transformix in the temp
    dir, loads the accompanying raw file, and attaches `sampling` and
    `origin` metadata (z-y-x order) to the returned `Image` array.

    Raises RuntimeError for an unknown element type or a shape mismatch.
    """
    tempdir = get_tempdir()
    # Load description from mhd file (context managers close the file
    # handles promptly; the original left them open)
    fname = tempdir + '/' + mhd_file
    with open(fname, 'r') as f:
        des = f.read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    with open(fname, 'rb') as f:
        data = f.read()
    # Determine dtype
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array from the raw bytes
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse to z-y-x order (the mhd header stores x-y-z)
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: the remaining elements form an
    # extra trailing dimension (e.g. deformation vector components)
    N = np.prod(shape)
    if N != a.size:
        extraDim = int(a.size / N)
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
        a = Image(a)
        a.sampling = sampling
        a.origin = origin
    return a
|
Read the resulting image data and return it as a numpy array.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L635-L691
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30

# On Windows, GetExitCodeProcess reports this special exit code
# (STILL_ACTIVE) for a process that has not finished yet.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Return whether a process with the given pid is currently running."""
    on_windows = sys.platform.startswith("win")
    checker = _is_pid_running_on_windows if on_windows else _is_pid_running_on_unix
    return checker(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    """Check for a live pid on Windows via the process-handle API."""
    import ctypes.wintypes

    kernel32 = ctypes.windll.kernel32
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False
    # A pid may remain valid shortly after the process exited, so ask
    # for the exit code as well.
    exit_code = ctypes.wintypes.DWORD()
    query_failed = kernel32.GetExitCodeProcess(
        handle, ctypes.byref(exit_code)) == 0
    kernel32.CloseHandle(handle)
    # Running if we could not obtain the exit code, or if the code is
    # the special STILL_ACTIVE marker.
    return query_failed or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executables
def _find_executables(name):
    """ Try to find an executable.

    Searches a series of common install locations (plus the directory or
    file named by the ``<NAME>_PATH`` environment variable) for `name`,
    verifying each candidate by running it with ``--version``. Returns
    ``(exe_path, version_string)``, or ``(None, None)`` when nothing
    usable is found.
    """
    # On Windows the executable carries an .exe suffix
    exe_name = name + '.exe' * sys.platform.startswith('win')
    # E.g. ELASTIX_PATH; may point at a directory or at the exe itself
    env_path = os.environ.get(name.upper()+ '_PATH', '')
    possible_locations = []
    def add(*dirs):
        # Collect existing, not-yet-listed directories
        for d in dirs:
            if d and d not in possible_locations and os.path.isdir(d):
                possible_locations.append(d)
    # Get list of possible locations
    add(env_path)
    try:
        add(os.path.dirname(os.path.abspath(__file__)))
    except NameError: # __file__ may not exist
        pass
    add(os.path.dirname(sys.executable))
    add(os.path.expanduser('~'))
    # Platform specific possible locations
    if sys.platform.startswith('win'):
        add('c:\\program files', os.environ.get('PROGRAMFILES'),
            'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
    else:
        # NOTE: appended directly, bypassing add()'s isdir check
        possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
    def do_check_version(exe):
        # Probe the candidate: a working executable prints its version;
        # any failure (missing file, bad exe) yields False
        try:
            return subprocess.check_output([exe, '--version']).decode().strip()
        except Exception:
            return False
    # If env path is the exe itself ...
    if os.path.isfile(env_path):
        ver = do_check_version(env_path)
        if ver:
            return env_path, ver
    # First try the obvious locations: <dir>/<exe> and <dir>/<name>/<exe>
    for d in possible_locations:
        for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
            if os.path.isfile(exe):
                ver = do_check_version(exe)
                if ver:
                    return exe, ver
    # Maybe the exe is on the PATH
    ver = do_check_version(exe_name)
    if ver:
        return exe_name, ver
    # Try harder: versioned subdirectories (e.g. 'elastix-4.9'), newest first
    for d in possible_locations:
        for sub in reversed(sorted(os.listdir(d))):
            if sub.startswith(name):
                exe = os.path.join(d, sub, exe_name)
                if os.path.isfile(exe):
                    ver = do_check_version(exe)
                    if ver:
                        return exe, ver
    return None, None
EXES = []
def get_elastix_exes():
    """Return the paths of the elastix and transformix executables.

    The result is cached at module level. Raises RuntimeError when the
    executables cannot be located.
    """
    # Serve from the cache when we searched before
    if EXES:
        if not EXES[0]:
            raise RuntimeError('No Elastix executable.')
        return EXES
    # Locate elastix; transformix lives next to it
    elastix, ver = _find_executables('elastix')
    if not elastix:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
    base, ext = os.path.splitext(elastix)
    base = os.path.dirname(base)
    transformix = os.path.join(base, 'transformix' + ext)
    EXES.extend([elastix, transformix])
    print('Found %s in %r' % (ver, elastix))
    return EXES
# %% Code for maintaining the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
    # Make sure it exists
    if not os.path.isdir(tempdir):
        os.makedirs(tempdir)
    # Clean up all directories for which the process no longer exists
    # (subdirectories are named 'id_<pid>_<threadid>')
    for fname in os.listdir(tempdir):
        dirName = os.path.join(tempdir, fname)
        # Check if is right kind of dir
        if not (os.path.isdir(dirName) and fname.startswith('id_')):
            continue
        # Get pid and check if its running; malformed names are skipped
        try:
            pid = int(fname.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(dirName)
    # Select dir that included process and thread id
    # (current_thread is the modern spelling, currentThread the legacy one)
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(dir):
        os.mkdir(dir)
    return dir
def _clear_temp_dir():
    """Best-effort removal of all files in this process' temp directory."""
    tempdir = get_tempdir()
    for entry in os.listdir(tempdir):
        try:
            os.remove(os.path.join(tempdir, entry))
        except Exception:
            pass  # ignore files that cannot be removed
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """Parses elastix console output and prints a progress line.

    Tracks the current resolution level (from "Resolution: N" lines)
    and the iteration number (from tab-separated iteration lines) fed
    to `update()`.
    """
    def __init__(self):
        # Current resolution level, parsed from the console output
        self._level = 0
        self.reset()
    def update(self, s):
        # Detect resolution
        if s.startswith('Resolution:'):
            self._level = self.get_int( s.split(':')[1] )
        # Check if line starts with an iteration number (tab-separated)
        if '\t' in s:
            iter = self.get_int( s.split('\t',1)[0] )
            if iter:
                self.show_progress(iter)
    def get_int(self, s):
        # Parse an int from a string; returns 0 on failure
        nr = 0
        try:
            nr = int(s)
        except Exception:
            pass
        return nr
    def reset(self):
        # Forget the previous message and start on a fresh line
        self._message = ''
        print()
    def show_progress(self, iter):
        # Remove previous message with backspaces.
        # NOTE(review): print() appends a newline, so the backspaces do
        # not actually overwrite the previous line -- each update prints
        # on its own line; confirm whether end='' was intended here.
        rem = '\b' * (len(self._message)+1)
        # Create message, and print
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(rem + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)

    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).

    Parameters:

    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.

    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir (this process/thread's private temp dir)
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image: used to derive dimension/dtype dependent params
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise?
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images:
        # the list of ndim-D images becomes one (ndim+1)-D stack
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters required for a groupwise (stack) registration;
        # these override whatever the user supplied
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along the stack dimension (schedule entry 0)
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (arrays are written to the temp dir)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Get path of trafo param file (written by elastix, used by transformix)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Find deformation field (run transformix on the obtained transform)
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Pull apart deformation fields in multiple images: the last axis
    # holds the vector components, split into one array per axis
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0] # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.

    Honors optional `sampling` and `origin` attributes on the array
    (z-y-x order); raises ValueError for dtypes without an ITK mapping.
    """
    # NOTE: the data is written as-is. A leftover `im = im * (1.0/3000)`
    # rescale was removed here: it silently corrupted the intensities
    # and promoted every input dtype to float64.
    # Create text
    lines = [ "ObjectType = Image",
              "NDims = <ndim>",
              "BinaryData = True",
              "BinaryDataByteOrderMSB = False",
              "CompressedData = False",
              #"TransformMatrix = <transmatrix>",
              "Offset = <origin>",
              "CenterOfRotation = <centrot>",
              "ElementSpacing = <sampling>",
              "DimSize = <shape>",
              "ElementType = <dtype>",
              "ElementDataFile = <fname>",
              "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Get shape, sampling and origin (defaults: unit spacing, zero origin)
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]
    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get data type
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
    # Set mhd text
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    if im.ndim == 2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim == 3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim == 4:
        pass  # not handled; the TransformMatrix line is commented out anyway
    # Write data file
    with open(fname_raw, 'wb') as f:
        f.write(im.data)
    # Write mhd file
    with open(fname_mhd, 'wb') as f:
        f.write(text.encode('utf-8'))
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data(mhd_file):
    """ Read the resulting image data and return it as a numpy array.

    Parses the .mhd header written by elastix/transformix in the temp
    dir, loads the accompanying raw file, and attaches `sampling` and
    `origin` metadata (z-y-x order) to the returned `Image` array.

    Raises RuntimeError for an unknown element type or a shape mismatch.
    """
    tempdir = get_tempdir()
    # Load description from mhd file (context managers close the file
    # handles promptly; the original left them open)
    fname = tempdir + '/' + mhd_file
    with open(fname, 'r') as f:
        des = f.read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    with open(fname, 'rb') as f:
        data = f.read()
    # Determine dtype
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array from the raw bytes
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse to z-y-x order (the mhd header stores x-y-z)
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: the remaining elements form an
    # extra trailing dimension (e.g. deformation vector components)
    N = np.prod(shape)
    if N != a.size:
        extraDim = int(a.size / N)
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
        a = Image(a)
        a.sampling = sampling
        a.origin = origin
    return a
class Image(np.ndarray):
    """ndarray subclass that can carry `sampling` and `origin` metadata."""

    def __new__(cls, array):
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            # No .view() on this object (e.g. no array metadata on PyPy);
            # fall back to returning the input unchanged.
            return array
# %% Code related to parameters
class Parameters:
    """Simple attribute container for Elastix registration parameters.

    Parameter sets can be combined by addition: in `p1 + p2`, values
    from `p2` override those from `p1` for keys present in both. Use
    `get_default_params()` to obtain a sensible starting point.
    """
    def as_dict(self):
        """Return the parameters as a (shallow-copied) dictionary."""
        return dict(self.__dict__)
    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)
    def __str__(self):
        # Width of the longest key, for right-aligned display
        width = max([0] + [len(k) for k in self.__dict__])
        # Characters available for the value (keep lines under 80 chars)
        chars_left = 79 - (width + 6)
        lines = ['<%i parameters>' % len(self.__dict__)]
        for key, value in self.__dict__.items():
            value_str = repr(value)
            if len(value_str) > chars_left:
                value_str = value_str[:chars_left - 3] + '...'
            lines.append(key.rjust(width + 4) + ': ' + value_str)
        return '\n'.join(lines) + '\n'
    def __add__(self, other):
        combined = Parameters()
        combined.__dict__.update(self.__dict__)
        combined.__dict__.update(other.__dict__)
        return combined
def _get_fixed_params(im):
    """Build the parameters the user has no say over.

    These are derived from the input image itself (dimensionality and
    output pixel type); for non-array input an empty set is returned.
    """
    params = Parameters()
    if not isinstance(im, np.ndarray):
        return params
    # Both images share the dimensionality of the given array
    params.FixedImageDimension = im.ndim
    params.MovingImageDimension = im.ndim
    # Always write the result image so it can be verified afterwards
    params.WriteResultImage = True
    # Derive the output pixel type from the numpy dtype,
    # e.g. 'MET_FLOAT' -> 'float'
    itk_name = DTYPE_NP2ITK[im.dtype.name]
    params.ResultImagePixelType = itk_name.split('_')[-1].lower()
    params.ResultImageFormat = "mhd"
    return params
def get_advanced_params():
    """Return a `Parameters` struct with settings that most users do
    not need to change.
    """
    params = Parameters()
    # Internal pixel format used during registration
    params.FixedInternalImagePixelType = "float"
    params.MovingInternalImagePixelType = "float"
    # Honor the image direction cosines
    params.UseDirectionCosines = True
    # Multi-resolution registration is almost always what you want
    params.Registration = 'MultiResolutionRegistration'
    # Pyramids: the *Recursive* variants downsample the images, the
    # *Smoothing* variants only smooth them
    params.FixedImagePyramid = "FixedRecursiveImagePyramid"
    params.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Combine transforms by composition; addition behaves similarly
    params.HowToCombineTransforms = "Compose"
    # Value for pixels that fall outside the moving image
    params.DefaultPixelValue = 0
    # Interpolation during optimization: order 1 is linear
    params.Interpolator = "BSplineInterpolator"
    params.BSplineInterpolationOrder = 1
    # Interpolation of the final result: order 3 is cubic
    params.ResampleInterpolator = "FinalBSplineInterpolator"
    params.FinalBSplineInterpolationOrder = 3
    # The manual lists only one resampler
    params.Resampler = "DefaultResampler"
    return params
def get_default_params(type='BSPLINE'):
    """Return a `Parameters` struct with the commonly tweaked settings.

    The `type` argument selects the transform class and may be 'RIGID',
    'AFFINE' or 'BSPLINE' (case-insensitive; the aliases 'EULER', 'B'
    and 'B-SPLINE' are accepted too).

    See the Elastix documentation for everything that can be set; the
    most relevant parameters are:

    * Transform (str): 'BSplineTransform', 'EulerTransform' or
      'AffineTransform'; chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int): final grid spacing when
      using the BSplineTransform; controls the smoothness of the final
      deformation.
    * AutomaticScalesEstimation (bool): for rigid/affine transforms,
      scale the matrix elements relative to the translations so they
      share a range; automatic estimation is generally best.
    * AutomaticTransformInitialization (bool): for rigid/affine
      transforms, guess an initial translation by aligning the
      geometric centers of the fixed and moving image.
    * NumberOfResolutions (int): number of scales in the
      multiresolution pyramid (default 4).
    * MaximumNumberOfIterations (int): iterations per resolution level;
      200-2000 usually works fine for nonrigid registration -- more is
      better but slower. Important parameter (default 500).
    """
    params = Parameters()
    kind = type.upper()
    # --- Metric ---
    params.Metric = 'AdvancedMattesMutualInformation'
    # Grey-level bins per resolution level for the mutual information;
    # 16 or 32 usually works fine (sets the default for
    # NumberOf[Fixed/Moving]HistogramBins)
    params.NumberOfHistogramBins = 32
    # Sampling strategy for the mutual information
    params.ImageSampler = 'RandomCoordinate'
    params.NumberOfSpatialSamples = 2048
    params.NewSamplesEveryIteration = True
    # --- Transform ---
    # Levels in the image pyramid
    params.NumberOfResolutions = 4
    if kind in ('B', 'BSPLINE', 'B-SPLINE'):
        params.Transform = 'BSplineTransform'
        # Final grid spacing (at the smallest level)
        params.FinalGridSpacingInPhysicalUnits = 16
    if kind in ('RIGID', 'EULER', 'AFFINE'):
        if kind in ('RIGID', 'EULER'):
            params.Transform = 'EulerTransform'
        else:
            params.Transform = 'AffineTransform'
        # Scale matrix elements vs translations into a common range
        params.AutomaticScalesEstimation = True
        # Initialize the translation from the geometric image centers
        params.AutomaticTransformInitialization = True
    # --- Optimizer ---
    params.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Iterations per resolution level; an important parameter.
    # (MaximumStepLength defaults to the voxel spacing; increase it for
    # unusually high-resolution images such as histology.)
    params.MaximumNumberOfIterations = 500
    # Other parameters worth knowing about:
    #   FinalGridSpacingInVoxels, GridSpacingSchedule,
    #   ImagePyramidSchedule, ErodeMask, SigmoidInitialTime
    return params
def _compile_params(params, im1):
    """ Compile the params dictionary:
      * Combine parameters from different sources
      * Perform checks to prevent non-compatible parameters
      * Extend parameters that need a list with one element per dimension
    """
    # Merge: fixed params < advanced params < user params (last one wins)
    combined = _get_fixed_params(im1) + get_advanced_params() + params
    d = combined.as_dict()
    # Grid-spacing values must be specified per image dimension
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in d and not isinstance(d[key], (list, tuple)):
                d[key] = [d[key]] * im1.ndim
    # The two grid-spacing flavors are mutually exclusive; voxels wins
    if 'FinalGridSpacingInVoxels' in d and 'FinalGridSpacingInPhysicalUnits' in d:
        d.pop('FinalGridSpacingInPhysicalUnits')
    return d
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.

    Returns the path of the written file (params.txt in the temp dir).
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # Check bool before int: isinstance(True, int) is also True, and the
        # previous "val in [True, False]" test wrongly matched the ints 0
        # and 1 (1 == True), turning e.g. an interpolation order of 1 into
        # the string "true".
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            # Elastix distinguishes ints from floats; force a decimal point
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (lists become space-separated)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add, e.g. (Transform "BSplineTransform")
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text
    f = open(path, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
_get_fixed_params
|
python
|
def _get_fixed_params(im):
p = Parameters()
if not isinstance(im, np.ndarray):
return p
# Dimension of the inputs
p.FixedImageDimension = im.ndim
p.MovingImageDimension = im.ndim
# Always write result, so I can verify
p.WriteResultImage = True
# How to write the result
tmp = DTYPE_NP2ITK[im.dtype.name]
p.ResultImagePixelType = tmp.split('_')[-1].lower()
p.ResultImageFormat = "mhd"
# Done
return p
|
Parameters that the user has no influence on. Mostly chosen
bases on the input images.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L751-L774
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Get whether a process with the given pid is currently running.
    """
    # Dispatch to the platform-specific implementation
    checker = (_is_pid_running_on_windows if sys.platform.startswith("win")
               else _is_pid_running_on_unix)
    return checker(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    # Windows-only: query the process via the Win32 API through ctypes.
    import ctypes.wintypes
    kernel32 = ctypes.windll.kernel32
    # OpenProcess returns 0 (NULL handle) when no process with this pid
    # can be opened; desired-access flag 1 requests minimal rights.
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False
    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)
    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running (_STILL_ACTIVE == 259 is the sentinel
    # GetExitCodeProcess uses for live processes).
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
    """ Try to find an executable.

    Looks for ``name`` (with '.exe' appended on Windows) via the
    <NAME>_PATH environment variable, next to this module, next to the
    Python interpreter, in the user's home dir, in platform-specific
    program directories, and on the PATH. A candidate only counts if it
    answers ``--version``. Returns ``(exe_path, version_string)`` or
    ``(None, None)`` when nothing suitable is found.
    """
    # '.exe' * False == '' on non-Windows platforms
    exe_name = name + '.exe' * sys.platform.startswith('win')
    # E.g. ELASTIX_PATH; may point at a directory or at the exe itself
    env_path = os.environ.get(name.upper()+ '_PATH', '')
    possible_locations = []
    def add(*dirs):
        # Collect existing, not-yet-seen directories
        for d in dirs:
            if d and d not in possible_locations and os.path.isdir(d):
                possible_locations.append(d)
    # Get list of possible locations
    add(env_path)
    try:
        add(os.path.dirname(os.path.abspath(__file__)))
    except NameError:  # __file__ may not exist
        pass
    add(os.path.dirname(sys.executable))
    add(os.path.expanduser('~'))
    # Platform specific possible locations
    if sys.platform.startswith('win'):
        add('c:\\program files', os.environ.get('PROGRAMFILES'),
            'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
    else:
        possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
    def do_check_version(exe):
        # Return the version string, or False if the call fails
        try:
            return subprocess.check_output([exe, '--version']).decode().strip()
        except Exception:
            # print('not a good exe', exe)
            return False
    # If env path is the exe itself ...
    if os.path.isfile(env_path):
        ver = do_check_version(env_path)
        if ver:
            return env_path, ver
    # First try to find obvious locations: dir/exe and dir/name/exe
    for d in possible_locations:
        for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
            if os.path.isfile(exe):
                ver = do_check_version(exe)
                if ver:
                    return exe, ver
    # Maybe the exe is on the PATH
    ver = do_check_version(exe_name)
    if ver:
        return exe_name, ver
    # Try harder: scan for versioned subdirs like 'elastix-4.9',
    # newest (lexicographically last) first
    for d in possible_locations:
        for sub in reversed(sorted(os.listdir(d))):
            if sub.startswith(name):
                exe = os.path.join(d, sub, exe_name)
                if os.path.isfile(exe):
                    ver = do_check_version(exe)
                    if ver:
                        return exe, ver
    return None, None
EXES = []
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.
    """
    # Return the cached result if we searched before
    if EXES:
        if not EXES[0]:
            raise RuntimeError('No Elastix executable.')
        return EXES
    # Search the file system for the elastix executable
    elastix, ver = _find_executables('elastix')
    if not elastix:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
    # Transformix lives next to elastix, with the same extension
    root, ext = os.path.splitext(elastix)
    folder = os.path.dirname(root)
    EXES.extend([elastix, os.path.join(folder, 'transformix' + ext)])
    print('Found %s in %r' % (ver, elastix))
    return EXES
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    # All per-process dirs live under <system-temp>/pyelastix
    tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
    # Make sure it exists
    if not os.path.isdir(tempdir):
        os.makedirs(tempdir)
    # Clean up all directories for which the process no longer exists
    # (leftovers from crashed or killed runs)
    for fname in os.listdir(tempdir):
        dirName = os.path.join(tempdir, fname)
        # Check if is right kind of dir; our dirs are named id_<pid>_<tid>
        if not (os.path.isdir(dirName) and fname.startswith('id_')):
            continue
        # Get pid and check if its running
        try:
            pid = int(fname.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(dirName)
    # Select dir that included process and thread id, so concurrent
    # registrations (threads or processes) do not clobber each other
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(dir):
        os.mkdir(dir)
    return dir
def _clear_temp_dir():
    """ Clear the temporary directory.
    """
    # Remove the files inside our own temp dir, keeping the dir itself
    tempdir = get_tempdir()
    for entry in os.listdir(tempdir):
        try:
            os.remove(os.path.join(tempdir, entry))
        except Exception:
            pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Parses Elastix console output and prints a compact progress line
    of the form 'resolution R, iter N'.
    """

    def __init__(self):
        self._level = 0
        self.reset()

    def update(self, s):
        # A new resolution level is announced as "Resolution: N"
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        # Iteration lines are tab-separated, starting with the iter number
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)

    def get_int(self, s):
        # Parse an int, returning 0 when parsing fails
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        self._message = ''
        print()

    def show_progress(self, iter):
        # Remove previous message
        rem = '\b' * (len(self._message) + 1)
        # Create message, and print
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(rem + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)
    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).
    Parameters:
    * im1 (ndarray or file location):
    The moving image (the one to deform).
    * im2 (ndarray or file location):
    The static (reference) image.
    * params (dict or Parameters):
    The parameters of the registration. Default parameters can be
    obtained using the `get_default_params()` method. Note that any
    parameter known to Elastix can be added to the parameter
    struct, which enables tuning the registration in great detail.
    See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
    If True, use the exact given parameters. If False (default)
    will process the parameters, checking for incompatible
    parameters, extending values to lists if a value needs to be
    given for each dimension.
    * verbose (int):
    Verbosity level. If 0, will not print any progress. If 1, will
    print the progress only. If 2, will print the full output
    produced by the Elastix executable. Note that error messages
    produced by Elastix will be printed regardless of the verbose
    level.
    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir (start each registration from a clean temp dir)
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image (used to derive dims/dtype for the parameters)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise?
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images: the group
        # is stacked along a new first axis
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters required by the groupwise (stack) registration
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along that dimenson (the stack axis gets factor 0)
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (arrays are written to disk first)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Get path of trafo param file (written by Elastix into tempdir)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result (Elastix writes result.0.mhd into tempdir)
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Find deformation field via transformix
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Pull apart deformation fields in multiple images: the last axis of
    # the field holds the per-dimension displacement components
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0]  # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # NOTE: an earlier revision rescaled the data here (im * 1.0/3000),
    # which silently corrupted intensities and promoted the dtype to
    # float64. The array is now written unmodified.
    # Create text (MetaImage header template)
    lines = [ "ObjectType = Image",
        "NDims = <ndim>",
        "BinaryData = True",
        "BinaryDataByteOrderMSB = False",
        "CompressedData = False",
        #"TransformMatrix = <transmatrix>",
        "Offset = <origin>",
        "CenterOfRotation = <centrot>",
        "ElementSpacing = <sampling>",
        "DimSize = <shape>",
        "ElementType = <dtype>",
        "ElementDataFile = <fname>",
        "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Get shape, sampling and origin (fall back to unit spacing at 0)
    shape = im.shape
    if hasattr(im, 'sampling'):
        sampling = im.sampling
    else:
        sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'):
        origin = im.origin
    else:
        origin = [0 for s in im.shape]
    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get data type
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
    # Set mhd text
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    if im.ndim==2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim==3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim==4:
        pass # ???
    # Write data file
    # NOTE(review): im.data assumes a C-contiguous array — confirm callers
    # never pass non-contiguous views
    f = open(fname_raw, 'wb')
    try:
        f.write(im.data)
    finally:
        f.close()
    # Write mhd file
    f = open(fname_mhd, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data( mhd_file):
    """ Read the resulting image data and return it as a numpy array.

    `mhd_file` is a filename relative to the pyelastix temp dir. Returns
    an `Image` (ndarray subclass) with `sampling` and `origin` attributes
    in z-y-x order.
    """
    tempdir = get_tempdir()
    # Load description from mhd file
    fname = tempdir + '/' + mhd_file
    des = open(fname, 'r').read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    data = open(fname, 'rb').read()
    # Determine dtype from the header's ElementType field
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array (flat; shaped below)
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse shape stuff to make z-y-x order (headers are x-y-z)
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: when the raw data is larger than
    # the voxel count, the extra factor is a trailing component axis
    # (e.g. the per-dimension components of a deformation field)
    N = np.prod(shape)
    if N != a.size:
        extraDim = int( a.size / N )
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
        a = Image(a)
        a.sampling = sampling
        a.origin = origin
    return a
class Image(np.ndarray):
    """ ndarray subclass used to carry `sampling` and `origin` metadata.
    """
    def __new__(cls, array):
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            # Just return the original; no metadata on the array in Pypy!
            return array
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)
    Use `get_default_params()` to get a Parameters struct with sensible
    default values.
    """

    def as_dict(self):
        """ Returns the parameters as a dictionary.
        """
        return dict(self.__dict__)

    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)

    def __str__(self):
        # Right-align keys to the longest key name
        width = max([0] + [len(k) for k in self.__dict__])
        # Truncate values so each line stays under 80 chars
        chars_left = 79 - (width + 6)
        s = '<%i parameters>\n' % len(self.__dict__)
        for key, value in self.__dict__.items():
            valuestr = repr(value)
            if len(valuestr) > chars_left:
                valuestr = valuestr[:chars_left - 3] + '...'
            s += key.rjust(width + 4) + ": %s\n" % (valuestr)
        return s

    def __add__(self, other):
        # The right-hand operand's values win on key collisions
        combined = Parameters()
        combined.__dict__.update(self.__dict__)
        combined.__dict__.update(other.__dict__)
        return combined
def _get_fixed_params(im):
    """ Parameters that the user has no influence on. Mostly chosen
    bases on the input images.
    """
    p = Parameters()
    if not isinstance(im, np.ndarray):
        return p
    # Dimensionality follows the input array
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write the result, so it can be read back and verified
    p.WriteResultImage = True
    # Result pixel type matches the input dtype, e.g. MET_SHORT -> 'short'
    itk_type = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_type.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.
    """
    p = Parameters()
    # Register internally with float images
    p.FixedInternalImagePixelType = "float"
    p.MovingInternalImagePixelType = "float"
    # Respect the image direction cosines
    p.UseDirectionCosines = True
    # Multi-resolution registration is nearly always what you want
    p.Registration = 'MultiResolutionRegistration'
    # Recursive pyramids downsample the images; smoothing pyramids do not
    p.FixedImagePyramid = "FixedRecursiveImagePyramid"
    p.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Compose transforms rather than adding them (little practical effect)
    p.HowToCombineTransforms = "Compose"
    # Value used for pixels that map outside the moving image
    p.DefaultPixelValue = 0
    # Interpolation during optimization: order 1 (linear) is fast
    p.Interpolator = "BSplineInterpolator"
    p.BSplineInterpolationOrder = 1
    # Interpolation of the final result: order 3 (cubic) for quality
    p.ResampleInterpolator = "FinalBSplineInterpolator"
    p.FinalBSplineInterpolationOrder = 3
    # According to the manual, there is currently only one resampler
    p.Resampler = "DefaultResampler"
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')
    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.
    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:
    * Transform (str):
    Can be 'BSplineTransform', 'EulerTransform', or
    'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
    When using the BSplineTransform, the final spacing of the grid.
    This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
    When using a rigid or affine transform. Scales the affine matrix
    elements compared to the translations, to make sure they are in
    the same range. In general, it's best to use automatic scales
    estimation.
    * AutomaticTransformInitialization (bool):
    When using a rigid or affine transform. Automatically guess an
    initial translation by aligning the geometric centers of the
    fixed and moving.
    * NumberOfResolutions (int):
    Most registration algorithms adopt a multiresolution approach
    to direct the solution towards a global optimum and to speed
    up the process. This parameter specifies the number of scales
    to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
    Maximum number of iterations in each resolution level.
    200-2000 works usually fine for nonrigid registration.
    The more, the better, but the longer computation time.
    This is an important parameter! (default 500).
    """
    p = Parameters()
    kind = type.upper()
    # ===== Metric to use =====
    p.Metric = 'AdvancedMattesMutualInformation'
    # Grey-value bins per resolution level for the mutual information;
    # 16 or 32 usually works fine. Sets the default for
    # NumberOf[Fixed/Moving]HistogramBins.
    p.NumberOfHistogramBins = 32
    # Sample randomly (and freshly each iteration) for the MI estimate
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True
    # ====== Transform to use ======
    # The number of levels in the image pyramid
    p.NumberOfResolutions = 4
    if kind in ('B', 'BSPLINE', 'B-SPLINE'):
        p.Transform = 'BSplineTransform'
        # The final grid spacing (at the smallest level)
        p.FinalGridSpacingInPhysicalUnits = 16
    if kind in ('RIGID', 'EULER', 'AFFINE'):
        p.Transform = ('AffineTransform' if kind == 'AFFINE'
                       else 'EulerTransform')
        # Automatic scaling keeps matrix elements and translations in the
        # same range, which in general works best
        p.AutomaticScalesEstimation = True
        # Initialize by aligning the geometric centers of both images
        p.AutomaticTransformInitialization = True
    # ===== Optimizer to use =====
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Maximum number of iterations per resolution level: 200-2000 usually
    # works fine for nonrigid registration. More is better but slower.
    # This is an important parameter!
    p.MaximumNumberOfIterations = 500
    # The optimizer step size (mm) defaults to the voxel size, which
    # usually works well; raise it for unusually high-resolution images:
    #p.MaximumStepLength = 1.0
    # Another optional parameter for the AdaptiveStochasticGradientDescent:
    #p.SigmoidInitialTime = 4.0
    # ===== Also interesting parameters =====
    #p.FinalGridSpacingInVoxels = 16
    #p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #p.ErodeMask = "false"
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
      * Combine parameters from different sources
      * Perform checks to prevent non-compatible parameters
      * Extend parameters that need a list with one element per dimension
    """
    # Merge: fixed params < advanced params < user params (last one wins)
    combined = _get_fixed_params(im1) + get_advanced_params() + params
    d = combined.as_dict()
    # Grid-spacing values must be specified per image dimension
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in d and not isinstance(d[key], (list, tuple)):
                d[key] = [d[key]] * im1.ndim
    # The two grid-spacing flavors are mutually exclusive; voxels wins
    if 'FinalGridSpacingInVoxels' in d and 'FinalGridSpacingInPhysicalUnits' in d:
        d.pop('FinalGridSpacingInPhysicalUnits')
    return d
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.

    Returns the path of the written file (params.txt in the temp dir).
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # Check bool before int: isinstance(True, int) is also True, and the
        # previous "val in [True, False]" test wrongly matched the ints 0
        # and 1 (1 == True), turning e.g. an interpolation order of 1 into
        # the string "true".
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            # Elastix distinguishes ints from floats; force a decimal point
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (lists become space-separated)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add, e.g. (Transform "BSplineTransform")
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text
    f = open(path, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
get_advanced_params
|
python
|
def get_advanced_params():
p = Parameters()
# Internal format used during the registration process
p.FixedInternalImagePixelType = "float"
p.MovingInternalImagePixelType = "float"
# Image direction
p.UseDirectionCosines = True
# In almost all cases you'd want multi resolution
p.Registration = 'MultiResolutionRegistration'
# Pyramid options
# *RecursiveImagePyramid downsamples the images
# *SmoothingImagePyramid does not downsample
p.FixedImagePyramid = "FixedRecursiveImagePyramid"
p.MovingImagePyramid = "MovingRecursiveImagePyramid"
# Whether transforms are combined by composition or by addition.
# It does not influence the results very much.
p.HowToCombineTransforms = "Compose"
# For out of range pixels
p.DefaultPixelValue = 0
# Interpolator used during interpolation and its order
# 1 means linear interpolation, 3 means cubic.
p.Interpolator = "BSplineInterpolator"
p.BSplineInterpolationOrder = 1
# Interpolator used during interpolation of final level, and its order
p.ResampleInterpolator = "FinalBSplineInterpolator"
p.FinalBSplineInterpolationOrder = 3
# According to the manual, there is currently only one resampler
p.Resampler = "DefaultResampler"
# Done
return p
|
Get `Parameters` struct with parameters that most users do not
want to think about.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L777-L820
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Get whether a process with the given pid is currently running.
    """
    # Dispatch to the platform-specific implementation
    checker = (_is_pid_running_on_windows if sys.platform.startswith("win")
               else _is_pid_running_on_unix)
    return checker(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    # Windows-only: query the process via the Win32 API through ctypes.
    import ctypes.wintypes
    kernel32 = ctypes.windll.kernel32
    # OpenProcess returns 0 (NULL handle) when no process with this pid
    # can be opened; desired-access flag 1 requests minimal rights.
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False
    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)
    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running (_STILL_ACTIVE == 259 is the sentinel
    # GetExitCodeProcess uses for live processes).
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
    """ Try to find an executable.

    Looks for ``name`` (with '.exe' appended on Windows) via the
    <NAME>_PATH environment variable, next to this module, next to the
    Python interpreter, in the user's home dir, in platform-specific
    program directories, and on the PATH. A candidate only counts if it
    answers ``--version``. Returns ``(exe_path, version_string)`` or
    ``(None, None)`` when nothing suitable is found.
    """
    # '.exe' * False == '' on non-Windows platforms
    exe_name = name + '.exe' * sys.platform.startswith('win')
    # E.g. ELASTIX_PATH; may point at a directory or at the exe itself
    env_path = os.environ.get(name.upper()+ '_PATH', '')
    possible_locations = []
    def add(*dirs):
        # Collect existing, not-yet-seen directories
        for d in dirs:
            if d and d not in possible_locations and os.path.isdir(d):
                possible_locations.append(d)
    # Get list of possible locations
    add(env_path)
    try:
        add(os.path.dirname(os.path.abspath(__file__)))
    except NameError:  # __file__ may not exist
        pass
    add(os.path.dirname(sys.executable))
    add(os.path.expanduser('~'))
    # Platform specific possible locations
    if sys.platform.startswith('win'):
        add('c:\\program files', os.environ.get('PROGRAMFILES'),
            'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
    else:
        possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
    def do_check_version(exe):
        # Return the version string, or False if the call fails
        try:
            return subprocess.check_output([exe, '--version']).decode().strip()
        except Exception:
            # print('not a good exe', exe)
            return False
    # If env path is the exe itself ...
    if os.path.isfile(env_path):
        ver = do_check_version(env_path)
        if ver:
            return env_path, ver
    # First try to find obvious locations: dir/exe and dir/name/exe
    for d in possible_locations:
        for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
            if os.path.isfile(exe):
                ver = do_check_version(exe)
                if ver:
                    return exe, ver
    # Maybe the exe is on the PATH
    ver = do_check_version(exe_name)
    if ver:
        return exe_name, ver
    # Try harder: scan for versioned subdirs like 'elastix-4.9',
    # newest (lexicographically last) first
    for d in possible_locations:
        for sub in reversed(sorted(os.listdir(d))):
            if sub.startswith(name):
                exe = os.path.join(d, sub, exe_name)
                if os.path.isfile(exe):
                    ver = do_check_version(exe)
                    if ver:
                        return exe, ver
    return None, None
# Lazily-filled cache: [elastix_exe, transformix_exe]; see get_elastix_exes()
EXES = []
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.
    The result is cached in the module-level EXES list, so the file
    system is only searched once per process.
    """
    if EXES:
        if EXES[0]:
            return EXES
        else:
            raise RuntimeError('No Elastix executable.')
    # Find exe
    elastix, ver = _find_executables('elastix')
    if elastix:
        # transformix is assumed to live next to the elastix executable,
        # with the same extension
        base, ext = os.path.splitext(elastix)
        base = os.path.dirname(base)
        transformix = os.path.join(base, 'transformix' + ext)
        EXES.extend([elastix, transformix])
        print('Found %s in %r' % (ver, elastix))
        return EXES
    else:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
# %% Code for maintaining the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
    # Make sure it exists
    if not os.path.isdir(tempdir):
        os.makedirs(tempdir)
    # Clean up all directories for which the process no longer exists
    for fname in os.listdir(tempdir):
        dirName = os.path.join(tempdir, fname)
        # Check if is right kind of dir (created below as 'id_<pid>_<tid>')
        if not (os.path.isdir(dirName) and fname.startswith('id_')):
            continue
        # Get pid and check if its running
        try:
            pid = int(fname.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(dirName)
    # Select dir that included process and thread id.
    # NOTE(review): 'tid' is the id() of the thread object, not the OS
    # thread ident -- unique per live thread object, which suffices here.
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(dir):
        os.mkdir(dir)
    return dir
def _clear_temp_dir():
    """ Remove all files from this process/thread's pyelastix temp
    directory, ignoring individual failures. The directory is kept.
    """
    tempdir = get_tempdir()
    for entry in os.listdir(tempdir):
        try:
            os.remove(os.path.join(tempdir, entry))
        except Exception:
            pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
# Module-level lookup tables: numpy dtype name <-> ITK/MetaImage element type
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Parses Elastix stdout lines and renders a simple progress
    indicator (resolution level plus iteration number) on the terminal.
    """

    def __init__(self):
        self._level = 0
        self.reset()

    def update(self, s):
        """ Feed one line of Elastix output; updates the resolution
        level and, for iteration lines, refreshes the display.
        """
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        if '\t' in s:
            iteration = self.get_int(s.split('\t', 1)[0])
            if iteration:
                self.show_progress(iteration)

    def get_int(self, s):
        """ Parse an int from *s*; return 0 when parsing fails. """
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        """ Forget the current message and start a fresh output line. """
        self._message = ''
        print()

    def show_progress(self, iter):
        """ Print the progress message, preceded by backspaces intended
        to erase the previously printed message.
        """
        erase = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(erase + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)
    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).
    Parameters:
    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.
    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image (used to derive dimension/dtype defaults)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise?
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images: the group
        # becomes one (ndim+1)-dimensional stack
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters for groupwise registration over the stack axis
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along that dimenson: schedule is 0 for the stack
        # axis and 2**level for the spatial axes, finest level first
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (arrays are written to the temp dir)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Get path of trafo param file (written by elastix, read by transformix)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register
    if True:
        # Compile command to execute.
        # NOTE(review): command is a list but _system3 runs it with
        # shell=True; on POSIX Popen(list, shell=True) only uses the first
        # element as the command -- verify on the target platforms.
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Find deformation field
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Pull apart deformation fields in multiple images: the last axis of
    # each field holds the per-dimension displacement components
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0] # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # NOTE(review): this rescales the intensities by 1/3000 and promotes
    # the array to float (the written dtype is derived from the scaled
    # array, so the file stays self-consistent, but values no longer match
    # the caller's data). Looks deliberate -- confirm.
    im = im* (1.0/3000)
    # Create text (MetaImage header template; <...> placeholders are
    # filled in below)
    lines = [ "ObjectType = Image",
              "NDims = <ndim>",
              "BinaryData = True",
              "BinaryDataByteOrderMSB = False",
              "CompressedData = False",
              #"TransformMatrix = <transmatrix>",
              "Offset = <origin>",
              "CenterOfRotation = <centrot>",
              "ElementSpacing = <sampling>",
              "DimSize = <shape>",
              "ElementType = <dtype>",
              "ElementDataFile = <fname>",
              "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Get shape, sampling and origin (optional metadata from the Image class)
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]
    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get data type
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
    # Set mhd text
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    # Identity transform matrix (only used when the template line above
    # is uncommented)
    if im.ndim==2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim==3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim==4:
        pass # ???
    # Write data file
    f = open(fname_raw, 'wb')
    try:
        f.write(im.data)
    finally:
        f.close()
    # Write mhd file
    f = open(fname_mhd, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data( mhd_file):
    """ Read the resulting image data and return it as a numpy array.
    The .mhd header is looked up in this process' temp dir; the raw data
    file it references is read and reshaped accordingly. Returns an
    `Image` (ndarray subclass) with .sampling and .origin attached,
    both in z-y-x order.
    """
    tempdir = get_tempdir()
    # Load description from mhd file
    fname = tempdir + '/' + mhd_file
    des = open(fname, 'r').read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    data = open(fname, 'rb').read()
    # Determine dtype
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array.
    # NOTE(review): frombuffer yields a read-only view of the bytes;
    # assigning .shape below is fine, but in-place writes by callers
    # would fail -- confirm callers never write in place.
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse shape stuff to make z-y-x order
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: a size mismatch means each voxel
    # holds several components (e.g. a deformation vector per voxel)
    N = np.prod(shape)
    if N != a.size:
        extraDim = int( a.size / N )
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
        a = Image(a)
        a.sampling = sampling
        a.origin = origin
    return a
class Image(np.ndarray):
    """ Thin ndarray subclass used to attach .sampling and .origin
    metadata to arrays produced by the registration routines.
    """
    def __new__(cls, array):
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            # Object has no .view (e.g. on Pypy); return it unchanged.
            return array
# %% Code related to parameters
class Parameters:
    """ Struct-like container for Elastix registration parameters.
    Parameters are stored as plain attributes. Two Parameters objects
    can be merged with ``+``; for keys present in both operands the
    right-hand value wins. Use `get_default_params()` for a struct
    filled with sensible defaults.
    """

    def as_dict(self):
        """ Return the parameters as a (shallow) dictionary copy. """
        return dict(self.__dict__)

    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)

    def __str__(self):
        d = self.__dict__
        # Right-align all keys to the longest key name
        width = max([0] + [len(k) for k in d])
        # Room left for the value repr on a <80 char line
        max_val_chars = 79 - (width + 6)
        rows = ['<%i parameters>' % len(d)]
        for key in d.keys():
            value_repr = repr(d[key])
            if len(value_repr) > max_val_chars:
                value_repr = value_repr[:max_val_chars - 3] + '...'
            rows.append(key.rjust(width + 4) + ": %s" % (value_repr))
        return '\n'.join(rows) + '\n'

    def __add__(self, other):
        merged = Parameters()
        merged.__dict__.update(self.__dict__)
        merged.__dict__.update(other.__dict__)
        return merged
def _get_fixed_params(im):
    """ Build the parameters dictated by the input image itself
    (dimensionality and output pixel type); the user has no say in
    these. Returns an empty struct when a filename was given instead
    of an array.
    """
    p = Parameters()
    if not isinstance(im, np.ndarray):
        return p
    # Register in as many dimensions as the image has
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write the result image so it can be read back and verified
    p.WriteResultImage = True
    # Write the result with the same pixel type as the input
    itk_type = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_type.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get a `Parameters` struct with settings that most users never
    need to change (internal pixel formats, pyramids, interpolators,
    resampler, ...).
    """
    p = Parameters()
    p.__dict__.update([
        # Internal pixel format used during the registration process
        ('FixedInternalImagePixelType', "float"),
        ('MovingInternalImagePixelType', "float"),
        # Honour the image direction cosines
        ('UseDirectionCosines', True),
        # Multi-resolution is what you want in almost all cases
        ('Registration', 'MultiResolutionRegistration'),
        # *Recursive* pyramids downsample the images; *Smoothing*
        # pyramids only smooth them
        ('FixedImagePyramid', "FixedRecursiveImagePyramid"),
        ('MovingImagePyramid', "MovingRecursiveImagePyramid"),
        # Composition vs addition of transforms barely affects results
        ('HowToCombineTransforms', "Compose"),
        # Value for pixels that map outside the image
        ('DefaultPixelValue', 0),
        # Interpolation during optimization; order 1 = linear
        ('Interpolator', "BSplineInterpolator"),
        ('BSplineInterpolationOrder', 1),
        # Interpolation of the final result; order 3 = cubic
        ('ResampleInterpolator', "FinalBSplineInterpolator"),
        ('FinalBSplineInterpolationOrder', 3),
        # Elastix currently offers a single resampler
        ('Resampler', "DefaultResampler"),
    ])
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')
    Get a `Parameters` struct with the settings users typically tune.
    The `type` argument selects the allowed transform and can be
    'RIGID' (alias 'EULER'), 'AFFINE', or 'BSPLINE' (the default).
    Noteworthy parameters (see the Elastix docs for everything else):
    * Transform (str):
        'BSplineTransform', 'EulerTransform', or 'AffineTransform',
        chosen according to `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        For the B-spline transform: spacing of the final grid, which
        controls the smoothness of the resulting deformation.
    * AutomaticScalesEstimation (bool):
        For rigid/affine transforms: scale the matrix elements relative
        to the translations so both live in a comparable range. In
        general, it's best to leave this on.
    * AutomaticTransformInitialization (bool):
        For rigid/affine transforms: derive an initial translation by
        aligning the geometric centers of the fixed and moving image.
    * NumberOfResolutions (int):
        Number of scales in the multi-resolution approach, which steers
        the solution towards a global optimum and speeds things up
        (default 4).
    * MaximumNumberOfIterations (int):
        Iterations per resolution level (default 500). 200-2000 usually
        works fine for nonrigid registration; more is better but
        slower. This is an important parameter!
    """
    p = Parameters()
    kind = type.upper()
    # --- Similarity metric ---
    p.Metric = 'AdvancedMattesMutualInformation'
    # Grey-value bins per resolution level for the mutual information;
    # 16 or 32 usually works fine (this sets the default for
    # NumberOf[Fixed/Moving]HistogramBins)
    p.NumberOfHistogramBins = 32
    # How samples for the mutual information are drawn
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True
    # --- Transform ---
    # Levels in the multi-resolution image pyramid
    p.NumberOfResolutions = 4
    if kind in ('B', 'BSPLINE', 'B-SPLINE'):
        p.Transform = 'BSplineTransform'
        # Grid spacing at the finest level
        p.FinalGridSpacingInPhysicalUnits = 16
    if kind in ('RIGID', 'EULER', 'AFFINE'):
        p.Transform = ('EulerTransform' if kind in ('RIGID', 'EULER')
                       else 'AffineTransform')
        # Keep matrix elements and translations in a comparable range
        p.AutomaticScalesEstimation = True
        # Initialize by aligning the images' geometric centers
        p.AutomaticTransformInitialization = True
    # --- Optimizer ---
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Iterations per resolution level; the key speed/quality knob
    p.MaximumNumberOfIterations = 500
    # Other knobs that are sometimes useful (left at Elastix defaults):
    # MaximumStepLength (defaults to the voxel spacing; increase for
    # unusually high-resolution images), SigmoidInitialTime,
    # FinalGridSpacingInVoxels, GridSpacingSchedule,
    # ImagePyramidSchedule, ErodeMask
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
    * Combine user parameters with the fixed and advanced defaults
      (user-supplied values win).
    * Extend per-dimension grid-spacing scalars to lists with one value
      per image dimension.
    * Drop FinalGridSpacingInPhysicalUnits when FinalGridSpacingInVoxels
      is also given (the voxel-based value takes precedence).
    Accepts a `Parameters` struct or a plain dict (register() documents
    both). Returns a plain dict.
    """
    # Accept plain dicts as well: `p + params` needs a Parameters object
    # (the original crashed with AttributeError on dict input)
    if isinstance(params, dict):
        tmp = Parameters()
        tmp.__dict__.update(params)
        params = tmp
    # Compile parameters
    p = _get_fixed_params(im1) + get_advanced_params()
    p = p + params
    params = p.as_dict()
    # Check parameter dimensions: expand scalars to one value per dim
    if isinstance(im1, np.ndarray):
        lt = (list, tuple)
        for key in [ 'FinalGridSpacingInPhysicalUnits',
                     'FinalGridSpacingInVoxels' ]:
            if key in params.keys() and not isinstance(params[key], lt):
                params[key] = [params[key]] * im1.ndim
    # Check parameter removal: voxel spacing wins over physical spacing
    if 'FinalGridSpacingInVoxels' in params:
        if 'FinalGridSpacingInPhysicalUnits' in params:
            params.pop('FinalGridSpacingInPhysicalUnits')
    # Done
    return params
def _write_parameter_file(params):
    """ Write the given parameter dict to disk in the '(Key value ...)'
    format that the Elastix executable expects. Returns the path of the
    written file (params.txt in this process' temp dir).
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # Check bool BEFORE int: isinstance(True, int) is True, and the
        # old `val in [True, False]` test also matched the ints 0 and 1,
        # silently writing them as "false"/"true".
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        # Unsupported types fall through and return None (kept from the
        # original; the value is then written literally as 'None')
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (lists become space-separated)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text
    f = open(path, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
get_default_params
|
python
|
def get_default_params(type='BSPLINE'):
# Init
p = Parameters()
type = type.upper()
# ===== Metric to use =====
p.Metric = 'AdvancedMattesMutualInformation'
# Number of grey level bins in each resolution level,
# for the mutual information. 16 or 32 usually works fine.
# sets default value for NumberOf[Fixed/Moving]HistogramBins
p.NumberOfHistogramBins = 32
# Taking samples for mutual information
p.ImageSampler = 'RandomCoordinate'
p.NumberOfSpatialSamples = 2048
p.NewSamplesEveryIteration = True
# ====== Transform to use ======
# The number of levels in the image pyramid
p.NumberOfResolutions = 4
if type in ['B', 'BSPLINE', 'B-SPLINE']:
# Bspline transform
p.Transform = 'BSplineTransform'
# The final grid spacing (at the smallest level)
p.FinalGridSpacingInPhysicalUnits = 16
if type in ['RIGID', 'EULER', 'AFFINE']:
# Affine or Euler transform
if type in ['RIGID', 'EULER']:
p.Transform = 'EulerTransform'
else:
p.Transform = 'AffineTransform'
# Scales the affine matrix elements compared to the translations,
# to make sure they are in the same range. In general, it's best to
# use automatic scales estimation.
p.AutomaticScalesEstimation = True
# Automatically guess an initial translation by aligning the
# geometric centers of the fixed and moving.
p.AutomaticTransformInitialization = True
# ===== Optimizer to use =====
p.Optimizer = 'AdaptiveStochasticGradientDescent'
# Maximum number of iterations in each resolution level:
# 200-2000 works usually fine for nonrigid registration.
# The more, the better, but the longer computation time.
# This is an important parameter!
p.MaximumNumberOfIterations = 500
# The step size of the optimizer, in mm. By default the voxel size is used.
# which usually works well. In case of unusual high-resolution images
# (eg histology) it is necessary to increase this value a bit, to the size
# of the "smallest visible structure" in the image:
#p.MaximumStepLength = 1.0 Default uses voxel spaceing
# Another optional parameter for the AdaptiveStochasticGradientDescent
#p.SigmoidInitialTime = 4.0
# ===== Also interesting parameters =====
#p.FinalGridSpacingInVoxels = 16
#p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
#p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
#p.ErodeMask = "false"
# Done
return p
|
get_default_params(type='BSPLINE')
Get `Parameters` struct with parameters that users may want to tweak.
The given `type` specifies the type of allowed transform, and can
be 'RIGID', 'AFFINE', 'BSPLINE'.
For detail on what parameters are available and how they should be used,
we refer to the Elastix documentation. Here is a description of the
most common parameters:
* Transform (str):
Can be 'BSplineTransform', 'EulerTransform', or
'AffineTransform'. The transformation to apply. Chosen based on `type`.
* FinalGridSpacingInPhysicalUnits (int):
When using the BSplineTransform, the final spacing of the grid.
This controls the smoothness of the final deformation.
* AutomaticScalesEstimation (bool):
When using a rigid or affine transform. Scales the affine matrix
elements compared to the translations, to make sure they are in
the same range. In general, it's best to use automatic scales
estimation.
* AutomaticTransformInitialization (bool):
When using a rigid or affine transform. Automatically guess an
initial translation by aligning the geometric centers of the
fixed and moving.
* NumberOfResolutions (int):
Most registration algorithms adopt a multiresolution approach
to direct the solution towards a global optimum and to speed
up the process. This parameter specifies the number of scales
to apply the registration at. (default 4)
* MaximumNumberOfIterations (int):
Maximum number of iterations in each resolution level.
200-2000 works usually fine for nonrigid registration.
The more, the better, but the longer computation time.
This is an important parameter! (default 500).
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L823-L938
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
"""Get whether a process with the given pid is currently running.
"""
if sys.platform.startswith("win"):
return _is_pid_running_on_windows(pid)
else:
return _is_pid_running_on_unix(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
import ctypes.wintypes
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
if handle == 0:
return False
# If the process exited recently, a pid may still exist for the handle.
# So, check if we can get the exit code.
exit_code = ctypes.wintypes.DWORD()
is_running = (
kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
kernel32.CloseHandle(handle)
# See if we couldn't get the exit code or the exit code indicates that the
# process is still running.
return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
""" Try to find an executable.
"""
exe_name = name + '.exe' * sys.platform.startswith('win')
env_path = os.environ.get(name.upper()+ '_PATH', '')
possible_locations = []
def add(*dirs):
for d in dirs:
if d and d not in possible_locations and os.path.isdir(d):
possible_locations.append(d)
# Get list of possible locations
add(env_path)
try:
add(os.path.dirname(os.path.abspath(__file__)))
except NameError: # __file__ may not exist
pass
add(os.path.dirname(sys.executable))
add(os.path.expanduser('~'))
# Platform specific possible locations
if sys.platform.startswith('win'):
add('c:\\program files', os.environ.get('PROGRAMFILES'),
'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
else:
possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
def do_check_version(exe):
try:
return subprocess.check_output([exe, '--version']).decode().strip()
except Exception:
# print('not a good exe', exe)
return False
# If env path is the exe itself ...
if os.path.isfile(env_path):
ver = do_check_version(env_path)
if ver:
return env_path, ver
# First try to find obvious locations
for d in possible_locations:
for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
# Maybe the exe is on the PATH
ver = do_check_version(exe_name)
if ver:
return exe_name, ver
# Try harder
for d in possible_locations:
for sub in reversed(sorted(os.listdir(d))):
if sub.startswith(name):
exe = os.path.join(d, sub, exe_name)
if os.path.isfile(exe):
ver = do_check_version(exe)
if ver:
return exe, ver
return None, None
EXES = []
def get_elastix_exes():
""" Get the executables for elastix and transformix. Raises an error
if they cannot be found.
"""
if EXES:
if EXES[0]:
return EXES
else:
raise RuntimeError('No Elastix executable.')
# Find exe
elastix, ver = _find_executables('elastix')
if elastix:
base, ext = os.path.splitext(elastix)
base = os.path.dirname(base)
transformix = os.path.join(base, 'transformix' + ext)
EXES.extend([elastix, transformix])
print('Found %s in %r' % (ver, elastix))
return EXES
else:
raise RuntimeError('Could not find Elastix executable. Download '
'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
'looks for the exe in a series of common locations. '
'Set ELASTIX_PATH if necessary.')
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.

    Returns the path of a directory named 'id_<pid>_<thread id>' under
    '<system tempdir>/pyelastix', creating it when needed. As a side
    effect, removes sibling 'id_*' directories whose owning process is
    no longer alive.
    """
    tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
    # Make sure it exists
    if not os.path.isdir(tempdir):
        os.makedirs(tempdir)
    # Clean up all directories for which the process no longer exists
    for fname in os.listdir(tempdir):
        dirName = os.path.join(tempdir, fname)
        # Check if is right kind of dir
        if not (os.path.isdir(dirName) and fname.startswith('id_')):
            continue
        # Get pid and check if its running
        # (dir names follow the 'id_<pid>_<tid>' pattern created below)
        try:
            pid = int(fname.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(dirName)
    # Select dir that included process and thread id
    # (threading.currentThread is the pre-2.6 spelling kept as fallback)
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(dir):
        os.mkdir(dir)
    return dir
def _clear_temp_dir():
    """ Best-effort removal of every file in this process' temp dir.

    The directory itself is kept; failures to remove individual files
    are ignored.
    """
    root = get_tempdir()
    for entry in os.listdir(root):
        try:
            os.remove(os.path.join(root, entry))
        except Exception:
            pass  # ignore files that cannot be removed
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
    """ Execute the given command in a subprocess and wait for it to finish.
    A thread is run that prints output of the process if verbose is True.

    Raises RuntimeError when the user interrupts (Ctrl-C) or when the
    subprocess exits with a nonzero return code. Lines containing
    'error' are always printed, regardless of verbosity.
    """
    # Init flag (read by the reader thread; set on KeyboardInterrupt)
    interrupted = False
    # Create progress
    if verbose > 0:
        progress = Progress()
    stdout = []
    def poll_process(p):
        # Reader thread: collect subprocess output line by line until
        # EOF or until the main thread flags an interrupt.
        while not interrupted:
            msg = p.stdout.readline().decode()
            if msg:
                stdout.append(msg)
                if 'error' in msg.lower():
                    print(msg.rstrip())
                    if verbose == 1:
                        progress.reset()
                elif verbose > 1:
                    print(msg.rstrip())
                elif verbose == 1:
                    progress.update(msg)
            else:
                break
            time.sleep(0.01)
        #print("thread exit")
    # Start process that runs the command
    # NOTE(review): `cmd` arrives as a list while shell=True is used;
    # on POSIX, Popen(list, shell=True) passes only cmd[0] to the shell
    # and drops the remaining arguments -- confirm this works as
    # intended on the supported platforms.
    p = subprocess.Popen(cmd, shell=True,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Keep reading stdout from it
    # thread.start_new_thread(poll_process, (p,)) Python 2.x
    my_thread = threading.Thread(target=poll_process, args=(p,))
    my_thread.setDaemon(True)  # deprecated alias of .daemon, kept for py2 compat
    my_thread.start()
    # Wait here (poll instead of wait() so Ctrl-C is handled promptly)
    try:
        while p.poll() is None:
            time.sleep(0.01)
    except KeyboardInterrupt:
        # Set flag
        interrupted = True
        # Kill subprocess
        pid = p.pid
        if hasattr(os,'kill'):
            import signal
            # NOTE(review): signal.SIGKILL does not exist on Windows,
            # yet os.kill does there in py3 -- verify the branch order.
            os.kill(pid, signal.SIGKILL)
        elif sys.platform.startswith('win'):
            kernel32 = ctypes.windll.kernel32
            handle = kernel32.OpenProcess(1, 0, pid)
            kernel32.TerminateProcess(handle, 0)
            #os.system("TASKKILL /PID " + str(pid) + " /F")
    # All good?
    if interrupted:
        raise RuntimeError('Registration process interrupted by the user.')
    if p.returncode:
        # Dump collected output so the Elastix error is visible
        stdout.append(p.stdout.read().decode())
        print(''.join(stdout))
        raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Parses Elastix console output and prints a compact
    resolution/iteration progress message on a single line.
    """

    def __init__(self):
        self._level = 0  # current resolution level
        self.reset()

    def update(self, s):
        """ Feed one line of Elastix output; record the resolution
        level and print progress when an iteration number is seen.
        """
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)

    def get_int(self, s):
        """ Parse an int from s; return 0 when parsing fails. """
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        """ Forget the previous message and start a fresh output line. """
        self._message = ''
        print()

    def show_progress(self, iter):
        """ Print the current resolution/iteration, erasing the previous
        message with backspace characters.
        """
        backspaces = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(backspaces + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)
    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).
    Parameters:
    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.
    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Clear dir
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image (used for parameter checks: dims, dtype)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check parameters
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise? (signalled by im2 being None)
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Create new image that is a combination of all images
        # (stacked along a new first axis, so ndim becomes ndim+1)
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Set parameters: force the stack-transform configuration that
        # groupwise registration in Elastix requires
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # No smoothing along that dimenson
        # (schedule entries: 0 for the stack axis, 2**i per spatial axis)
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (arrays are written to the temp dir)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Get path of trafo param file (produced by the elastix run below)
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register ("if True" only scopes/groups this step visually)
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Find deformation field (second pass, via transformix)
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Pull apart deformation fields in multiple images
    # (last axis holds the vector components; split into one array
    # per spatial dimension, x-y-z order)
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0]  # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    """
    # NOTE(review): this rescaling is unexplained; it also promotes
    # integer arrays to float64, which changes the ElementType written
    # below -- confirm the magic 1/3000 factor is intentional.
    im = im* (1.0/3000)
    # Create text (MetaImage .mhd header template)
    lines = [ "ObjectType = Image",
        "NDims = <ndim>",
        "BinaryData = True",
        "BinaryDataByteOrderMSB = False",
        "CompressedData = False",
        #"TransformMatrix = <transmatrix>",
        "Offset = <origin>",
        "CenterOfRotation = <centrot>",
        "ElementSpacing = <sampling>",
        "DimSize = <shape>",
        "ElementType = <dtype>",
        "ElementDataFile = <fname>",
        "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Get shape, sampling and origin (fall back to unit spacing at 0)
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]
    # Make all shape stuff in x-y-z order and make it string
    # (numpy is z-y-x, MetaImage headers are x-y-z)
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get data type (maps numpy dtype name to the MET_* string)
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
    # Set mhd text (fill the template placeholders)
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    # <transmatrix> is only relevant if the commented template line above
    # is re-enabled; the replacements below would fill an identity matrix
    if im.ndim==2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim==3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim==4:
        pass # ???
    # Write data file (raw bytes of the array buffer)
    f = open(fname_raw, 'wb')
    try:
        f.write(im.data)
    finally:
        f.close()
    # Write mhd file
    f = open(fname_mhd, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data(mhd_file):
    """ Load an image written by Elastix/transformix from the temp dir.

    Parses the .mhd header for the raw-data filename, element type,
    dimensions, spacing and offset, then returns the pixel data as an
    Image (ndarray subclass) with .sampling and .origin attached, all
    in z-y-x order. A trailing component axis (e.g. of a deformation
    field) is detected and appended to the shape.
    """
    tempdir = get_tempdir()
    # Read the header text
    header = open(tempdir + '/' + mhd_file, 'r').read()

    def get_field(name):
        # First occurrence of "<name> = <value>" in the header
        return re.findall(name + ' = (.+?)\n', header)[0]

    # Load the raw binary data referenced by the header
    raw = open(tempdir + '/' + get_field('ElementDataFile'), 'rb').read()
    # Resolve the numpy dtype from the ITK element type
    itk_type = get_field('ElementType').upper().strip()
    dtype = DTYPE_ITK2NP.get(itk_type, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + itk_type)
    a = np.frombuffer(raw, dtype=dtype)
    # Geometry, reversed from the header's x-y-z into z-y-x order
    shape = [int(v) for v in get_field('DimSize').split(' ')][::-1]
    sampling = [float(v) for v in get_field('ElementSpacing').split(' ')][::-1]
    origin = [float(v) for v in get_field('Offset').split(' ')][::-1]
    # A surplus of data means a trailing axis of vector components
    if np.prod(shape) != a.size:
        ncomponents = int(a.size / np.prod(shape))
        shape = tuple(shape) + (ncomponents,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Apply the shape (or fail loudly if the data still does not fit)
    if np.prod(shape) != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    a.shape = shape
    a = Image(a)
    a.sampling = sampling
    a.origin = origin
    return a
class Image(np.ndarray):
    """ Thin ndarray subclass used to carry .sampling and .origin
    metadata alongside the pixel data.
    """
    def __new__(cls, array):
        # View the input as an Image; on interpreters without ndarray
        # views (e.g. Pypy) fall back to returning the input unchanged.
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            return array
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)
    Use `get_default_params()` to get a Parameters struct with sensible
    default values.
    """

    def as_dict(self):
        """ Return a shallow copy of the parameters as a dictionary. """
        return dict(self.__dict__)

    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)

    def __str__(self):
        # Width of the longest parameter name, for right-alignment
        width = max([0] + [len(key) for key in self.__dict__])
        # Room left for the value on a <80 char line
        chars_left = 79 - (width + 6)
        pieces = ['<%i parameters>' % len(self.__dict__)]
        for key in self.__dict__:
            valuestr = repr(self.__dict__[key])
            if len(valuestr) > chars_left:
                valuestr = valuestr[:chars_left - 3] + '...'
            pieces.append(key.rjust(width + 4) + ": %s" % valuestr)
        return '\n'.join(pieces) + '\n'

    def __add__(self, other):
        combined = Parameters()
        combined.__dict__.update(self.__dict__)
        combined.__dict__.update(other.__dict__)  # other wins on clashes
        return combined
def _get_fixed_params(im):
    """ Build the parameters that the user has no influence on; they
    follow directly from the input image (dimensionality and dtype).
    Returns an empty Parameters when im is not a numpy array.
    """
    p = Parameters()
    if not isinstance(im, np.ndarray):
        return p
    # Dimensionality of fixed and moving image
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write the result image, so it can be read back and verified
    p.WriteResultImage = True
    # Result format: mhd, with a pixel type matching the input dtype
    # (e.g. 'MET_FLOAT' -> 'float')
    itk_name = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_name.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.
    """
    p = Parameters()
    # Register using float images internally
    p.FixedInternalImagePixelType = "float"
    p.MovingInternalImagePixelType = "float"
    # Honour the image direction cosines
    p.UseDirectionCosines = True
    # Multi-resolution registration is almost always what you want
    p.Registration = 'MultiResolutionRegistration'
    # Recursive pyramids downsample the images; smoothing pyramids do not
    p.FixedImagePyramid = "FixedRecursiveImagePyramid"
    p.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Combining transforms by composition vs addition barely affects results
    p.HowToCombineTransforms = "Compose"
    # Value used for pixels sampled outside the moving image
    p.DefaultPixelValue = 0
    # Linear interpolation (order 1) during the optimization ...
    p.Interpolator = "BSplineInterpolator"
    p.BSplineInterpolationOrder = 1
    # ... and cubic (order 3) when producing the final result
    p.ResampleInterpolator = "FinalBSplineInterpolator"
    p.FinalBSplineInterpolationOrder = 3
    # Only one resampler exists, according to the Elastix manual
    p.Resampler = "DefaultResampler"
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')
    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.
    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:
    * Transform (str):
        Can be 'BSplineTransform', 'EulerTransform', or
        'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        When using the BSplineTransform, the final spacing of the grid.
        This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
        When using a rigid or affine transform. Scales the affine matrix
        elements compared to the translations, to make sure they are in
        the same range. In general, it's best to use automatic scales
        estimation.
    * AutomaticTransformInitialization (bool):
        When using a rigid or affine transform. Automatically guess an
        initial translation by aligning the geometric centers of the
        fixed and moving.
    * NumberOfResolutions (int):
        Most registration algorithms adopt a multiresolution approach
        to direct the solution towards a global optimum and to speed
        up the process. This parameter specifies the number of scales
        to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
        Maximum number of iterations in each resolution level.
        200-2000 works usually fine for nonrigid registration.
        The more, the better, but the longer computation time.
        This is an important parameter! (default 500).
    """
    p = Parameters()
    kind = type.upper()
    # --- Metric: mutual information, sampled at random coordinates ---
    p.Metric = 'AdvancedMattesMutualInformation'
    # Grey-value bins per resolution level; 16 or 32 usually works fine
    # (sets the default for NumberOf[Fixed/Moving]HistogramBins)
    p.NumberOfHistogramBins = 32
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True
    # --- Transform, chosen from `type` ---
    # Number of levels in the image pyramid
    p.NumberOfResolutions = 4
    if kind in ('B', 'BSPLINE', 'B-SPLINE'):
        p.Transform = 'BSplineTransform'
        # Grid spacing at the finest level; controls deformation smoothness
        p.FinalGridSpacingInPhysicalUnits = 16
    if kind in ('RIGID', 'EULER', 'AFFINE'):
        if kind == 'AFFINE':
            p.Transform = 'AffineTransform'
        else:
            p.Transform = 'EulerTransform'
        # Balance matrix elements vs translations automatically
        p.AutomaticScalesEstimation = True
        # Initial translation from aligning the geometric centers
        p.AutomaticTransformInitialization = True
    # --- Optimizer ---
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Iterations per resolution level; 200-2000 is typical for nonrigid.
    # More is better but slower -- an important parameter!
    p.MaximumNumberOfIterations = 500
    # The optimizer step size (mm) defaults to the voxel size, which
    # usually works well; for unusually high-resolution images set
    # p.MaximumStepLength to the size of the smallest visible structure.
    # Other occasionally useful knobs:
    #p.SigmoidInitialTime = 4.0
    #p.FinalGridSpacingInVoxels = 16
    #p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #p.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #p.ErodeMask = "false"
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
    * Combine parameters from different sources
    * Perform checks to prevent non-compatible parameters
    * Extend parameters that need a list with one element per dimension
    """
    # Later sources take precedence: fixed < advanced < user-given
    combined = _get_fixed_params(im1) + get_advanced_params() + params
    d = combined.as_dict()
    # Scalars that Elastix wants specified once per image dimension
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in d and not isinstance(d[key], (list, tuple)):
                d[key] = [d[key]] * im1.ndim
    # A voxel-based grid spacing overrides the physical-units one
    if 'FinalGridSpacingInVoxels' in d:
        if 'FinalGridSpacingInPhysicalUnits' in d:
            d.pop('FinalGridSpacingInPhysicalUnits')
    return d
def _write_parameter_file(params):
    """ Write the parameter dictionary to a text file in the format
    that Elastix expects ("(Key value value ...)" lines) and return
    the path of the written file.
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # BUGFIX: bool must be tested before int, since bool is an int
        # subclass; the previous membership test (val in [True, False])
        # also matched the integers 0 and 1, turning e.g.
        # DefaultPixelValue = 0 into "false".
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            # Only append '.0' to plain decimals; '1e-05' is already a
            # valid float literal and must not become '1e-05.0'.
            if '.' not in tmp and 'e' not in tmp and 'E' not in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        else:
            # Previously unsupported types silently became the text 'None'
            raise ValueError('Cannot represent parameter value %r' % (val,))
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (space-separated for sequences)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text
    f = open(path, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
_compile_params
|
python
|
def _compile_params(params, im1):
# Compile parameters
p = _get_fixed_params(im1) + get_advanced_params()
p = p + params
params = p.as_dict()
# Check parameter dimensions
if isinstance(im1, np.ndarray):
lt = (list, tuple)
for key in [ 'FinalGridSpacingInPhysicalUnits',
'FinalGridSpacingInVoxels' ]:
if key in params.keys() and not isinstance(params[key], lt):
params[key] = [params[key]] * im1.ndim
# Check parameter removal
if 'FinalGridSpacingInVoxels' in params:
if 'FinalGridSpacingInPhysicalUnits' in params:
params.pop('FinalGridSpacingInPhysicalUnits')
# Done
return params
|
Compile the params dictionary:
* Combine parameters from different sources
* Perform checks to prevent non-compatible parameters
* Extend parameters that need a list with one element per dimension
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L941-L967
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259
def _is_pid_running(pid):
    """Get whether a process with the given pid is currently running."""
    on_windows = sys.platform.startswith("win")
    # Dispatch to the platform-specific implementation
    return (_is_pid_running_on_windows(pid) if on_windows
            else _is_pid_running_on_unix(pid))
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    """ Check whether a process is running on Windows, via the Win32
    OpenProcess/GetExitCodeProcess API through ctypes.
    """
    import ctypes.wintypes
    kernel32 = ctypes.windll.kernel32
    # Open the process with desired access = 1 (taken from the blog
    # snippet this is based on); failure means the pid does not exist
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        return False
    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    # NOTE(review): GetExitCodeProcess returns nonzero on *success*, so
    # `is_running` is True when the query FAILED; combined with the
    # final `or`, a failed query is reported as "running" -- confirm
    # this (conservative) behavior is intended.
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)
    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running.
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
    """ Try to find an executable.

    Searches, in order: the <NAME>_PATH environment variable (as a file
    or a directory), a set of common locations (this module's dir, the
    Python executable's dir, the user's home, platform-specific program
    dirs), the system PATH, and finally any subdirectory of the common
    locations whose name starts with `name` (newest first, by reverse
    sort). Each candidate is validated by running it with '--version'.
    Returns (exe_path, version_string), or (None, None) when not found.
    """
    exe_name = name + '.exe' * sys.platform.startswith('win')
    env_path = os.environ.get(name.upper()+ '_PATH', '')
    possible_locations = []
    def add(*dirs):
        # Collect existing, unique directories only
        for d in dirs:
            if d and d not in possible_locations and os.path.isdir(d):
                possible_locations.append(d)
    # Get list of possible locations
    add(env_path)
    try:
        add(os.path.dirname(os.path.abspath(__file__)))
    except NameError:  # __file__ may not exist
        pass
    add(os.path.dirname(sys.executable))
    add(os.path.expanduser('~'))
    # Platform specific possible locations
    if sys.platform.startswith('win'):
        add('c:\\program files', os.environ.get('PROGRAMFILES'),
            'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
    else:
        # NOTE(review): these are appended without the isdir() check the
        # add() helper does; os.listdir below could raise if one of them
        # does not exist on an unusual system -- confirm acceptable.
        possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
    def do_check_version(exe):
        # A candidate counts as valid iff '--version' runs and prints
        try:
            return subprocess.check_output([exe, '--version']).decode().strip()
        except Exception:
            # print('not a good exe', exe)
            return False
    # If env path is the exe itself ...
    if os.path.isfile(env_path):
        ver = do_check_version(env_path)
        if ver:
            return env_path, ver
    # First try to find obvious locations
    # (both <dir>/<exe> and <dir>/<name>/<exe>)
    for d in possible_locations:
        for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
            if os.path.isfile(exe):
                ver = do_check_version(exe)
                if ver:
                    return exe, ver
    # Maybe the exe is on the PATH
    ver = do_check_version(exe_name)
    if ver:
        return exe_name, ver
    # Try harder: versioned install dirs like 'elastix-4.9'
    for d in possible_locations:
        for sub in reversed(sorted(os.listdir(d))):
            if sub.startswith(name):
                exe = os.path.join(d, sub, exe_name)
                if os.path.isfile(exe):
                    ver = do_check_version(exe)
                    if ver:
                        return exe, ver
    return None, None
EXES = []
def get_elastix_exes():
""" Get the executables for elastix and transformix. Raises an error
if they cannot be found.
"""
if EXES:
if EXES[0]:
return EXES
else:
raise RuntimeError('No Elastix executable.')
# Find exe
elastix, ver = _find_executables('elastix')
if elastix:
base, ext = os.path.splitext(elastix)
base = os.path.dirname(base)
transformix = os.path.join(base, 'transformix' + ext)
EXES.extend([elastix, transformix])
print('Found %s in %r' % (ver, elastix))
return EXES
else:
raise RuntimeError('Could not find Elastix executable. Download '
'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
'looks for the exe in a series of common locations. '
'Set ELASTIX_PATH if necessary.')
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
""" Get the temporary directory where pyelastix stores its temporary
files. The directory is specific to the current process and the
calling thread. Generally, the user does not need this; directories
are automatically cleaned up. Though Elastix log files are also
written here.
"""
tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
# Make sure it exists
if not os.path.isdir(tempdir):
os.makedirs(tempdir)
# Clean up all directories for which the process no longer exists
for fname in os.listdir(tempdir):
dirName = os.path.join(tempdir, fname)
# Check if is right kind of dir
if not (os.path.isdir(dirName) and fname.startswith('id_')):
continue
# Get pid and check if its running
try:
pid = int(fname.split('_')[1])
except Exception:
continue
if not _is_pid_running(pid):
_clear_dir(dirName)
# Select dir that included process and thread id
tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
else threading.currentThread())
dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
if not os.path.isdir(dir):
os.mkdir(dir)
return dir
def _clear_temp_dir():
""" Clear the temporary directory.
"""
tempdir = get_tempdir()
for fname in os.listdir(tempdir):
try:
os.remove( os.path.join(tempdir, fname) )
except Exception:
pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
""" Execute the given command in a subprocess and wait for it to finish.
A thread is run that prints output of the process if verbose is True.
"""
# Init flag
interrupted = False
# Create progress
if verbose > 0:
progress = Progress()
stdout = []
def poll_process(p):
while not interrupted:
msg = p.stdout.readline().decode()
if msg:
stdout.append(msg)
if 'error' in msg.lower():
print(msg.rstrip())
if verbose == 1:
progress.reset()
elif verbose > 1:
print(msg.rstrip())
elif verbose == 1:
progress.update(msg)
else:
break
time.sleep(0.01)
#print("thread exit")
# Start process that runs the command
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Keep reading stdout from it
# thread.start_new_thread(poll_process, (p,)) Python 2.x
my_thread = threading.Thread(target=poll_process, args=(p,))
my_thread.setDaemon(True)
my_thread.start()
# Wait here
try:
while p.poll() is None:
time.sleep(0.01)
except KeyboardInterrupt:
# Set flag
interrupted = True
# Kill subprocess
pid = p.pid
if hasattr(os,'kill'):
import signal
os.kill(pid, signal.SIGKILL)
elif sys.platform.startswith('win'):
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
kernel32.TerminateProcess(handle, 0)
#os.system("TASKKILL /PID " + str(pid) + " /F")
# All good?
if interrupted:
raise RuntimeError('Registration process interrupted by the user.')
if p.returncode:
stdout.append(p.stdout.read().decode())
print(''.join(stdout))
raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Tracks registration progress by parsing lines of Elastix output
    and printing a short resolution/iteration status message.
    """

    def __init__(self):
        # Current resolution level as last reported by Elastix
        self._level = 0
        self.reset()

    def update(self, s):
        """ Feed one line of Elastix output. """
        # Lines like "Resolution: N" announce a new pyramid level
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        # Iteration lines are tab-separated, starting with the iter number
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)

    def get_int(self, s):
        """ Parse an int from s, returning 0 when parsing fails. """
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        """ Forget the previous message and start on a fresh line. """
        self._message = ''
        print()

    def show_progress(self, iter):
        """ Print the current resolution/iteration status message. """
        # Backspaces intended to wipe the previously printed message
        erase = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(erase + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)
    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).
    Parameters:
    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.
    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Use a fresh per-process/thread temp dir; remove leftovers first
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image (first of the list for groupwise registration)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check/normalize parameters, then flatten to a plain dict
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise registration? (signalled by a missing reference image)
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Stack all images into one (ndim+1)-dimensional image
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Force the groupwise-specific Elastix components/parameters
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # Pyramid schedule with no smoothing along the stack dimension
        # (a 0 for that axis at every resolution level)
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (ndarrays are written to disk first)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Elastix writes the transform parameters to this fixed location
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register: run the elastix executable and read the deformed image
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Obtain deformation field by running transformix on the transform
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image in the groupwise case)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Split each vector field into one array per dimension; the vector
    # components live on the last axis of the loaded field
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0]  # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.
    The id is the image sequence number (1 or 2). Returns the path of
    the mhd file.
    The data is written unmodified. (A previous revision scaled the
    values with ``im * (1.0/3000)``, which corrupted the image and
    silently promoted integer arrays to float64.)
    Raises ValueError if the array dtype has no ITK equivalent.
    """
    # Create mhd header template; <placeholders> are filled in below
    lines = [ "ObjectType = Image",
        "NDims = <ndim>",
        "BinaryData = True",
        "BinaryDataByteOrderMSB = False",
        "CompressedData = False",
        #"TransformMatrix = <transmatrix>",
        "Offset = <origin>",
        "CenterOfRotation = <centrot>",
        "ElementSpacing = <sampling>",
        "DimSize = <shape>",
        "ElementType = <dtype>",
        "ElementDataFile = <fname>",
        "" ]
    text = '\n'.join(lines)
    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)
    # Get shape, sampling and origin (default: unit spacing at origin 0)
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else: sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else: origin = [0 for s in im.shape]
    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])
    # Get data type
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: '+ str(im.dtype))
    # Set mhd text
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    if im.ndim==2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim==3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim==4:
        pass # ???
    # Write data file; tobytes() also handles non-contiguous arrays,
    # for which the plain buffer interface would fail
    f = open(fname_raw, 'wb')
    try:
        f.write(im.tobytes())
    finally:
        f.close()
    # Write mhd file
    f = open(fname_mhd, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done, return path of mhd file
    return fname_mhd
def _read_image_data( mhd_file):
    """ Read the resulting image data and return it as a numpy array.
    Parses the mhd header in the temp dir for the raw-data file name,
    element type, shape, spacing and offset, loads the raw data, and
    wraps it in an `Image` with `sampling` and `origin` attributes set
    (both in z-y-x order).
    Raises RuntimeError for an unknown element type or a size mismatch.
    """
    tempdir = get_tempdir()
    # Load description from mhd file
    fname = tempdir + '/' + mhd_file
    des = open(fname, 'r').read()
    # Get data filename and load raw data
    match = re.findall('ElementDataFile = (.+?)\n', des)
    fname = tempdir + '/' + match[0]
    data = open(fname, 'rb').read()
    # Determine dtype from the header's ElementType field
    match = re.findall('ElementType = (.+?)\n', des)
    dtype_itk = match[0].upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    # Create numpy array (a read-only view on the raw bytes)
    a = np.frombuffer(data, dtype=dtype)
    # Determine shape, sampling and origin of the data
    match = re.findall('DimSize = (.+?)\n', des)
    shape = [int(i) for i in match[0].split(' ')]
    #
    match = re.findall('ElementSpacing = (.+?)\n', des)
    sampling = [float(i) for i in match[0].split(' ')]
    #
    match = re.findall('Offset = (.+?)\n', des)
    origin = [float(i) for i in match[0].split(' ')]
    # Reverse shape stuff to make z-y-x order (numpy convention)
    shape = [s for s in reversed(shape)]
    sampling = [s for s in reversed(sampling)]
    origin = [s for s in reversed(origin)]
    # Take vectors/colours into account: if the buffer holds more
    # elements than the scalar volume, the remainder becomes an extra
    # last axis (e.g. the x/y/z components of a deformation field)
    N = np.prod(shape)
    if N != a.size:
        extraDim = int( a.size / N )
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)
    # Check shape
    N = np.prod(shape)
    if N != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    else:
        a.shape = shape
        a = Image(a)
        a.sampling = sampling
        a.origin = origin
    return a
class Image(np.ndarray):
    """ ndarray subclass used to carry `sampling` and `origin` metadata
    on arrays produced by pyelastix.
    """
    def __new__(cls, array):
        try:
            view = array.view(cls)
        except AttributeError:  # pragma: no cover
            # Pypy arrays lack view(); hand back the input unchanged
            return array
        return view
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)
    Use `get_default_params()` to get a Parameters struct with sensible
    default values.
    """

    def as_dict(self):
        """ Returns the parameters as a dictionary. """
        return dict(self.__dict__)

    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)

    def __str__(self):
        # Widest key determines the alignment column
        width = 0
        for key in self.__dict__:
            width = max(width, len(key))
        # Room left for the value on a <80 char line
        room = 79 - (width + 6)
        text = '<%i parameters>\n' % len(self.__dict__)
        for key, value in self.__dict__.items():
            valuestr = repr(value)
            if len(valuestr) > room:
                valuestr = valuestr[:room-3] + '...'
            text += key.rjust(width+4) + ": %s\n" % (valuestr)
        return text

    def __add__(self, other):
        # Merge into a fresh struct; `other` wins on duplicate keys
        combined = Parameters()
        combined.__dict__.update(self.__dict__)
        combined.__dict__.update(other.__dict__)
        return combined
def _get_fixed_params(im):
    """ Build the parameters that are fully determined by the input
    image (dimensionality and output pixel type); the user has no
    influence on these. Returns an empty struct for non-array input.
    """
    p = Parameters()
    if not isinstance(im, np.ndarray):
        return p
    # Both images share the dimensionality of the reference image
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write the result image so it can be read back and checked
    p.WriteResultImage = True
    # Result written as mhd, with the same pixel type as the input
    itk_type = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_type.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.
    """
    params = Parameters()
    # Register using float internally, regardless of the input type
    params.FixedInternalImagePixelType = "float"
    params.MovingInternalImagePixelType = "float"
    # Take the image direction cosines into account
    params.UseDirectionCosines = True
    # Multi-resolution is almost always what you want
    params.Registration = 'MultiResolutionRegistration'
    # *RecursiveImagePyramid downsamples the images,
    # *SmoothingImagePyramid only smooths them
    params.FixedImagePyramid = "FixedRecursiveImagePyramid"
    params.MovingImagePyramid = "MovingRecursiveImagePyramid"
    # Composition vs addition of transforms barely affects the result
    params.HowToCombineTransforms = "Compose"
    # Value assigned to pixels that map outside the moving image
    params.DefaultPixelValue = 0
    # Interpolation during optimization: order 1 (linear) is cheap
    params.Interpolator = "BSplineInterpolator"
    params.BSplineInterpolationOrder = 1
    # Interpolation of the final result: cubic for quality
    params.ResampleInterpolator = "FinalBSplineInterpolator"
    params.FinalBSplineInterpolationOrder = 3
    # According to the Elastix manual there is only one resampler
    params.Resampler = "DefaultResampler"
    return params
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')
    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.
    For detail on what parameters are available and how they should be used,
    we refer to the Elastix documentation. Here is a description of the
    most common parameters:
    * Transform (str):
        Can be 'BSplineTransform', 'EulerTransform', or
        'AffineTransform'. The transformation to apply. Chosen based on `type`.
    * FinalGridSpacingInPhysicalUnits (int):
        When using the BSplineTransform, the final spacing of the grid.
        This controls the smoothness of the final deformation.
    * AutomaticScalesEstimation (bool):
        When using a rigid or affine transform. Scales the affine matrix
        elements compared to the translations, to make sure they are in
        the same range. In general, it's best to use automatic scales
        estimation.
    * AutomaticTransformInitialization (bool):
        When using a rigid or affine transform. Automatically guess an
        initial translation by aligning the geometric centers of the
        fixed and moving.
    * NumberOfResolutions (int):
        Most registration algorithms adopt a multiresolution approach
        to direct the solution towards a global optimum and to speed
        up the process. This parameter specifies the number of scales
        to apply the registration at. (default 4)
    * MaximumNumberOfIterations (int):
        Maximum number of iterations in each resolution level.
        200-2000 works usually fine for nonrigid registration.
        The more, the better, but the longer computation time.
        This is an important parameter! (default 500).
    """
    params = Parameters()
    transform_type = type.upper()
    # ===== Metric to use =====
    params.Metric = 'AdvancedMattesMutualInformation'
    # 16 or 32 grey-value bins per resolution level usually works fine;
    # sets the default for NumberOf[Fixed/Moving]HistogramBins
    params.NumberOfHistogramBins = 32
    # Mutual information sampling: random coordinates, refreshed every
    # iteration
    params.ImageSampler = 'RandomCoordinate'
    params.NumberOfSpatialSamples = 2048
    params.NewSamplesEveryIteration = True
    # ====== Transform to use ======
    # Number of levels in the image pyramid
    params.NumberOfResolutions = 4
    if transform_type in ('B', 'BSPLINE', 'B-SPLINE'):
        params.Transform = 'BSplineTransform'
        # Grid spacing at the finest level; controls the smoothness of
        # the final deformation
        params.FinalGridSpacingInPhysicalUnits = 16
    if transform_type in ('RIGID', 'EULER', 'AFFINE'):
        if transform_type in ('RIGID', 'EULER'):
            params.Transform = 'EulerTransform'
        else:
            params.Transform = 'AffineTransform'
        # Bring matrix elements and translations into the same range
        params.AutomaticScalesEstimation = True
        # Initialize by aligning the geometric centers of both images
        params.AutomaticTransformInitialization = True
    # ===== Optimizer to use =====
    params.Optimizer = 'AdaptiveStochasticGradientDescent'
    # 200-2000 usually works fine for nonrigid registration; more is
    # better but slower. This is an important parameter!
    params.MaximumNumberOfIterations = 500
    # The optimizer step size (mm) defaults to the voxel size, which is
    # usually fine; increase it a bit for unusually high-resolution
    # images (e.g. histology), towards the size of the smallest visible
    # structure:
    #params.MaximumStepLength = 1.0
    # Another optional parameter for the AdaptiveStochasticGradientDescent
    #params.SigmoidInitialTime = 4.0
    # ===== Also interesting parameters =====
    #params.FinalGridSpacingInVoxels = 16
    #params.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    #params.ImagePyramidSchedule = [8 8 4 4 2 2 1 1]
    #params.ErodeMask = "false"
    return params
def _compile_params(params, im1):
    """ Compile the params dictionary:
    * Combine parameters from different sources
    * Perform checks to prevent non-compatible parameters
    * Extend parameters that need a list with one element per dimension
    """
    # Merge: fixed < advanced < user-given (rightmost wins on clashes)
    merged = _get_fixed_params(im1) + get_advanced_params() + params
    params = merged.as_dict()
    # Grid spacing must be given per dimension; extend scalar values
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in params and not isinstance(params[key], (list, tuple)):
                params[key] = [params[key]] * im1.ndim
    # The two grid-spacing flavors are mutually exclusive; when both
    # are present the voxel-based one wins
    if 'FinalGridSpacingInVoxels' in params:
        if 'FinalGridSpacingInPhysicalUnits' in params:
            params.pop('FinalGridSpacingInPhysicalUnits')
    return params
def _write_parameter_file(params):
    """ Write the parameter file in the format that elaxtix likes.
    Each parameter becomes a line ``(Key value ...)``; bools are
    written as "true"/"false", strings are quoted, floats always get a
    decimal point. Returns the path of the written file.
    Raises ValueError for values of an unsupported type (previously
    these were silently written as ``None``).
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')
    # Define helper function
    def valToStr(val):
        # Bools must be tested before ints: bool is a subclass of int
        # and ``1 in [True, False]`` is True, so the previous test
        # wrote integer parameters 0/1 as "false"/"true".
        if isinstance(val, bool):
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        else:
            # Fail loudly instead of silently writing "None"
            raise ValueError('Cannot represent parameter value %r' % (val, ))
    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (lists become space-separated)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # Create line and add
        line = '(%s %s)' % (key, val_)
        text += line + '\n'
    # Write text
    f = open(path, 'wb')
    try:
        f.write(text.encode('utf-8'))
    finally:
        f.close()
    # Done
    return path
|
almarklein/pyelastix
|
pyelastix.py
|
_write_parameter_file
|
python
|
def _write_parameter_file(params):
# Get path
path = os.path.join(get_tempdir(), 'params.txt')
# Define helper function
def valToStr(val):
if val in [True, False]:
return '"%s"' % str(val).lower()
elif isinstance(val, int):
return str(val)
elif isinstance(val, float):
tmp = str(val)
if not '.' in tmp:
tmp += '.0'
return tmp
elif isinstance(val, str):
return '"%s"' % val
# Compile text
text = ''
for key in params:
val = params[key]
# Make a string of the values
if isinstance(val, (list, tuple)):
vals = [valToStr(v) for v in val]
val_ = ' '.join(vals)
else:
val_ = valToStr(val)
# Create line and add
line = '(%s %s)' % (key, val_)
text += line + '\n'
# Write text
f = open(path, 'wb')
try:
f.write(text.encode('utf-8'))
finally:
f.close()
# Done
return path
|
Write the parameter file in the format that elaxtix likes.
|
train
|
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L970-L1013
| null |
# Copyright (c) 2010-2016, Almar Klein
# This code is subject to the MIT license
"""
PyElastix - Python wrapper for the Elastix nonrigid registration toolkit
This Python module wraps the Elastix registration toolkit. For it to
work, the Elastix command line application needs to be installed on
your computer. You can obtain a copy at http://elastix.isi.uu.nl/.
Further, this module depends on numpy.
https://github.com/almarklein/pyelastix
"""
from __future__ import print_function, division
__version__ = '1.1'
import os
import re
import sys
import time
import ctypes
import tempfile
import threading
import subprocess
import numpy as np
# %% Code for determining whether a pid is active
# taken from: http://www.madebuild.org/blog/?p=30
# GetExitCodeProcess uses a special exit code to indicate that the process is
# still running.
_STILL_ACTIVE = 259  # STILL_ACTIVE exit code (WinBase.h)
def _is_pid_running(pid):
    """Get whether a process with the given pid is currently running.
    Dispatches to the platform-specific implementation.
    """
    if sys.platform.startswith("win"):
        return _is_pid_running_on_windows(pid)
    return _is_pid_running_on_unix(pid)
def _is_pid_running_on_unix(pid):
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _is_pid_running_on_windows(pid):
    """ Windows-only check (via the Win32 API) whether `pid` is
    currently running.
    """
    import ctypes.wintypes
    kernel32 = ctypes.windll.kernel32
    # NOTE(review): access right 1 is PROCESS_TERMINATE;
    # GetExitCodeProcess normally requires PROCESS_QUERY_INFORMATION —
    # confirm this works on the targeted Windows versions.
    handle = kernel32.OpenProcess(1, 0, pid)
    if handle == 0:
        # No handle could be opened: treat as not running
        return False
    # If the process exited recently, a pid may still exist for the handle.
    # So, check if we can get the exit code.
    exit_code = ctypes.wintypes.DWORD()
    is_running = (
        kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
    kernel32.CloseHandle(handle)
    # See if we couldn't get the exit code or the exit code indicates that the
    # process is still running.
    return is_running or exit_code.value == _STILL_ACTIVE
# %% Code for detecting the executablews
def _find_executables(name):
    """ Try to find an executable.
    Searches a series of likely locations (the ``<NAME>_PATH``
    environment variable, this module's directory, the Python
    executable's directory, the user's home dir, and OS-specific
    program directories), validating each candidate by running it with
    ``--version``. Returns a (path, version_string) tuple on success,
    or (None, None) when nothing valid is found.
    """
    # On Windows the executable carries an .exe suffix
    exe_name = name + '.exe' * sys.platform.startswith('win')
    env_path = os.environ.get(name.upper()+ '_PATH', '')
    possible_locations = []
    def add(*dirs):
        # Append existing, not-yet-listed directories only
        for d in dirs:
            if d and d not in possible_locations and os.path.isdir(d):
                possible_locations.append(d)
    # Get list of possible locations
    add(env_path)
    try:
        add(os.path.dirname(os.path.abspath(__file__)))
    except NameError:  # __file__ may not exist
        pass
    add(os.path.dirname(sys.executable))
    add(os.path.expanduser('~'))
    # Platform specific possible locations
    if sys.platform.startswith('win'):
        add('c:\\program files', os.environ.get('PROGRAMFILES'),
            'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)'))
    else:
        # NOTE(review): these bypass add()'s existence check on purpose?
        possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin'])
    def do_check_version(exe):
        # A candidate is valid iff it runs and reports a version string
        try:
            return subprocess.check_output([exe, '--version']).decode().strip()
        except Exception:
            # print('not a good exe', exe)
            return False
    # If env path is the exe itself ...
    if os.path.isfile(env_path):
        ver = do_check_version(env_path)
        if ver:
            return env_path, ver
    # First try to find obvious locations: dir/exe and dir/name/exe
    for d in possible_locations:
        for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]:
            if os.path.isfile(exe):
                ver = do_check_version(exe)
                if ver:
                    return exe, ver
    # Maybe the exe is on the PATH
    ver = do_check_version(exe_name)
    if ver:
        return exe_name, ver
    # Try harder: subdirs starting with the name (e.g. versioned
    # install dirs); reversed sort prefers the highest version
    for d in possible_locations:
        for sub in reversed(sorted(os.listdir(d))):
            if sub.startswith(name):
                exe = os.path.join(d, sub, exe_name)
                if os.path.isfile(exe):
                    ver = do_check_version(exe)
                    if ver:
                        return exe, ver
    return None, None
# Cache for the (elastix, transformix) executable paths; populated
# lazily by get_elastix_exes().
EXES = []
def get_elastix_exes():
    """ Get the executables for elastix and transformix. Raises an error
    if they cannot be found.
    The result is cached in the module-level EXES list, so the
    filesystem search runs at most once per process.
    """
    if EXES:
        if EXES[0]:
            return EXES
        else:
            # NOTE(review): EXES is only ever extended on success below,
            # so this branch appears unreachable at present — confirm.
            raise RuntimeError('No Elastix executable.')
    # Find exe
    elastix, ver = _find_executables('elastix')
    if elastix:
        # transformix lives next to elastix, with the same extension
        base, ext = os.path.splitext(elastix)
        base = os.path.dirname(base)
        transformix = os.path.join(base, 'transformix' + ext)
        EXES.extend([elastix, transformix])
        print('Found %s in %r' % (ver, elastix))
        return EXES
    else:
        raise RuntimeError('Could not find Elastix executable. Download '
                           'Elastix from http://elastix.isi.uu.nl/. Pyelastix '
                           'looks for the exe in a series of common locations. '
                           'Set ELASTIX_PATH if necessary.')
# %% Code for maintaing the temp dirs
def _clear_dir(dirName):
""" Remove a directory and it contents. Ignore any failures.
"""
# If we got here, clear dir
for fname in os.listdir(dirName):
try:
os.remove( os.path.join(dirName, fname) )
except Exception:
pass
try:
os.rmdir(dirName)
except Exception:
pass
def get_tempdir():
    """ Get the temporary directory where pyelastix stores its temporary
    files. The directory is specific to the current process and the
    calling thread. Generally, the user does not need this; directories
    are automatically cleaned up. Though Elastix log files are also
    written here.
    """
    tempdir = os.path.join(tempfile.gettempdir(), 'pyelastix')
    # Make sure it exists
    if not os.path.isdir(tempdir):
        os.makedirs(tempdir)
    # Clean up all directories for which the process no longer exists
    for fname in os.listdir(tempdir):
        dirName = os.path.join(tempdir, fname)
        # Check if is right kind of dir (our "id_<pid>_<tid>" pattern)
        if not (os.path.isdir(dirName) and fname.startswith('id_')):
            continue
        # Get pid and check if its running
        try:
            pid = int(fname.split('_')[1])
        except Exception:
            continue
        if not _is_pid_running(pid):
            _clear_dir(dirName)
    # Select dir that includes process and thread id, so concurrent
    # registrations do not clash (currentThread is the Python 2 name)
    tid = id(threading.current_thread() if hasattr(threading, 'current_thread')
             else threading.currentThread())
    dir = os.path.join(tempdir, 'id_%i_%i' % (os.getpid(), tid))
    if not os.path.isdir(dir):
        os.mkdir(dir)
    return dir
def _clear_temp_dir():
    """ Delete all files inside this process/thread's temp directory,
    ignoring files that cannot be removed. The directory itself stays.
    """
    tempdir = get_tempdir()
    for entry in os.listdir(tempdir):
        try:
            os.remove(os.path.join(tempdir, entry))
        except Exception:
            pass
def _get_image_paths(im1, im2):
""" If the images are paths to a file, checks whether the file exist
and return the paths. If the images are numpy arrays, writes them
to disk and returns the paths of the new files.
"""
paths = []
for im in [im1, im2]:
if im is None:
# Groupwise registration: only one image (ndim+1 dimensions)
paths.append(paths[0])
continue
if isinstance(im, str):
# Given a location
if os.path.isfile(im1):
paths.append(im)
else:
raise ValueError('Image location does not exist.')
elif isinstance(im, np.ndarray):
# Given a numpy array
id = len(paths)+1
p = _write_image_data(im, id)
paths.append(p)
else:
# Given something else ...
raise ValueError('Invalid input image.')
# Done
return tuple(paths)
# %% Some helper stuff
def _system3(cmd, verbose=False):
    """ Execute the given command in a subprocess and wait for it to finish.
    A thread is run that prints output of the process if verbose is True.
    Raises RuntimeError when the process exits nonzero or the user
    interrupts it with Ctrl-C.
    """
    # Init flag; the poller thread reads this closure variable to stop
    interrupted = False
    # Create progress
    if verbose > 0:
        progress = Progress()
    stdout = []
    def poll_process(p):
        # Runs in a daemon thread: collect and forward the child output
        while not interrupted:
            msg = p.stdout.readline().decode()
            if msg:
                stdout.append(msg)
                # Error lines are always shown, regardless of verbosity
                if 'error' in msg.lower():
                    print(msg.rstrip())
                    if verbose == 1:
                        progress.reset()
                elif verbose > 1:
                    print(msg.rstrip())
                elif verbose == 1:
                    progress.update(msg)
            else:
                break
            time.sleep(0.01)
        #print("thread exit")
    # Start process that runs the command.
    # NOTE(review): cmd is a list but shell=True is used; on Windows the
    # list is joined into one command line, while on POSIX only cmd[0]
    # is given to the shell — verify this is intended on all platforms.
    p = subprocess.Popen(cmd, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Keep reading stdout from it
    # thread.start_new_thread(poll_process, (p,)) Python 2.x
    my_thread = threading.Thread(target=poll_process, args=(p,))
    my_thread.setDaemon(True)
    my_thread.start()
    # Wait here, polling so that KeyboardInterrupt can be caught
    try:
        while p.poll() is None:
            time.sleep(0.01)
    except KeyboardInterrupt:
        # Set flag
        interrupted = True
        # Kill subprocess.
        # NOTE(review): on Windows/Python 3 os.kill exists but
        # signal.SIGKILL does not, so the first branch would raise;
        # presumably the elif was meant for that platform — confirm.
        pid = p.pid
        if hasattr(os,'kill'):
            import signal
            os.kill(pid, signal.SIGKILL)
        elif sys.platform.startswith('win'):
            kernel32 = ctypes.windll.kernel32
            handle = kernel32.OpenProcess(1, 0, pid)
            kernel32.TerminateProcess(handle, 0)
            #os.system("TASKKILL /PID " + str(pid) + " /F")
    # All good?
    if interrupted:
        raise RuntimeError('Registration process interrupted by the user.')
    if p.returncode:
        stdout.append(p.stdout.read().decode())
        print(''.join(stdout))
        raise RuntimeError('An error occured during the registration.')
def _get_dtype_maps():
""" Get dictionaries to map numpy data types to ITK types and the
other way around.
"""
# Define pairs
tmp = [ (np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
(np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
(np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
(np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
(np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG') ]
# Create dictionaries
map1, map2 = {}, {}
for np_type, itk_type in tmp:
map1[np_type.__name__] = itk_type
map2[itk_type] = np_type.__name__
# Done
return map1, map2
# Module-level dtype translation tables (numpy name <-> ITK element type).
DTYPE_NP2ITK, DTYPE_ITK2NP = _get_dtype_maps()
class Progress:
    """ Tracks registration progress by parsing lines of Elastix output
    and printing a short resolution/iteration status message.
    """

    def __init__(self):
        # Current resolution level as last reported by Elastix
        self._level = 0
        self.reset()

    def update(self, s):
        """ Feed one line of Elastix output. """
        # Lines like "Resolution: N" announce a new pyramid level
        if s.startswith('Resolution:'):
            self._level = self.get_int(s.split(':')[1])
        # Iteration lines are tab-separated, starting with the iter number
        if '\t' in s:
            iter = self.get_int(s.split('\t', 1)[0])
            if iter:
                self.show_progress(iter)

    def get_int(self, s):
        """ Parse an int from s, returning 0 when parsing fails. """
        try:
            return int(s)
        except Exception:
            return 0

    def reset(self):
        """ Forget the previous message and start on a fresh line. """
        self._message = ''
        print()

    def show_progress(self, iter):
        """ Print the current resolution/iteration status message. """
        # Backspaces intended to wipe the previously printed message
        erase = '\b' * (len(self._message) + 1)
        self._message = 'resolution %i, iter %i' % (self._level, iter)
        print(erase + self._message)
# %% The Elastix registration class
def register(im1, im2, params, exact_params=False, verbose=1):
    """ register(im1, im2, params, exact_params=False, verbose=1)
    Perform the registration of `im1` to `im2`, using the given
    parameters. Returns `(im1_deformed, field)`, where `field` is a
    tuple with arrays describing the deformation for each dimension
    (x-y-z order, in world units).
    Parameters:
    * im1 (ndarray or file location):
        The moving image (the one to deform).
    * im2 (ndarray or file location):
        The static (reference) image.
    * params (dict or Parameters):
        The parameters of the registration. Default parameters can be
        obtained using the `get_default_params()` method. Note that any
        parameter known to Elastix can be added to the parameter
        struct, which enables tuning the registration in great detail.
        See `get_default_params()` and the Elastix docs for more info.
    * exact_params (bool):
        If True, use the exact given parameters. If False (default)
        will process the parameters, checking for incompatible
        parameters, extending values to lists if a value needs to be
        given for each dimension.
    * verbose (int):
        Verbosity level. If 0, will not print any progress. If 1, will
        print the progress only. If 2, will print the full output
        produced by the Elastix executable. Note that error messages
        produced by Elastix will be printed regardless of the verbose
        level.
    If `im1` is a list of images, performs a groupwise registration.
    In this case the resulting `field` is a list of fields, each
    indicating the deformation to the "average" image.
    """
    # Use a fresh per-process/thread temp dir; remove leftovers first
    tempdir = get_tempdir()
    _clear_temp_dir()
    # Reference image (first of the list for groupwise registration)
    refIm = im1
    if isinstance(im1, (tuple,list)):
        refIm = im1[0]
    # Check/normalize parameters, then flatten to a plain dict
    if not exact_params:
        params = _compile_params(params, refIm)
    if isinstance(params, Parameters):
        params = params.as_dict()
    # Groupwise registration? (signalled by a missing reference image)
    if im2 is None:
        # todo: also allow using a constraint on the "last dimension"
        if not isinstance(im1, (tuple,list)):
            raise ValueError('im2 is None, but im1 is not a list.')
        #
        ims = im1
        ndim = ims[0].ndim
        # Stack all images into one (ndim+1)-dimensional image
        N = len(ims)
        new_shape = (N,) + ims[0].shape
        im1 = np.zeros(new_shape, ims[0].dtype)
        for i in range(N):
            im1[i] = ims[i]
        # Force the groupwise-specific Elastix components/parameters
        #params['UseCyclicTransform'] = True # to be chosen by user
        params['FixedImageDimension'] = im1.ndim
        params['MovingImageDimension'] = im1.ndim
        params['FixedImagePyramid'] = 'FixedSmoothingImagePyramid'
        params['MovingImagePyramid'] = 'MovingSmoothingImagePyramid'
        params['Metric'] = 'VarianceOverLastDimensionMetric'
        params['Transform'] = 'BSplineStackTransform'
        params['Interpolator'] = 'ReducedDimensionBSplineInterpolator'
        params['SampleLastDimensionRandomly'] = True
        params['NumSamplesLastDimension'] = 5
        params['SubtractMean'] = True
        # Pyramid schedule with no smoothing along the stack dimension
        # (a 0 for that axis at every resolution level)
        pyramidsamples = []
        for i in range(params['NumberOfResolutions']):
            pyramidsamples.extend( [0]+[2**i]*ndim )
        pyramidsamples.reverse()
        params['ImagePyramidSchedule'] = pyramidsamples
    # Get paths of input images (ndarrays are written to disk first)
    path_im1, path_im2 = _get_image_paths(im1, im2)
    # Determine path of parameter file and write params
    path_params = _write_parameter_file(params)
    # Elastix writes the transform parameters to this fixed location
    path_trafo_params = os.path.join(tempdir, 'TransformParameters.0.txt')
    # Register: run the elastix executable and read the deformed image
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[0],
                   '-m', path_im1, '-f', path_im2,
                   '-out', tempdir, '-p', path_params]
        if verbose:
            print("Calling Elastix to register images ...")
        _system3(command, verbose)
        # Try and load result
        try:
            a = _read_image_data('result.0.mhd')
        except IOError as why:
            tmp = "An error occured during registration: " + str(why)
            raise RuntimeError(tmp)
    # Obtain deformation field by running transformix on the transform
    if True:
        # Compile command to execute
        command = [get_elastix_exes()[1],
                   '-def', 'all', '-out', tempdir, '-tp', path_trafo_params]
        _system3(command, verbose)
        # Try and load result
        try:
            b = _read_image_data('deformationField.mhd')
        except IOError as why:
            tmp = "An error occured during transformation: " + str(why)
            raise RuntimeError(tmp)
    # Get deformation fields (for each image in the groupwise case)
    if im2 is None:
        fields = [b[i] for i in range(b.shape[0])]
    else:
        fields = [b]
    # Split each vector field into one array per dimension; the vector
    # components live on the last axis of the loaded field
    for i in range(len(fields)):
        field = fields[i]
        if field.ndim == 2:
            field = [field[:,d] for d in range(1)]
        elif field.ndim == 3:
            field = [field[:,:,d] for d in range(2)]
        elif field.ndim == 4:
            field = [field[:,:,:,d] for d in range(3)]
        elif field.ndim == 5:
            field = [field[:,:,:,:,d] for d in range(4)]
        fields[i] = tuple(field)
    if im2 is not None:
        fields = fields[0]  # For pairwise reg, return 1 field, not a list
    # Clean and return
    _clear_temp_dir()
    return a, fields
def _write_image_data(im, id):
    """ Write a numpy array to disk in the form of a .raw and .mhd file.

    The .raw file contains the image data bytes, written verbatim (the
    data is not rescaled or converted), and the .mhd file is a MetaImage
    text header describing shape, sampling, origin and dtype. The id is
    the image sequence number (1 or 2). Returns the path of the mhd file.

    Raises ValueError if the array's dtype cannot be mapped to an ITK
    element type.
    """
    # Create text
    lines = [ "ObjectType = Image",
              "NDims = <ndim>",
              "BinaryData = True",
              "BinaryDataByteOrderMSB = False",
              "CompressedData = False",
              #"TransformMatrix = <transmatrix>",
              "Offset = <origin>",
              "CenterOfRotation = <centrot>",
              "ElementSpacing = <sampling>",
              "DimSize = <shape>",
              "ElementType = <dtype>",
              "ElementDataFile = <fname>",
              "" ]
    text = '\n'.join(lines)

    # Determine file names
    tempdir = get_tempdir()
    fname_raw_ = 'im%i.raw' % id
    fname_raw = os.path.join(tempdir, fname_raw_)
    fname_mhd = os.path.join(tempdir, 'im%i.mhd' % id)

    # Get shape, sampling and origin (default to unit spacing at origin 0)
    shape = im.shape
    if hasattr(im, 'sampling'): sampling = im.sampling
    else:                       sampling = [1 for s in im.shape]
    if hasattr(im, 'origin'): origin = im.origin
    else:                     origin = [0 for s in im.shape]

    # Make all shape stuff in x-y-z order and make it string
    shape = ' '.join([str(s) for s in reversed(shape)])
    sampling = ' '.join([str(s) for s in reversed(sampling)])
    origin = ' '.join([str(s) for s in reversed(origin)])

    # Get data type (of the unmodified input array)
    dtype_itk = DTYPE_NP2ITK.get(im.dtype.name, None)
    if dtype_itk is None:
        raise ValueError('Cannot convert data of this type: ' + str(im.dtype))

    # Set mhd text
    text = text.replace('<fname>', fname_raw_)
    text = text.replace('<ndim>', str(im.ndim))
    text = text.replace('<shape>', shape)
    text = text.replace('<sampling>', sampling)
    text = text.replace('<origin>', origin)
    text = text.replace('<dtype>', dtype_itk)
    text = text.replace('<centrot>', ' '.join(['0' for s in im.shape]))
    # Note: the TransformMatrix header line is disabled above, so these
    # replacements are currently no-ops (kept for when it is re-enabled)
    if im.ndim == 2:
        text = text.replace('<transmatrix>', '1 0 0 1')
    elif im.ndim == 3:
        text = text.replace('<transmatrix>', '1 0 0 0 1 0 0 0 1')
    elif im.ndim == 4:
        pass  # not supported (yet)

    # Write data file
    with open(fname_raw, 'wb') as f:
        f.write(im.data)

    # Write mhd file
    with open(fname_mhd, 'wb') as f:
        f.write(text.encode('utf-8'))

    # Done, return path of mhd file
    return fname_mhd
def _read_image_data(mhd_file):
    """ Read an image (mhd header plus raw data) from the temp dir and
    return it as an Image: a numpy array carrying `sampling` and
    `origin` attributes.
    """
    tempdir = get_tempdir()

    # Load the mhd header text
    header = open(tempdir + '/' + mhd_file, 'r').read()

    def field(key):
        # Return the (string) value for the given header key
        return re.findall(key + ' = (.+?)\n', header)[0]

    # Load the raw data file referenced by the header
    data = open(tempdir + '/' + field('ElementDataFile'), 'rb').read()

    # Map the ITK element type onto a numpy dtype
    dtype_itk = field('ElementType').upper().strip()
    dtype = DTYPE_ITK2NP.get(dtype_itk, None)
    if dtype is None:
        raise RuntimeError('Unknown ElementType: ' + dtype_itk)
    a = np.frombuffer(data, dtype=dtype)

    # Shape, sampling and origin are stored in x-y-z order;
    # reverse each of them to get z-y-x order
    shape = [int(i) for i in reversed(field('DimSize').split(' '))]
    sampling = [float(i) for i in reversed(field('ElementSpacing').split(' '))]
    origin = [float(i) for i in reversed(field('Offset').split(' '))]

    # A trailing dimension may hold vector components / colours
    N = np.prod(shape)
    if N != a.size:
        extraDim = int(a.size / N)
        shape = tuple(shape) + (extraDim,)
        sampling = tuple(sampling) + (1.0,)
        origin = tuple(origin) + (0,)

    # Apply the shape, failing loudly on a size mismatch
    if np.prod(shape) != a.size:
        raise RuntimeError('Cannot apply shape to data.')
    a = a.reshape(shape)

    # Attach the meta data and return
    a = Image(a)
    a.sampling = sampling
    a.origin = origin
    return a
class Image(np.ndarray):
    """ ndarray subclass used to attach meta data (such as sampling
    and origin) to image arrays.
    """
    def __new__(cls, array):
        try:
            return array.view(cls)
        except AttributeError:  # pragma: no cover
            # On interpreters without ndarray.view (e.g. Pypy) simply
            # return the original array; no metadata can be attached.
            return array
# %% Code related to parameters
class Parameters:
    """ Struct object to represent the parameters for the Elastix
    registration toolkit. Sets of parameters can be combined by
    addition. (When adding `p1 + p2`, any parameters present in both
    objects will take the value that the parameter has in `p2`.)

    Use `get_default_params()` to get a Parameters struct with sensible
    default values.
    """

    def as_dict(self):
        """ Returns the parameters as a dictionary.
        """
        return dict(self.__dict__)

    def __repr__(self):
        return '<Parameters instance with %i parameters>' % len(self.__dict__)

    def __str__(self):
        # Width of the longest parameter name (0 if there are none)
        width = max([len(key) for key in self.__dict__] + [0])
        # Room left for the value so each line stays under 80 chars
        room = 79 - (width + 6)
        # One line per parameter, right-aligned names, truncated values
        s = '<%i parameters>\n' % len(self.__dict__)
        for key, value in self.__dict__.items():
            text = repr(value)
            if len(text) > room:
                text = text[:room - 3] + '...'
            s += key.rjust(width + 4) + ': %s\n' % text
        return s

    def __add__(self, other):
        # Later operands win for duplicate keys
        combined = Parameters()
        for source in (self, other):
            combined.__dict__.update(source.__dict__)
        return combined
def _get_fixed_params(im):
    """ Build the Parameters that are fully determined by the input
    image (dimensionality and output pixel format); the user has no
    influence on these.
    """
    p = Parameters()
    # Non-arrays (e.g. None) get no image-derived parameters at all
    if not isinstance(im, np.ndarray):
        return p
    # Dimension of the inputs
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim
    # Always write the result image, so it can be verified/loaded
    p.WriteResultImage = True
    # Derive the output pixel type from the array dtype
    itk_type = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = itk_type.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"
    return p
def get_advanced_params():
    """ Get `Parameters` struct with parameters that most users do not
    want to think about.
    """
    p = Parameters()
    # Insertion order is preserved by dict/__dict__ semantics
    p.__dict__.update([
        # Internal pixel format used during the registration process
        ('FixedInternalImagePixelType', 'float'),
        ('MovingInternalImagePixelType', 'float'),
        # Take image direction (cosines) into account
        ('UseDirectionCosines', True),
        # Multi resolution is what you want in almost all cases
        ('Registration', 'MultiResolutionRegistration'),
        # *RecursiveImagePyramid downsamples the images,
        # *SmoothingImagePyramid smooths but does not downsample
        ('FixedImagePyramid', 'FixedRecursiveImagePyramid'),
        ('MovingImagePyramid', 'MovingRecursiveImagePyramid'),
        # Whether transforms are combined by composition or by addition;
        # this hardly influences the results
        ('HowToCombineTransforms', 'Compose'),
        # Value used for pixels that fall out of range
        ('DefaultPixelValue', 0),
        # Interpolation during optimization: order 1 (linear) is cheap
        ('Interpolator', 'BSplineInterpolator'),
        ('BSplineInterpolationOrder', 1),
        # Interpolation of the final level: order 3 (cubic) is accurate
        ('ResampleInterpolator', 'FinalBSplineInterpolator'),
        ('FinalBSplineInterpolationOrder', 3),
        # According to the manual, there is currently only one resampler
        ('Resampler', 'DefaultResampler'),
    ])
    return p
def get_default_params(type='BSPLINE'):
    """ get_default_params(type='BSPLINE')

    Get `Parameters` struct with parameters that users may want to tweak.
    The given `type` specifies the type of allowed transform, and can
    be 'RIGID', 'AFFINE', 'BSPLINE'.

    For details on what parameters are available and how they should be
    used, we refer to the Elastix documentation. The most common
    parameters are:

      * Transform (str):
        'BSplineTransform', 'EulerTransform', or 'AffineTransform';
        chosen based on `type`.
      * FinalGridSpacingInPhysicalUnits (int):
        For the BSplineTransform, the final spacing of the grid, which
        controls the smoothness of the final deformation.
      * AutomaticScalesEstimation (bool):
        For a rigid or affine transform: scales the matrix elements
        relative to the translations so they are in the same range.
        In general, automatic estimation works best.
      * AutomaticTransformInitialization (bool):
        For a rigid or affine transform: automatically guess an initial
        translation by aligning the geometric centers of the fixed and
        moving image.
      * NumberOfResolutions (int):
        Number of scales in the multi-resolution pyramid, which directs
        the solution towards a global optimum and speeds up the
        process (default 4).
      * MaximumNumberOfIterations (int):
        Maximum number of iterations per resolution level. 200-2000
        usually works well for nonrigid registration; more is better
        but slower. This is an important parameter! (default 500)
    """
    p = Parameters()
    transform_kind = type.upper()

    # --- Similarity metric ---
    p.Metric = 'AdvancedMattesMutualInformation'
    # Grey-level bins per resolution level for the mutual information;
    # 16 or 32 usually works fine. Sets the default value for
    # NumberOf[Fixed/Moving]HistogramBins.
    p.NumberOfHistogramBins = 32
    # Random sampling of the image for the mutual information
    p.ImageSampler = 'RandomCoordinate'
    p.NumberOfSpatialSamples = 2048
    p.NewSamplesEveryIteration = True

    # --- Transform ---
    # The number of levels in the image pyramid
    p.NumberOfResolutions = 4
    if transform_kind in ('B', 'BSPLINE', 'B-SPLINE'):
        p.Transform = 'BSplineTransform'
        # The final grid spacing (at the smallest level)
        p.FinalGridSpacingInPhysicalUnits = 16
    if transform_kind in ('RIGID', 'EULER', 'AFFINE'):
        if transform_kind == 'AFFINE':
            p.Transform = 'AffineTransform'
        else:
            p.Transform = 'EulerTransform'
        # Scale matrix elements relative to translations; automatic
        # estimation is generally the best choice
        p.AutomaticScalesEstimation = True
        # Guess an initial translation from the geometric centers
        p.AutomaticTransformInitialization = True

    # --- Optimizer ---
    p.Optimizer = 'AdaptiveStochasticGradientDescent'
    # Maximum iterations per resolution level; see the docstring
    p.MaximumNumberOfIterations = 500
    # The optimizer step size (in mm) defaults to the voxel spacing,
    # which usually works well. For unusually high-resolution images
    # (e.g. histology) increase it to the size of the smallest visible
    # structure:
    # p.MaximumStepLength = 1.0
    # Another optional parameter for AdaptiveStochasticGradientDescent:
    # p.SigmoidInitialTime = 4.0

    # Other parameters that may be of interest:
    # p.FinalGridSpacingInVoxels = 16
    # p.GridSpacingSchedule = [4.0, 4.0, 2.0, 1.0]
    # p.ImagePyramidSchedule = [8, 8, 4, 4, 2, 2, 1, 1]
    # p.ErodeMask = "false"
    return p
def _compile_params(params, im1):
    """ Compile the params dictionary:
      * Combine parameters from different sources
      * Perform checks to prevent non-compatible parameters
      * Extend parameters that need a list with one element per dimension
    """
    # Merge; later sources win: fixed < advanced < user-supplied
    combined = _get_fixed_params(im1) + get_advanced_params() + params
    d = combined.as_dict()

    # Grid-spacing parameters need one value per image dimension
    if isinstance(im1, np.ndarray):
        for key in ('FinalGridSpacingInPhysicalUnits',
                    'FinalGridSpacingInVoxels'):
            if key in d and not isinstance(d[key], (list, tuple)):
                d[key] = [d[key]] * im1.ndim

    # The two grid-spacing flavors are mutually exclusive;
    # the voxel-based one takes precedence
    if 'FinalGridSpacingInVoxels' in d:
        d.pop('FinalGridSpacingInPhysicalUnits', None)

    return d
def _write_parameter_file(params):
    """ Write the parameter file in the format that elastix likes.
    Returns the path of the written file.

    Raises ValueError if a parameter value (or list element) has a type
    that cannot be represented in the parameter file.
    """
    # Get path
    path = os.path.join(get_tempdir(), 'params.txt')

    # Define helper function
    def valToStr(val):
        # Convert one value to its textual representation.
        # Note: bool must be tested before int (bool is an int subclass).
        if val in [True, False]:
            return '"%s"' % str(val).lower()
        elif isinstance(val, int):
            return str(val)
        elif isinstance(val, float):
            tmp = str(val)
            if not '.' in tmp:
                tmp += '.0'
            return tmp
        elif isinstance(val, str):
            return '"%s"' % val
        else:
            # Fail loudly instead of silently writing the text "None"
            raise ValueError('Cannot represent parameter value %r' % (val,))

    # Compile text
    text = ''
    for key in params:
        val = params[key]
        # Make a string of the values (lists become space-separated)
        if isinstance(val, (list, tuple)):
            vals = [valToStr(v) for v in val]
            val_ = ' '.join(vals)
        else:
            val_ = valToStr(val)
        # One line per parameter: (Name value1 value2 ...)
        line = '(%s %s)' % (key, val_)
        text += line + '\n'

    # Write text
    with open(path, 'wb') as f:
        f.write(text.encode('utf-8'))

    # Done
    return path
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
install
|
python
|
def install(application, io_loop=None, **kwargs):
    """Call this to install AMQP for the Tornado application. Additional
    keyword arguments are passed through to the constructor of the AMQP
    object.

    :param tornado.web.Application application: The tornado application
    :param tornado.ioloop.IOLoop io_loop: The current IOLoop.
    :rtype: bool

    """
    if getattr(application, 'amqp', None) is not None:
        LOGGER.warning('AMQP is already installed')
        return False
    kwargs.setdefault('io_loop', io_loop)
    # Support AMQP_* and RABBITMQ_* variables. Iterate a tuple rather
    # than a set so that, combined with setdefault, AMQP_* values take
    # precedence deterministically when both prefixes are present.
    for prefix in ('AMQP', 'RABBITMQ'):
        key = '{}_URL'.format(prefix)
        if os.environ.get(key) is not None:
            LOGGER.debug('Setting URL to %s', os.environ[key])
            kwargs.setdefault('url', os.environ[key])
        key = '{}_CONFIRMATIONS'.format(prefix)
        if os.environ.get(key) is not None:
            value = os.environ[key].lower() in {'true', '1'}
            LOGGER.debug('Setting enable_confirmations to %s', value)
            kwargs.setdefault('enable_confirmations', value)
        key = '{}_CONNECTION_ATTEMPTS'.format(prefix)
        if os.environ.get(key) is not None:
            value = int(os.environ[key])
            LOGGER.debug('Setting connection_attempts to %s', value)
            kwargs.setdefault('connection_attempts', value)
        key = '{}_RECONNECT_DELAY'.format(prefix)
        if os.environ.get(key) is not None:
            value = float(os.environ[key])
            LOGGER.debug('Setting reconnect_delay to %s', value)
            kwargs.setdefault('reconnect_delay', value)
    # Default the AMQP app_id property to service/version when available
    if application.settings.get('service') and \
            application.settings.get('version'):
        default_app_id = '{}/{}'.format(
            application.settings['service'], application.settings['version'])
    else:
        default_app_id = 'sprockets.mixins.amqp/{}'.format(__version__)
    kwargs.setdefault('default_app_id', default_app_id)
    # Default the default URL value if not already set
    kwargs.setdefault('url', 'amqp://guest:guest@localhost:5672/%2f')
    LOGGER.debug('kwargs: %r', kwargs)
    setattr(application, 'amqp', Client(**kwargs))
    return True
|
Call this to install AMQP for the Tornado application. Additional
keyword arguments are passed through to the constructor of the AMQP
object.
:param tornado.web.Application application: The tornado application
:param tornado.ioloop.IOLoop io_loop: The current IOLoop.
:rtype: bool
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L42-L98
| null |
"""The PublishingMixin adds RabbitMQ publishing capabilities to a request
handler, with methods to speed the development of publishing RabbitMQ messages.
Configured using the following environment variables:
``AMQP_URL`` - The AMQP URL to connect to.
``AMQP_TIMEOUT`` - The optional maximum time to wait for a bad state
to resolve before treating the failure as
persistent.
``AMQP_RECONNECT_DELAY`` - The optional time in seconds to wait before
reconnecting on connection failure.
``AMQP_CONNECTION_ATTEMPTS`` - The optional number of connection
attempts to make before giving up.
The ``AMQP`` prefix is interchangeable with ``RABBITMQ``. For example, you can
use ``AMQP_URL`` or ``RABBITMQ_URL``.
"""
import os
import logging
import sys
import time
import uuid
try:
from tornado import concurrent, ioloop
from pika import exceptions
import pika
except ImportError: # pragma: nocover
sys.stderr.write('setup.py import error compatibility objects created\n')
concurrent, ioloop, exceptions, pika = \
object(), object(), object(), object()
__version__ = '2.1.4'
LOGGER = logging.getLogger(__name__)
DEFAULT_RECONNECT_DELAY = 5
DEFAULT_CONNECTION_ATTEMPTS = 3
class PublishingMixin(object):
    """This mixin adds publishing messages to RabbitMQ. It uses a
    persistent connection and channel opened when the application
    start up and automatically reopened if closed by RabbitMQ

    """
    def amqp_publish(self, exchange, routing_key, body, properties=None):
        """Publish a message to RabbitMQ

        :param str exchange: The exchange to publish the message to
        :param str routing_key: The routing key to publish the message with
        :param bytes body: The message body to send
        :param dict properties: An optional dict of AMQP properties
        :rtype: tornado.concurrent.Future
        :raises: :exc:`sprockets.mixins.amqp.AMQPError`
        :raises: :exc:`sprockets.mixins.amqp.NotReadyError`
        :raises: :exc:`sprockets.mixins.amqp.PublishingError`

        """
        # Falsy (None or empty) properties are replaced by a fresh dict
        properties = properties or {}
        # Propagate the handler's correlation_id, if any, unless the
        # caller supplied one explicitly
        correlation_id = getattr(self, 'correlation_id', None)
        if correlation_id:
            properties.setdefault('correlation_id', correlation_id)
        return self.application.amqp.publish(
            exchange, routing_key, body, properties)
class Client(object):
    """This class encompasses all of the AMQP/RabbitMQ specific behaviors.
    If RabbitMQ closes the connection, it will reopen it. You should
    look at the output, as there are limited reasons why the connection may
    be closed, which usually are tied to permission related issues or
    socket timeouts.
    If the channel is closed, it will indicate a problem with one of the
    commands that were issued and that should surface in the output as well.
    """
    # Connection life-cycle states; human-readable names in STATE_DESC
    STATE_IDLE = 0x01
    STATE_CONNECTING = 0x02
    STATE_READY = 0x03
    STATE_BLOCKED = 0x04
    STATE_CLOSING = 0x05
    STATE_CLOSED = 0x06
    STATE_DESC = {
        0x01: 'Idle',
        0x02: 'Connecting',
        0x03: 'Ready',
        0x04: 'Blocked',
        0x05: 'Closing',
        0x06: 'Closed'}

    def __init__(self,
                 url,
                 enable_confirmations=True,
                 reconnect_delay=DEFAULT_RECONNECT_DELAY,
                 connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
                 default_app_id=None,
                 on_ready_callback=None,
                 on_unavailable_callback=None,
                 on_return_callback=None,
                 io_loop=None):
        """Create a new instance of the consumer class, passing in the AMQP
        URL used to connect to RabbitMQ.

        :param str url: The AMQP URL to connect to
        :param bool enable_confirmations: Enable Publisher Confirmations
        :param int reconnect_delay: The optional time in seconds to wait before
            reconnecting on connection failure.
        :param int connection_attempts: The optional number of connection
            attempts to make before giving up.
        :param str default_app_id: The default AMQP application ID
        :param callable on_ready_callback: The optional callback to call when
            the connection to RabbitMQ has been established and is ready.
        :param callable on_unavailable_callback: The optional callback to call
            when the connection to the AMQP server becomes unavailable.
        :param callable on_return_callback: The optional callback
            that is invoked if a message is returned because it is unroutable
        :param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
            the default with.
        :raises: ValueError

        """
        # Both casts raise ValueError on non-numeric input; a zero value
        # is rejected as well (falsy after conversion)
        if not int(connection_attempts):
            raise ValueError(
                'Invalid connection_attempts value: {}'.format(
                    connection_attempts))
        if not float(reconnect_delay):
            raise ValueError(
                'Invalid reconnect_delay value: {}'.format(reconnect_delay))
        self.state = self.STATE_IDLE
        self.io_loop = io_loop or ioloop.IOLoop.current()
        self.channel = None
        self.connection = None
        self.connection_attempts = int(connection_attempts)
        self.default_app_id = default_app_id
        # message_number counts published messages; messages maps that
        # number to the Future awaiting its delivery confirmation
        self.message_number = 0
        self.messages = {}
        self.on_ready = on_ready_callback
        self.on_return = on_return_callback
        self.on_unavailable = on_unavailable_callback
        self.publisher_confirmations = enable_confirmations
        self.reconnect_delay = float(reconnect_delay)
        self.url = url
        self.parameters = pika.URLParameters(url)
        self.parameters.connection_attempts = self.connection_attempts
        # Automatically start the RabbitMQ connection on creation
        self.connect()

    def publish(self, exchange, routing_key, body, properties=None):
        """Publish a message to RabbitMQ. If the RabbitMQ connection is not
        established or is blocked, attempt to wait until sending is possible.

        :param str exchange: The exchange to publish the message to.
        :param str routing_key: The routing key to publish the message with.
        :param bytes body: The message body to send.
        :param dict properties: An optional dict of additional properties
                                to append.
        :rtype: tornado.concurrent.Future
        :raises: :exc:`sprockets.mixins.amqp.NotReadyError`
        :raises: :exc:`sprockets.mixins.amqp.PublishingError`

        """
        future = concurrent.Future()
        properties = properties or {}
        properties.setdefault('app_id', self.default_app_id)
        properties.setdefault('message_id', str(uuid.uuid4()))
        properties.setdefault('timestamp', int(time.time()))
        if self.ready:
            if self.publisher_confirmations:
                # Remember the future so on_delivery_confirmation can
                # resolve it once RabbitMQ (n)acks the message
                self.message_number += 1
                self.messages[self.message_number] = future
            else:
                # Without confirmations, resolve immediately
                future.set_result(None)
            try:
                self.channel.basic_publish(
                    exchange, routing_key, body,
                    pika.BasicProperties(**properties), True)
            except exceptions.AMQPError as error:
                future.set_exception(
                    PublishingFailure(
                        properties['message_id'],
                        exchange, routing_key,
                        error.__class__.__name__))
        else:
            future.set_exception(NotReadyError(
                self.state_description, properties['message_id']))
        return future

    def on_delivery_confirmation(self, method_frame):
        """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
        command, passing in either a Basic.Ack or Basic.Nack frame with
        the delivery tag of the message that was published. The delivery tag
        is an integer counter indicating the message number that was sent
        on the channel via Basic.Publish. Here we're just doing house keeping
        to keep track of stats and remove message numbers that we expect
        a delivery confirmation of from the list used to keep track of messages
        that are pending confirmation.

        :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame

        """
        confirmation_type = method_frame.method.NAME.split('.')[1].lower()
        LOGGER.debug('Received %s for delivery tag: %i',
                     confirmation_type, method_frame.method.delivery_tag)
        # A "multiple" frame confirms every outstanding tag up to and
        # including delivery_tag in one go
        if method_frame.method.multiple:
            confirmed = sorted([msg for msg in self.messages
                                if msg <= method_frame.method.delivery_tag])
        else:
            confirmed = [method_frame.method.delivery_tag]
        for msg in confirmed:
            LOGGER.debug('RabbitMQ confirmed message %i', msg)
            try:
                if confirmation_type == 'ack':
                    self.messages[msg].set_result(None)
                elif confirmation_type == 'nack':
                    self.messages[msg].set_exception(PublishingFailure(msg))
            except KeyError:
                LOGGER.warning('Tried to confirm a message missing in stack')
            else:
                del self.messages[msg]
        LOGGER.debug('Published %i messages, %i have yet to be confirmed',
                     self.message_number, len(self.messages))

    @property
    def idle(self):
        """Returns ``True`` if the connection to RabbitMQ is closing.

        :rtype: bool

        """
        return self.state == self.STATE_IDLE

    @property
    def connecting(self):
        """Returns ``True`` if the connection to RabbitMQ is open and a
        channel is in the process of connecting.

        :rtype: bool

        """
        return self.state == self.STATE_CONNECTING

    @property
    def blocked(self):
        """Returns ``True`` if the connection is blocked by RabbitMQ.

        :rtype: bool

        """
        return self.state == self.STATE_BLOCKED

    @property
    def closable(self):
        """Returns ``True`` if the connection to RabbitMQ can be closed

        :rtype: bool

        """
        return self.state in [self.STATE_BLOCKED, self.STATE_READY]

    @property
    def closed(self):
        """Returns ``True`` if the connection to RabbitMQ is closed.

        :rtype: bool

        """
        return self.state == self.STATE_CLOSED

    @property
    def closing(self):
        """Returns ``True`` if the connection to RabbitMQ is closing.

        :rtype: bool

        """
        return self.state == self.STATE_CLOSING

    @property
    def ready(self):
        """Returns ``True`` if the connection to RabbitMQ is established and
        we can publish to it.

        :rtype: bool

        """
        return self.state == self.STATE_READY

    @property
    def state_description(self):
        """Return the human understandable state description.

        :rtype: str

        """
        return self.STATE_DESC[self.state]

    def connect(self):
        """This method connects to RabbitMQ, returning the connection handle.
        When the connection is established, the on_connection_open method
        will be invoked by pika.

        :rtype: pika.TornadoConnection

        """
        # Only an idle or closed client may (re)connect
        if not self.idle and not self.closed:
            raise ConnectionStateError(self.state_description)
        LOGGER.debug('Connecting to %s', self.url)
        self.state = self.STATE_CONNECTING
        self.connection = pika.TornadoConnection(
            parameters=self.parameters,
            on_open_callback=self.on_connection_open,
            on_open_error_callback=self.on_connection_open_error,
            on_close_callback=self.on_connection_closed,
            custom_ioloop=self.io_loop)

    def close(self):
        """Cleanly shutdown the connection to RabbitMQ

        :raises: sprockets.mixins.amqp.ConnectionStateError

        """
        if not self.closable:
            LOGGER.warning('Closed called while %s', self.state_description)
            raise ConnectionStateError(self.state_description)
        self.state = self.STATE_CLOSING
        LOGGER.info('Closing RabbitMQ connection')
        self.connection.close()

    def _open_channel(self):
        """Open a new channel with RabbitMQ.

        :rtype: pika.channel.Channel

        """
        LOGGER.debug('Creating a new channel')
        return self.connection.channel(self.on_channel_open)

    def _reconnect(self):
        """Schedule the next connection attempt if the class is not currently
        closing.

        """
        if self.idle or self.closed:
            LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
                         self.reconnect_delay)
            self.io_loop.call_later(self.reconnect_delay, self.connect)
            return
        LOGGER.warning('Reconnect called while %s', self.state_description)

    #
    # Connection event callbacks
    #

    def on_connection_open(self, connection):
        """This method is called by pika once the connection to RabbitMQ has
        been established.

        :type connection: pika.TornadoConnection

        """
        LOGGER.debug('Connection opened')
        connection.add_on_connection_blocked_callback(
            self.on_connection_blocked)
        connection.add_on_connection_unblocked_callback(
            self.on_connection_unblocked)
        connection.add_backpressure_callback(self.on_back_pressure_detected)
        self.channel = self._open_channel()

    def on_connection_open_error(self, connection, error):
        """Invoked if the connection to RabbitMQ can not be made.

        :type connection: pika.TornadoConnection
        :param Exception error: The exception indicating failure

        """
        LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
                        connection, error)
        self.state = self.STATE_CLOSED
        self._reconnect()

    @staticmethod
    def on_back_pressure_detected(obj):  # pragma: nocover
        """This method is called by pika if it believes that back pressure is
        being applied to the TCP socket.

        :param unknown obj: The connection where back pressure
            is being applied

        """
        LOGGER.warning('Connection back pressure detected: %r', obj)

    def on_connection_blocked(self, method_frame):
        """This method is called by pika if RabbitMQ sends a connection blocked
        method, to let us know we need to throttle our publishing.

        :param pika.amqp_object.Method method_frame: The blocked method frame

        """
        LOGGER.warning('Connection blocked: %s', method_frame)
        self.state = self.STATE_BLOCKED
        if self.on_unavailable:
            self.on_unavailable(self)

    def on_connection_unblocked(self, method_frame):
        """When RabbitMQ indicates the connection is unblocked, set the state
        appropriately.

        :param pika.amqp_object.Method method_frame: Unblocked method frame

        """
        LOGGER.debug('Connection unblocked: %r', method_frame)
        self.state = self.STATE_READY
        if self.on_ready:
            self.on_ready(self)

    def on_connection_closed(self, connection, reply_code, reply_text):
        """This method is invoked by pika when the connection to RabbitMQ is
        closed unexpectedly. Since it is unexpected, we will reconnect to
        RabbitMQ if it disconnects.

        :param pika.TornadoConnection connection: Closed connection
        :param int reply_code: The server provided reply_code if given
        :param str reply_text: The server provided reply_text if given

        """
        # Remember whether this close was requested via close(); only an
        # unexpected close triggers a reconnect
        start_state = self.state
        self.state = self.STATE_CLOSED
        if self.on_unavailable:
            self.on_unavailable(self)
        self.connection = None
        self.channel = None
        if start_state != self.STATE_CLOSING:
            LOGGER.warning('%s closed while %s: (%s) %s',
                           connection, self.state_description,
                           reply_code, reply_text)
            self._reconnect()

    #
    # Error Condition Callbacks
    #

    def on_basic_return(self, _channel, method, properties, body):
        """Invoke a registered callback or log the returned message.

        :param _channel: The channel the message was sent on
        :type _channel: pika.channel.Channel
        :param pika.spec.Basic.Return method: The method object
        :param pika.spec.BasicProperties properties: The message properties
        :param str, unicode, bytes body: The message body

        """
        if self.on_return:
            self.on_return(method, properties, body)
        else:
            LOGGER.critical(
                '%s message %s published to %s (CID %s) returned: %s',
                method.exchange, properties.message_id,
                method.routing_key, properties.correlation_id,
                method.reply_text)

    #
    # Channel event callbacks
    #

    def on_channel_open(self, channel):
        """This method is invoked by pika when the channel has been opened.
        The channel object is passed in so we can make use of it.

        :param pika.channel.Channel channel: The channel object

        """
        LOGGER.debug('Channel opened')
        self.channel = channel
        if self.publisher_confirmations:
            self.channel.confirm_delivery(self.on_delivery_confirmation)
        self.channel.add_on_close_callback(self.on_channel_closed)
        self.channel.add_on_flow_callback(self.on_channel_flow)
        self.channel.add_on_return_callback(self.on_basic_return)
        self.state = self.STATE_READY
        if self.on_ready:
            self.on_ready(self)

    def on_channel_closed(self, channel, reply_code, reply_text):
        """Invoked by pika when RabbitMQ unexpectedly closes the channel.
        Channels are usually closed if you attempt to do something that
        violates the protocol, such as re-declare an exchange or queue with
        different parameters.

        In this case, we just want to log the error and create a new channel
        after setting the state back to connecting.

        :param pika.channel.Channel channel: The closed channel
        :param int reply_code: The numeric reason the channel was closed
        :param str reply_text: The text reason the channel was closed

        """
        # Fail all in-flight publishes; their confirmations will never
        # arrive on the closed channel
        for future in self.messages.values():
            future.set_exception(AMQPException(reply_code, reply_text))
        self.messages = {}
        if self.closing:
            LOGGER.debug('Channel %s was intentionally closed (%s) %s',
                         channel, reply_code, reply_text)
        else:
            LOGGER.warning('Channel %s was closed: (%s) %s',
                           channel, reply_code, reply_text)
            self.state = self.STATE_BLOCKED
            if self.on_unavailable:
                self.on_unavailable(self)
            self.channel = self._open_channel()

    def on_channel_flow(self, method):
        """When RabbitMQ indicates the connection is unblocked, set the state
        appropriately.

        :param pika.spec.Channel.Flow method: The Channel flow frame

        """
        if method.active:
            LOGGER.info('Channel flow is active (READY)')
            self.state = self.STATE_READY
            if self.on_ready:
                self.on_ready(self)
        else:
            LOGGER.warning('Channel flow is inactive (BLOCKED)')
            self.state = self.STATE_BLOCKED
            if self.on_unavailable:
                self.on_unavailable(self)
class AMQPException(Exception):
    """Base class for all exceptions raised by the AMQP client."""
    fmt = 'AMQP Exception ({}): {}'

    def __init__(self, *args):
        # Keep our own copy of the args so __str__ can format them with
        # the subclass-specific `fmt` template
        self._args = args
        super(AMQPException, self).__init__(*args)

    def __str__(self):
        return self.fmt.format(*self._args)
class ConnectionStateError(AMQPException):
    """Raised when a connection state transition (connect, close,
    reconnect) is attempted while the client is in an incompatible state.
    """
    fmt = 'Attempted to close the connection while {}'
class NotReadyError(AMQPException):
    """Raised if the :meth:`Client.publish` is invoked and the connection is
    not ready for publishing.

    Constructed with (state_description, message_id).
    """
    fmt = 'Connection is {} when publishing message {}'
class PublishingFailure(AMQPException):
    """Raised if the :meth:`Client.publish` is invoked and an error occurs or
    the message delivery is not confirmed.

    Constructed with (message_id, exchange, routing_key, error_name).
    NOTE(review): Client.on_delivery_confirmation constructs this with a
    single delivery-tag argument, for which __str__ would raise
    IndexError given the four-placeholder `fmt` — verify.
    """
    fmt = 'Message {} was not routed to its intended destination ({}, {}): {}'
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
PublishingMixin.amqp_publish
|
python
|
def amqp_publish(self, exchange, routing_key, body, properties=None):
    """Publish a message to RabbitMQ via the application's AMQP client.

    :param str exchange: The exchange to publish the message to
    :param str routing_key: The routing key to publish the message with
    :param bytes body: The message body to send
    :param dict properties: An optional dict of AMQP properties
    :rtype: tornado.concurrent.Future
    """
    properties = properties or {}
    # Propagate the handler's correlation_id (if any) unless the caller
    # already provided one
    if hasattr(self, 'correlation_id') and getattr(self, 'correlation_id'):
        properties.setdefault('correlation_id', self.correlation_id)
    return self.application.amqp.publish(
        exchange, routing_key, body, properties)
|
Publish a message to RabbitMQ
:param str exchange: The exchange to publish the message to
:param str routing_key: The routing key to publish the message with
:param bytes body: The message body to send
:param dict properties: An optional dict of AMQP properties
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.AMQPError`
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L108-L126
| null |
class PublishingMixin(object):
"""This mixin adds publishing messages to RabbitMQ. It uses a
persistent connection and channel opened when the application
start up and automatically reopened if closed by RabbitMQ
"""
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.publish
|
python
|
def publish(self, exchange, routing_key, body, properties=None):
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
|
Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L216-L257
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_delivery_confirmation
|
python
|
def on_delivery_confirmation(self, method_frame):
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
|
Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L259-L295
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.connect
|
python
|
def connect(self):
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
|
This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L371-L388
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.close
|
python
|
def close(self):
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
|
Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L390-L401
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client._reconnect
|
python
|
def _reconnect(self):
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
|
Schedule the next connection attempt if the class is not currently
closing.
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L412-L422
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
    """This method connects to RabbitMQ, returning the connection handle.
    When the connection is established, the on_connection_open method
    will be invoked by pika.

    :rtype: pika.TornadoConnection
    :raises: ConnectionStateError if the client is neither idle nor
        closed (i.e. a connection already exists or is in progress)

    """
    # Only a brand-new (idle) or fully closed client may (re)connect.
    if not self.idle and not self.closed:
        raise ConnectionStateError(self.state_description)
    LOGGER.debug('Connecting to %s', self.url)
    self.state = self.STATE_CONNECTING
    # All lifecycle transitions are driven by these pika callbacks.
    self.connection = pika.TornadoConnection(
        parameters=self.parameters,
        on_open_callback=self.on_connection_open,
        on_open_error_callback=self.on_connection_open_error,
        on_close_callback=self.on_connection_closed,
        custom_ioloop=self.io_loop)
def close(self):
    """Cleanly shutdown the connection to RabbitMQ.

    :raises: sprockets.mixins.amqp.ConnectionStateError if the client
        is not in a closable state (ready or blocked)

    """
    if not self.closable:
        LOGGER.warning('Closed called while %s', self.state_description)
        raise ConnectionStateError(self.state_description)
    # STATE_CLOSING lets on_connection_closed tell an intentional close
    # apart from an unexpected drop (and skip reconnecting).
    self.state = self.STATE_CLOSING
    LOGGER.info('Closing RabbitMQ connection')
    self.connection.close()
def _open_channel(self):
    """Open a new channel with RabbitMQ.

    on_channel_open is invoked by pika once the channel is usable.

    :rtype: pika.channel.Channel

    """
    LOGGER.debug('Creating a new channel')
    return self.connection.channel(self.on_channel_open)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
    """This method is called by pika once the connection to RabbitMQ has
    been established.

    :type connection: pika.TornadoConnection

    """
    LOGGER.debug('Connection opened')
    # Register connection-level notification callbacks, then open the
    # channel; the client only becomes READY in on_channel_open.
    connection.add_on_connection_blocked_callback(
        self.on_connection_blocked)
    connection.add_on_connection_unblocked_callback(
        self.on_connection_unblocked)
    connection.add_backpressure_callback(self.on_back_pressure_detected)
    self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
    """Invoked if the connection to RabbitMQ can not be made.

    :type connection: pika.TornadoConnection
    :param Exception error: The exception indicating failure

    """
    LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
                    connection, error)
    self.state = self.STATE_CLOSED
    # Schedule another attempt; _reconnect only acts when idle/closed.
    self._reconnect()
@staticmethod
def on_back_pressure_detected(obj):  # pragma: nocover
    """This method is called by pika if it believes that back pressure is
    being applied to the TCP socket.

    :param unknown obj: The connection where back pressure
        is being applied

    """
    # Log-only: this client does not throttle publishing on backpressure.
    LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
    """This method is called by pika if RabbitMQ sends a connection blocked
    method, to let us know we need to throttle our publishing.

    :param pika.amqp_object.Method method_frame: The blocked method frame

    """
    LOGGER.warning('Connection blocked: %s', method_frame)
    self.state = self.STATE_BLOCKED
    # Notify the owner that publishing is temporarily unavailable.
    if self.on_unavailable:
        self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
    """When RabbitMQ indicates the connection is unblocked, set the state
    appropriately.

    :param pika.amqp_object.Method method_frame: Unblocked method frame

    """
    LOGGER.debug('Connection unblocked: %r', method_frame)
    # State is restored before the user callback fires so the callback
    # observes a READY client.
    self.state = self.STATE_READY
    if self.on_ready:
        self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
    """This method is invoked by pika when the connection to RabbitMQ is
    closed unexpectedly. Since it is unexpected, we will reconnect to
    RabbitMQ if it disconnects.

    :param pika.TornadoConnection connection: Closed connection
    :param int reply_code: The server provided reply_code if given
    :param str reply_text: The server provided reply_text if given

    """
    # Capture the pre-close state so an intentional close()
    # (STATE_CLOSING) can be told apart from an unexpected drop.
    start_state = self.state
    self.state = self.STATE_CLOSED
    if self.on_unavailable:
        self.on_unavailable(self)
    # Drop stale handles; a reconnect builds fresh ones.
    self.connection = None
    self.channel = None
    if start_state != self.STATE_CLOSING:
        LOGGER.warning('%s closed while %s: (%s) %s',
                       connection, self.state_description,
                       reply_code, reply_text)
        self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
    """Invoke a registered callback or log the returned (unroutable)
    message.

    :param _channel: The channel the message was sent on
    :type _channel: pika.channel.Channel
    :param pika.spec.Basic.Return method: The method object
    :param pika.spec.BasicProperties properties: The message properties
    :param str, unicode, bytes body: The message body

    """
    if self.on_return:
        self.on_return(method, properties, body)
    else:
        # No user callback registered: log loudly so returned messages
        # are never silently dropped.
        LOGGER.critical(
            '%s message %s published to %s (CID %s) returned: %s',
            method.exchange, properties.message_id,
            method.routing_key, properties.correlation_id,
            method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
    """This method is invoked by pika when the channel has been opened.
    The channel object is passed in so we can make use of it.

    :param pika.channel.Channel channel: The channel object

    """
    LOGGER.debug('Channel opened')
    self.channel = channel
    # Publisher confirmations are opt-in (enable_confirmations flag).
    if self.publisher_confirmations:
        self.channel.confirm_delivery(self.on_delivery_confirmation)
    self.channel.add_on_close_callback(self.on_channel_closed)
    self.channel.add_on_flow_callback(self.on_channel_flow)
    self.channel.add_on_return_callback(self.on_basic_return)
    # Channel open == client ready; notify the owner last.
    self.state = self.STATE_READY
    if self.on_ready:
        self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
    """Invoked by pika when RabbitMQ unexpectedly closes the channel.
    Channels are usually closed if you attempt to do something that
    violates the protocol, such as re-declare an exchange or queue with
    different parameters.

    In this case, we just want to log the error and create a new channel
    after setting the state back to connecting.

    :param pika.channel.Channel channel: The closed channel
    :param int reply_code: The numeric reason the channel was closed
    :param str reply_text: The text reason the channel was closed

    """
    # Fail every in-flight publish future; confirmations for them can
    # no longer arrive on the dead channel.
    for future in self.messages.values():
        future.set_exception(AMQPException(reply_code, reply_text))
    self.messages = {}
    if self.closing:
        LOGGER.debug('Channel %s was intentionally closed (%s) %s',
                     channel, reply_code, reply_text)
    else:
        LOGGER.warning('Channel %s was closed: (%s) %s',
                       channel, reply_code, reply_text)
        self.state = self.STATE_BLOCKED
        if self.on_unavailable:
            self.on_unavailable(self)
        # Immediately open a replacement channel on the live connection.
        self.channel = self._open_channel()
def on_channel_flow(self, method):
    """Invoked when RabbitMQ toggles channel flow control; mirror the
    active flag into the client state.

    :param pika.spec.Channel.Flow method: The Channel flow frame

    """
    if method.active:
        LOGGER.info('Channel flow is active (READY)')
        self.state = self.STATE_READY
        if self.on_ready:
            self.on_ready(self)
    else:
        LOGGER.warning('Channel flow is inactive (BLOCKED)')
        self.state = self.STATE_BLOCKED
        if self.on_unavailable:
            self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_connection_open
|
python
|
def on_connection_open(self, connection):
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
|
This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L428-L441
|
[
"def _open_channel(self):\n \"\"\"Open a new channel with RabbitMQ.\n\n :rtype: pika.channel.Channel\n\n \"\"\"\n LOGGER.debug('Creating a new channel')\n return self.connection.channel(self.on_channel_open)\n"
] |
class Client(object):
    """This class encompasses all of the AMQP/RabbitMQ specific behaviors.

    If RabbitMQ closes the connection, it will reopen it. You should
    look at the output, as there are limited reasons why the connection may
    be closed, which usually are tied to permission related issues or
    socket timeouts.

    If the channel is closed, it will indicate a problem with one of the
    commands that were issued and that should surface in the output as well.

    NOTE(review): this copy of the class does not define
    ``on_connection_open`` even though ``connect()`` passes
    ``self.on_connection_open`` as a pika callback — verify the method
    exists on the full class.

    """
    # Lifecycle state machine constants (see STATE_DESC for labels).
    STATE_IDLE = 0x01
    STATE_CONNECTING = 0x02
    STATE_READY = 0x03
    STATE_BLOCKED = 0x04
    STATE_CLOSING = 0x05
    STATE_CLOSED = 0x06

    # Human-readable labels used by state_description.
    STATE_DESC = {
        0x01: 'Idle',
        0x02: 'Connecting',
        0x03: 'Ready',
        0x04: 'Blocked',
        0x05: 'Closing',
        0x06: 'Closed'}

    def __init__(self,
                 url,
                 enable_confirmations=True,
                 reconnect_delay=DEFAULT_RECONNECT_DELAY,
                 connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
                 default_app_id=None,
                 on_ready_callback=None,
                 on_unavailable_callback=None,
                 on_return_callback=None,
                 io_loop=None):
        """Create a new instance of the consumer class, passing in the AMQP
        URL used to connect to RabbitMQ.

        :param str url: The AMQP URL to connect to
        :param bool enable_confirmations: Enable Publisher Confirmations
        :param int reconnect_delay: The optional time in seconds to wait before
            reconnecting on connection failure.
        :param int connection_attempts: The optional number of connection
            attempts to make before giving up.
        :param str default_app_id: The default AMQP application ID
        :param callable on_ready_callback: The optional callback to call when
            the connection to RabbitMQ has been established and is ready.
        :param callable on_unavailable_callback: The optional callback to call
            when the connection to the AMQP server becomes unavailable.
        :param callable on_return_callback: The optional callback
            that is invoked if a message is returned because it is unroutable
        :param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
            the default with.
        :raises: ValueError

        """
        # Reject zero/unparseable values up front; int()/float() also
        # raise ValueError for non-numeric input.
        if not int(connection_attempts):
            raise ValueError(
                'Invalid connection_attempts value: {}'.format(
                    connection_attempts))
        if not float(reconnect_delay):
            raise ValueError(
                'Invalid reconnect_delay value: {}'.format(reconnect_delay))
        self.state = self.STATE_IDLE
        self.io_loop = io_loop or ioloop.IOLoop.current()
        self.channel = None
        self.connection = None
        self.connection_attempts = int(connection_attempts)
        self.default_app_id = default_app_id
        # Delivery-tag counter and tag -> Future map for confirmations.
        self.message_number = 0
        self.messages = {}
        self.on_ready = on_ready_callback
        self.on_return = on_return_callback
        self.on_unavailable = on_unavailable_callback
        self.publisher_confirmations = enable_confirmations
        self.reconnect_delay = float(reconnect_delay)
        self.url = url
        self.parameters = pika.URLParameters(url)
        self.parameters.connection_attempts = self.connection_attempts
        # Automatically start the RabbitMQ connection on creation
        self.connect()

    def publish(self, exchange, routing_key, body, properties=None):
        """Publish a message to RabbitMQ. If the RabbitMQ connection is not
        established or is blocked, attempt to wait until sending is possible.

        :param str exchange: The exchange to publish the message to.
        :param str routing_key: The routing key to publish the message with.
        :param bytes body: The message body to send.
        :param dict properties: An optional dict of additional properties
            to append.
        :rtype: tornado.concurrent.Future
        :raises: :exc:`sprockets.mixins.amqp.NotReadyError`
        :raises: :exc:`sprockets.mixins.amqp.PublishingError`

        """
        future = concurrent.Future()
        properties = properties or {}
        properties.setdefault('app_id', self.default_app_id)
        properties.setdefault('message_id', str(uuid.uuid4()))
        properties.setdefault('timestamp', int(time.time()))
        if self.ready:
            if self.publisher_confirmations:
                # Delivery tags are sequential; remember the future so
                # on_delivery_confirmation can resolve it later.
                self.message_number += 1
                self.messages[self.message_number] = future
            else:
                # Without confirmations the publish is fire-and-forget.
                future.set_result(None)
            try:
                self.channel.basic_publish(
                    exchange, routing_key, body,
                    pika.BasicProperties(**properties), True)
            except exceptions.AMQPError as error:
                future.set_exception(
                    PublishingFailure(
                        properties['message_id'],
                        exchange, routing_key,
                        error.__class__.__name__))
        else:
            future.set_exception(NotReadyError(
                self.state_description, properties['message_id']))
        return future

    def on_delivery_confirmation(self, method_frame):
        """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
        command, passing in either a Basic.Ack or Basic.Nack frame with
        the delivery tag of the message that was published. The delivery tag
        is an integer counter indicating the message number that was sent
        on the channel via Basic.Publish. Here we're just doing house keeping
        to keep track of stats and remove message numbers that we expect
        a delivery confirmation of from the list used to keep track of messages
        that are pending confirmation.

        :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame

        """
        # Frame NAME is e.g. 'Basic.Ack' -> 'ack'.
        confirmation_type = method_frame.method.NAME.split('.')[1].lower()
        LOGGER.debug('Received %s for delivery tag: %i',
                     confirmation_type, method_frame.method.delivery_tag)
        if method_frame.method.multiple:
            # A multiple ack/nack covers every outstanding tag up to and
            # including delivery_tag.
            confirmed = sorted([msg for msg in self.messages
                                if msg <= method_frame.method.delivery_tag])
        else:
            confirmed = [method_frame.method.delivery_tag]
        for msg in confirmed:
            LOGGER.debug('RabbitMQ confirmed message %i', msg)
            try:
                if confirmation_type == 'ack':
                    self.messages[msg].set_result(None)
                elif confirmation_type == 'nack':
                    self.messages[msg].set_exception(PublishingFailure(msg))
            except KeyError:
                LOGGER.warning('Tried to confirm a message missing in stack')
            else:
                del self.messages[msg]
        LOGGER.debug('Published %i messages, %i have yet to be confirmed',
                     self.message_number, len(self.messages))

    @property
    def idle(self):
        """Returns ``True`` if the client is idle (not yet connecting).

        NOTE(review): original docstring said "closing" — copy/paste
        error from the ``closing`` property.

        :rtype: bool

        """
        return self.state == self.STATE_IDLE

    @property
    def connecting(self):
        """Returns ``True`` if the connection to RabbitMQ is open and a
        channel is in the process of connecting.

        :rtype: bool

        """
        return self.state == self.STATE_CONNECTING

    @property
    def blocked(self):
        """Returns ``True`` if the connection is blocked by RabbitMQ.

        :rtype: bool

        """
        return self.state == self.STATE_BLOCKED

    @property
    def closable(self):
        """Returns ``True`` if the connection to RabbitMQ can be closed.

        :rtype: bool

        """
        return self.state in [self.STATE_BLOCKED, self.STATE_READY]

    @property
    def closed(self):
        """Returns ``True`` if the connection to RabbitMQ is closed.

        :rtype: bool

        """
        return self.state == self.STATE_CLOSED

    @property
    def closing(self):
        """Returns ``True`` if the connection to RabbitMQ is closing.

        :rtype: bool

        """
        return self.state == self.STATE_CLOSING

    @property
    def ready(self):
        """Returns ``True`` if the connection to RabbitMQ is established and
        we can publish to it.

        :rtype: bool

        """
        return self.state == self.STATE_READY

    @property
    def state_description(self):
        """Return the human understandable state description.

        :rtype: str

        """
        return self.STATE_DESC[self.state]

    def connect(self):
        """This method connects to RabbitMQ, returning the connection handle.
        When the connection is established, the on_connection_open method
        will be invoked by pika.

        :rtype: pika.TornadoConnection

        """
        # Only an idle or fully closed client may (re)connect.
        if not self.idle and not self.closed:
            raise ConnectionStateError(self.state_description)
        LOGGER.debug('Connecting to %s', self.url)
        self.state = self.STATE_CONNECTING
        self.connection = pika.TornadoConnection(
            parameters=self.parameters,
            on_open_callback=self.on_connection_open,
            on_open_error_callback=self.on_connection_open_error,
            on_close_callback=self.on_connection_closed,
            custom_ioloop=self.io_loop)

    def close(self):
        """Cleanly shutdown the connection to RabbitMQ.

        :raises: sprockets.mixins.amqp.ConnectionStateError

        """
        if not self.closable:
            LOGGER.warning('Closed called while %s', self.state_description)
            raise ConnectionStateError(self.state_description)
        # STATE_CLOSING marks the close as intentional for
        # on_connection_closed (no automatic reconnect).
        self.state = self.STATE_CLOSING
        LOGGER.info('Closing RabbitMQ connection')
        self.connection.close()

    def _open_channel(self):
        """Open a new channel with RabbitMQ.

        :rtype: pika.channel.Channel

        """
        LOGGER.debug('Creating a new channel')
        return self.connection.channel(self.on_channel_open)

    def _reconnect(self):
        """Schedule the next connection attempt if the class is not currently
        closing.

        """
        if self.idle or self.closed:
            LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
                         self.reconnect_delay)
            self.io_loop.call_later(self.reconnect_delay, self.connect)
            return
        LOGGER.warning('Reconnect called while %s', self.state_description)

    #
    # Connection event callbacks
    #

    def on_connection_open_error(self, connection, error):
        """Invoked if the connection to RabbitMQ can not be made.

        :type connection: pika.TornadoConnection
        :param Exception error: The exception indicating failure

        """
        LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
                        connection, error)
        self.state = self.STATE_CLOSED
        self._reconnect()

    @staticmethod
    def on_back_pressure_detected(obj):  # pragma: nocover
        """This method is called by pika if it believes that back pressure is
        being applied to the TCP socket.

        :param unknown obj: The connection where back pressure
            is being applied

        """
        LOGGER.warning('Connection back pressure detected: %r', obj)

    def on_connection_blocked(self, method_frame):
        """This method is called by pika if RabbitMQ sends a connection blocked
        method, to let us know we need to throttle our publishing.

        :param pika.amqp_object.Method method_frame: The blocked method frame

        """
        LOGGER.warning('Connection blocked: %s', method_frame)
        self.state = self.STATE_BLOCKED
        if self.on_unavailable:
            self.on_unavailable(self)

    def on_connection_unblocked(self, method_frame):
        """When RabbitMQ indicates the connection is unblocked, set the state
        appropriately.

        :param pika.amqp_object.Method method_frame: Unblocked method frame

        """
        LOGGER.debug('Connection unblocked: %r', method_frame)
        self.state = self.STATE_READY
        if self.on_ready:
            self.on_ready(self)

    def on_connection_closed(self, connection, reply_code, reply_text):
        """This method is invoked by pika when the connection to RabbitMQ is
        closed unexpectedly. Since it is unexpected, we will reconnect to
        RabbitMQ if it disconnects.

        :param pika.TornadoConnection connection: Closed connection
        :param int reply_code: The server provided reply_code if given
        :param str reply_text: The server provided reply_text if given

        """
        # Remember the pre-close state to distinguish an intentional
        # close() from an unexpected drop.
        start_state = self.state
        self.state = self.STATE_CLOSED
        if self.on_unavailable:
            self.on_unavailable(self)
        self.connection = None
        self.channel = None
        if start_state != self.STATE_CLOSING:
            LOGGER.warning('%s closed while %s: (%s) %s',
                           connection, self.state_description,
                           reply_code, reply_text)
            self._reconnect()

    #
    # Error Condition Callbacks
    #

    def on_basic_return(self, _channel, method, properties, body):
        """Invoke a registered callback or log the returned message.

        :param _channel: The channel the message was sent on
        :type _channel: pika.channel.Channel
        :param pika.spec.Basic.Return method: The method object
        :param pika.spec.BasicProperties properties: The message properties
        :param str, unicode, bytes body: The message body

        """
        if self.on_return:
            self.on_return(method, properties, body)
        else:
            LOGGER.critical(
                '%s message %s published to %s (CID %s) returned: %s',
                method.exchange, properties.message_id,
                method.routing_key, properties.correlation_id,
                method.reply_text)

    #
    # Channel event callbacks
    #

    def on_channel_open(self, channel):
        """This method is invoked by pika when the channel has been opened.
        The channel object is passed in so we can make use of it.

        :param pika.channel.Channel channel: The channel object

        """
        LOGGER.debug('Channel opened')
        self.channel = channel
        if self.publisher_confirmations:
            self.channel.confirm_delivery(self.on_delivery_confirmation)
        self.channel.add_on_close_callback(self.on_channel_closed)
        self.channel.add_on_flow_callback(self.on_channel_flow)
        self.channel.add_on_return_callback(self.on_basic_return)
        self.state = self.STATE_READY
        if self.on_ready:
            self.on_ready(self)

    def on_channel_closed(self, channel, reply_code, reply_text):
        """Invoked by pika when RabbitMQ unexpectedly closes the channel.
        Channels are usually closed if you attempt to do something that
        violates the protocol, such as re-declare an exchange or queue with
        different parameters.

        In this case, we just want to log the error and create a new channel
        after setting the state back to connecting.

        :param pika.channel.Channel channel: The closed channel
        :param int reply_code: The numeric reason the channel was closed
        :param str reply_text: The text reason the channel was closed

        """
        # Fail all in-flight publish futures; their confirmations can no
        # longer arrive on the closed channel.
        for future in self.messages.values():
            future.set_exception(AMQPException(reply_code, reply_text))
        self.messages = {}
        if self.closing:
            LOGGER.debug('Channel %s was intentionally closed (%s) %s',
                         channel, reply_code, reply_text)
        else:
            LOGGER.warning('Channel %s was closed: (%s) %s',
                           channel, reply_code, reply_text)
            self.state = self.STATE_BLOCKED
            if self.on_unavailable:
                self.on_unavailable(self)
            self.channel = self._open_channel()

    def on_channel_flow(self, method):
        """Invoked when RabbitMQ toggles channel flow control; mirror the
        active flag into the client state.

        :param pika.spec.Channel.Flow method: The Channel flow frame

        """
        if method.active:
            LOGGER.info('Channel flow is active (READY)')
            self.state = self.STATE_READY
            if self.on_ready:
                self.on_ready(self)
        else:
            LOGGER.warning('Channel flow is inactive (BLOCKED)')
            self.state = self.STATE_BLOCKED
            if self.on_unavailable:
                self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_connection_open_error
|
python
|
def on_connection_open_error(self, connection, error):
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
|
Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L443-L453
|
[
"def _reconnect(self):\n \"\"\"Schedule the next connection attempt if the class is not currently\n closing.\n\n \"\"\"\n if self.idle or self.closed:\n LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',\n self.reconnect_delay)\n self.io_loop.call_later(self.reconnect_delay, self.connect)\n return\n LOGGER.warning('Reconnect called while %s', self.state_description)\n"
] |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_connection_blocked
|
python
|
def on_connection_blocked(self, method_frame):
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L466-L476
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_connection_unblocked
|
python
|
def on_connection_unblocked(self, method_frame):
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
|
When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L478-L488
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_connection_closed
|
python
|
def on_connection_closed(self, connection, reply_code, reply_text):
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
|
This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L490-L512
|
[
"def _reconnect(self):\n \"\"\"Schedule the next connection attempt if the class is not currently\n closing.\n\n \"\"\"\n if self.idle or self.closed:\n LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',\n self.reconnect_delay)\n self.io_loop.call_later(self.reconnect_delay, self.connect)\n return\n LOGGER.warning('Reconnect called while %s', self.state_description)\n"
] |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_basic_return
|
python
|
def on_basic_return(self, _channel, method, properties, body):
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
|
Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L518-L535
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_channel_open
|
python
|
def on_channel_open(self, channel):
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
|
This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L541-L557
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_channel_closed
|
python
|
def on_channel_closed(self, channel, reply_code, reply_text):
for future in self.messages.values():
future.set_exception(AMQPException(reply_code, reply_text))
self.messages = {}
if self.closing:
LOGGER.debug('Channel %s was intentionally closed (%s) %s',
channel, reply_code, reply_text)
else:
LOGGER.warning('Channel %s was closed: (%s) %s',
channel, reply_code, reply_text)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
self.channel = self._open_channel()
|
Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L559-L586
|
[
"def _open_channel(self):\n \"\"\"Open a new channel with RabbitMQ.\n\n :rtype: pika.channel.Channel\n\n \"\"\"\n LOGGER.debug('Creating a new channel')\n return self.connection.channel(self.on_channel_open)\n"
] |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_IDLE
@property
def connecting(self):
"""Returns ``True`` if the connection to RabbitMQ is open and a
channel is in the process of connecting.
:rtype: bool
"""
return self.state == self.STATE_CONNECTING
@property
def blocked(self):
"""Returns ``True`` if the connection is blocked by RabbitMQ.
:rtype: bool
"""
return self.state == self.STATE_BLOCKED
@property
def closable(self):
"""Returns ``True`` if the connection to RabbitMQ can be closed
:rtype: bool
"""
return self.state in [self.STATE_BLOCKED, self.STATE_READY]
@property
def closed(self):
"""Returns ``True`` if the connection to RabbitMQ is closed.
:rtype: bool
"""
return self.state == self.STATE_CLOSED
@property
def closing(self):
"""Returns ``True`` if the connection to RabbitMQ is closing.
:rtype: bool
"""
return self.state == self.STATE_CLOSING
@property
def ready(self):
"""Returns ``True`` if the connection to RabbitMQ is established and
we can publish to it.
:rtype: bool
"""
return self.state == self.STATE_READY
@property
def state_description(self):
"""Return the human understandable state description.
:rtype: str
"""
return self.STATE_DESC[self.state]
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.TornadoConnection
"""
if not self.idle and not self.closed:
raise ConnectionStateError(self.state_description)
LOGGER.debug('Connecting to %s', self.url)
self.state = self.STATE_CONNECTING
self.connection = pika.TornadoConnection(
parameters=self.parameters,
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
custom_ioloop=self.io_loop)
def close(self):
"""Cleanly shutdown the connection to RabbitMQ
:raises: sprockets.mixins.amqp.ConnectionStateError
"""
if not self.closable:
LOGGER.warning('Closed called while %s', self.state_description)
raise ConnectionStateError(self.state_description)
self.state = self.STATE_CLOSING
LOGGER.info('Closing RabbitMQ connection')
self.connection.close()
def _open_channel(self):
"""Open a new channel with RabbitMQ.
:rtype: pika.channel.Channel
"""
LOGGER.debug('Creating a new channel')
return self.connection.channel(self.on_channel_open)
def _reconnect(self):
"""Schedule the next connection attempt if the class is not currently
closing.
"""
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel()
def on_connection_open_error(self, connection, error):
"""Invoked if the connection to RabbitMQ can not be made.
:type connection: pika.TornadoConnection
:param Exception error: The exception indicating failure
"""
LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
connection, error)
self.state = self.STATE_CLOSED
self._reconnect()
@staticmethod
def on_back_pressure_detected(obj): # pragma: nocover
"""This method is called by pika if it believes that back pressure is
being applied to the TCP socket.
:param unknown obj: The connection where back pressure
is being applied
"""
LOGGER.warning('Connection back pressure detected: %r', obj)
def on_connection_blocked(self, method_frame):
"""This method is called by pika if RabbitMQ sends a connection blocked
method, to let us know we need to throttle our publishing.
:param pika.amqp_object.Method method_frame: The blocked method frame
"""
LOGGER.warning('Connection blocked: %s', method_frame)
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.TornadoConnection connection: Closed connection
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
start_state = self.state
self.state = self.STATE_CLOSED
if self.on_unavailable:
self.on_unavailable(self)
self.connection = None
self.channel = None
if start_state != self.STATE_CLOSING:
LOGGER.warning('%s closed while %s: (%s) %s',
connection, self.state_description,
reply_code, reply_text)
self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
"""Invoke a registered callback or log the returned message.
:param _channel: The channel the message was sent on
:type _channel: pika.channel.Channel
:param pika.spec.Basic.Return method: The method object
:param pika.spec.BasicProperties properties: The message properties
:param str, unicode, bytes body: The message body
"""
if self.on_return:
self.on_return(method, properties, body)
else:
LOGGER.critical(
'%s message %s published to %s (CID %s) returned: %s',
method.exchange, properties.message_id,
method.routing_key, properties.correlation_id,
method.reply_text)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Channel opened')
self.channel = channel
if self.publisher_confirmations:
self.channel.confirm_delivery(self.on_delivery_confirmation)
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_flow_callback(self.on_channel_flow)
self.channel.add_on_return_callback(self.on_basic_return)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
def on_channel_flow(self, method):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
"""
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
sprockets/sprockets.mixins.amqp
|
sprockets/mixins/amqp/__init__.py
|
Client.on_channel_flow
|
python
|
def on_channel_flow(self, method):
if method.active:
LOGGER.info('Channel flow is active (READY)')
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self)
else:
LOGGER.warning('Channel flow is inactive (BLOCKED)')
self.state = self.STATE_BLOCKED
if self.on_unavailable:
self.on_unavailable(self)
|
When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.spec.Channel.Flow method: The Channel flow frame
|
train
|
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L588-L604
| null |
class Client(object):
"""This class encompasses all of the AMQP/RabbitMQ specific behaviors.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
STATE_IDLE = 0x01
STATE_CONNECTING = 0x02
STATE_READY = 0x03
STATE_BLOCKED = 0x04
STATE_CLOSING = 0x05
STATE_CLOSED = 0x06
STATE_DESC = {
0x01: 'Idle',
0x02: 'Connecting',
0x03: 'Ready',
0x04: 'Blocked',
0x05: 'Closing',
0x06: 'Closed'}
def __init__(self,
url,
enable_confirmations=True,
reconnect_delay=DEFAULT_RECONNECT_DELAY,
connection_attempts=DEFAULT_CONNECTION_ATTEMPTS,
default_app_id=None,
on_ready_callback=None,
on_unavailable_callback=None,
on_return_callback=None,
io_loop=None):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str url: The AMQP URL to connect to
:param bool enable_confirmations: Enable Publisher Confirmations
:param int reconnect_delay: The optional time in seconds to wait before
reconnecting on connection failure.
:param int connection_attempts: The optional number of connection
attempts to make before giving up.
:param str default_app_id: The default AMQP application ID
:param callable on_ready_callback: The optional callback to call when
the connection to RabbitMQ has been established and is ready.
:param callable on_unavailable_callback: The optional callback to call
when the connection to the AMQP server becomes unavailable.
:param callable on_return_callback: The optional callback
that is invoked if a message is returned because it is unroutable
:param tornado.ioloop.IOLoop io_loop: An optional IOLoop to override
the default with.
:raises: ValueError
"""
if not int(connection_attempts):
raise ValueError(
'Invalid connection_attempts value: {}'.format(
connection_attempts))
if not float(reconnect_delay):
raise ValueError(
'Invalid reconnect_delay value: {}'.format(reconnect_delay))
self.state = self.STATE_IDLE
self.io_loop = io_loop or ioloop.IOLoop.current()
self.channel = None
self.connection = None
self.connection_attempts = int(connection_attempts)
self.default_app_id = default_app_id
self.message_number = 0
self.messages = {}
self.on_ready = on_ready_callback
self.on_return = on_return_callback
self.on_unavailable = on_unavailable_callback
self.publisher_confirmations = enable_confirmations
self.reconnect_delay = float(reconnect_delay)
self.url = url
self.parameters = pika.URLParameters(url)
self.parameters.connection_attempts = self.connection_attempts
# Automatically start the RabbitMQ connection on creation
self.connect()
def publish(self, exchange, routing_key, body, properties=None):
"""Publish a message to RabbitMQ. If the RabbitMQ connection is not
established or is blocked, attempt to wait until sending is possible.
:param str exchange: The exchange to publish the message to.
:param str routing_key: The routing key to publish the message with.
:param bytes body: The message body to send.
:param dict properties: An optional dict of additional properties
to append.
:rtype: tornado.concurrent.Future
:raises: :exc:`sprockets.mixins.amqp.NotReadyError`
:raises: :exc:`sprockets.mixins.amqp.PublishingError`
"""
future = concurrent.Future()
properties = properties or {}
properties.setdefault('app_id', self.default_app_id)
properties.setdefault('message_id', str(uuid.uuid4()))
properties.setdefault('timestamp', int(time.time()))
if self.ready:
if self.publisher_confirmations:
self.message_number += 1
self.messages[self.message_number] = future
else:
future.set_result(None)
try:
self.channel.basic_publish(
exchange, routing_key, body,
pika.BasicProperties(**properties), True)
except exceptions.AMQPError as error:
future.set_exception(
PublishingFailure(
properties['message_id'],
exchange, routing_key,
error.__class__.__name__))
else:
future.set_exception(NotReadyError(
self.state_description, properties['message_id']))
return future
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish. Here we're just doing house keeping
to keep track of stats and remove message numbers that we expect
a delivery confirmation of from the list used to keep track of messages
that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.debug('Received %s for delivery tag: %i',
confirmation_type, method_frame.method.delivery_tag)
if method_frame.method.multiple:
confirmed = sorted([msg for msg in self.messages
if msg <= method_frame.method.delivery_tag])
else:
confirmed = [method_frame.method.delivery_tag]
for msg in confirmed:
LOGGER.debug('RabbitMQ confirmed message %i', msg)
try:
if confirmation_type == 'ack':
self.messages[msg].set_result(None)
elif confirmation_type == 'nack':
self.messages[msg].set_exception(PublishingFailure(msg))
except KeyError:
LOGGER.warning('Tried to confirm a message missing in stack')
else:
del self.messages[msg]
LOGGER.debug('Published %i messages, %i have yet to be confirmed',
self.message_number, len(self.messages))
@property
def idle(self):
    """``True`` while the client is idle (no connection attempt made)."""
    return self.state == self.STATE_IDLE

@property
def connecting(self):
    """``True`` while the connection is open and a channel is being set up."""
    return self.state == self.STATE_CONNECTING

@property
def blocked(self):
    """``True`` if RabbitMQ has blocked the connection."""
    return self.state == self.STATE_BLOCKED

@property
def closable(self):
    """``True`` when the connection is in a state that allows closing."""
    return self.state in [self.STATE_BLOCKED, self.STATE_READY]

@property
def closed(self):
    """``True`` once the connection to RabbitMQ is closed."""
    return self.state == self.STATE_CLOSED

@property
def closing(self):
    """``True`` while the connection to RabbitMQ is closing."""
    return self.state == self.STATE_CLOSING

@property
def ready(self):
    """``True`` when connected to RabbitMQ and able to publish."""
    return self.state == self.STATE_READY

@property
def state_description(self):
    """Human-readable description of the current connection state."""
    return self.STATE_DESC[self.state]
def connect(self):
    """Open a ``pika.TornadoConnection`` to RabbitMQ.

    ``on_connection_open`` is invoked by pika once the connection is
    established.

    :raises: sprockets.mixins.amqp.ConnectionStateError
        if a connection attempt is already in flight (i.e. the client
        is neither idle nor closed)
    """
    # Only start a new attempt from a quiescent state.
    if not (self.idle or self.closed):
        raise ConnectionStateError(self.state_description)
    LOGGER.debug('Connecting to %s', self.url)
    self.state = self.STATE_CONNECTING
    self.connection = pika.TornadoConnection(
        parameters=self.parameters,
        on_open_callback=self.on_connection_open,
        on_open_error_callback=self.on_connection_open_error,
        on_close_callback=self.on_connection_closed,
        custom_ioloop=self.io_loop)

def close(self):
    """Cleanly shut down the connection to RabbitMQ.

    :raises: sprockets.mixins.amqp.ConnectionStateError
        if the connection is not currently closable
    """
    if not self.closable:
        LOGGER.warning('Closed called while %s', self.state_description)
        raise ConnectionStateError(self.state_description)
    self.state = self.STATE_CLOSING
    LOGGER.info('Closing RabbitMQ connection')
    self.connection.close()

def _open_channel(self):
    """Request a fresh channel; ``on_channel_open`` fires when it is ready.

    :rtype: pika.channel.Channel
    """
    LOGGER.debug('Creating a new channel')
    return self.connection.channel(self.on_channel_open)

def _reconnect(self):
    """Schedule the next connection attempt unless one is already underway."""
    # Inverted guard: bail out early when a connect/close is in progress.
    if not (self.idle or self.closed):
        LOGGER.warning('Reconnect called while %s', self.state_description)
        return
    LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
                 self.reconnect_delay)
    self.io_loop.call_later(self.reconnect_delay, self.connect)
#
# Connection event callbacks
#
def on_connection_open(self, connection):
    """pika callback: the connection to RabbitMQ has been established.

    :type connection: pika.TornadoConnection
    """
    LOGGER.debug('Connection opened')
    connection.add_on_connection_blocked_callback(
        self.on_connection_blocked)
    connection.add_on_connection_unblocked_callback(
        self.on_connection_unblocked)
    connection.add_backpressure_callback(self.on_back_pressure_detected)
    self.channel = self._open_channel()

def on_connection_open_error(self, connection, error):
    """pika callback: the connection attempt failed; mark closed and retry.

    :type connection: pika.TornadoConnection
    :param Exception error: The exception indicating failure
    """
    LOGGER.critical('Could not connect to RabbitMQ (%s): %r',
                    connection, error)
    self.state = self.STATE_CLOSED
    self._reconnect()

@staticmethod
def on_back_pressure_detected(obj):  # pragma: nocover
    """pika callback: TCP back pressure detected on the socket.

    :param unknown obj: The connection where back pressure
        is being applied
    """
    LOGGER.warning('Connection back pressure detected: %r', obj)

def on_connection_blocked(self, method_frame):
    """pika callback: RabbitMQ asked us to throttle publishing.

    :param pika.amqp_object.Method method_frame: The blocked method frame
    """
    LOGGER.warning('Connection blocked: %s', method_frame)
    self.state = self.STATE_BLOCKED
    if self.on_unavailable:
        self.on_unavailable(self)

def on_connection_unblocked(self, method_frame):
    """pika callback: RabbitMQ lifted the publishing block.

    :param pika.amqp_object.Method method_frame: Unblocked method frame
    """
    LOGGER.debug('Connection unblocked: %r', method_frame)
    self.state = self.STATE_READY
    if self.on_ready:
        self.on_ready(self)

def on_connection_closed(self, connection, reply_code, reply_text):
    """pika callback: the connection dropped; reconnect unless deliberate.

    :param pika.TornadoConnection connection: Closed connection
    :param int reply_code: The server provided reply_code if given
    :param str reply_text: The server provided reply_text if given
    """
    previous_state = self.state
    self.state = self.STATE_CLOSED
    if self.on_unavailable:
        self.on_unavailable(self)
    self.connection = None
    self.channel = None
    # Only an unexpected close (anything but an explicit close()) triggers
    # a reconnect attempt.
    if previous_state != self.STATE_CLOSING:
        LOGGER.warning('%s closed while %s: (%s) %s',
                       connection, self.state_description,
                       reply_code, reply_text)
        self._reconnect()
#
# Error Condition Callbacks
#
def on_basic_return(self, _channel, method, properties, body):
    """pika callback: a published message came back undeliverable.

    Dispatch to the registered ``on_return`` handler when one exists,
    otherwise log the returned message.

    :param _channel: The channel the message was sent on
    :type _channel: pika.channel.Channel
    :param pika.spec.Basic.Return method: The method object
    :param pika.spec.BasicProperties properties: The message properties
    :param str, unicode, bytes body: The message body
    """
    if not self.on_return:
        LOGGER.critical(
            '%s message %s published to %s (CID %s) returned: %s',
            method.exchange, properties.message_id,
            method.routing_key, properties.correlation_id,
            method.reply_text)
    else:
        self.on_return(method, properties, body)
#
# Channel event callbacks
#
def on_channel_open(self, channel):
    """pika callback: the channel is open; wire callbacks and go READY.

    :param pika.channel.Channel channel: The channel object
    """
    LOGGER.debug('Channel opened')
    self.channel = channel
    if self.publisher_confirmations:
        self.channel.confirm_delivery(self.on_delivery_confirmation)
    self.channel.add_on_close_callback(self.on_channel_closed)
    self.channel.add_on_flow_callback(self.on_channel_flow)
    self.channel.add_on_return_callback(self.on_basic_return)
    self.state = self.STATE_READY
    if self.on_ready:
        self.on_ready(self)

def on_channel_closed(self, channel, reply_code, reply_text):
    """pika callback: the channel closed.

    Fail every pending publish future with an :class:`AMQPException`,
    then — unless we are shutting down on purpose — mark the client
    blocked and open a replacement channel.

    :param pika.channel.Channel channel: The closed channel
    :param int reply_code: The numeric reason the channel was closed
    :param str reply_text: The text reason the channel was closed
    """
    for pending in self.messages.values():
        pending.set_exception(AMQPException(reply_code, reply_text))
    self.messages = {}
    if self.closing:
        LOGGER.debug('Channel %s was intentionally closed (%s) %s',
                     channel, reply_code, reply_text)
    else:
        LOGGER.warning('Channel %s was closed: (%s) %s',
                       channel, reply_code, reply_text)
        self.state = self.STATE_BLOCKED
        if self.on_unavailable:
            self.on_unavailable(self)
        self.channel = self._open_channel()
|
rmorshea/spectate
|
spectate/mvc/base.py
|
views
|
python
|
def views(model: "Model") -> list:
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
return model._model_views[:]
|
Return a model's views keyed on what events they respond to.
Model views are added by calling :func:`view` on a model.
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/mvc/base.py#L14-L21
| null |
# See End Of File For Licensing
from inspect import signature
from functools import wraps
from typing import Union, Callable, Optional
from spectate.core import Watchable, watched, Data, MethodSpectator
from .utils import members
__all__ = ["Model", "Control", "view", "unview", "views"]
def view(model: "Model", *functions: Callable) -> Optional[Callable]:
    """Register one or more callbacks that observe changes to *model*.

    When called with no ``functions``, returns a decorator that registers
    the decorated callable; otherwise each given function is registered
    directly and nothing is returned.

    Parameters:
        model: the model object whose changes the callback should respond to.

    Examples:

    .. code-block:: python

        from spectate import mvc

        items = mvc.List()

        @mvc.view(items)
        def printer(items, events):
            for e in events:
                print(e)

        items.append(1)
    """
    if not isinstance(model, Model):
        raise TypeError("Expected a Model, not %r." % model)

    def register(function: Callable):
        # Views are called as view(model, events) after each change batch.
        model._model_views.append(function)
        return function

    if not functions:
        return register
    for fn in functions:
        register(fn)
def unview(model: "Model", function: Callable):
    """Remove a view callback from a model.

    Parameters:
        model: The model which contains the view function.
        function: The callable which was registered to the model as a view.

    Raises:
        ValueError: If the given ``function`` is not a view of the
            given ``model``.
    """
    model._model_views.remove(function)
class Control:
    """Descriptor used to declare control methods on a :class:`Model`.

    A control reacts when any of the model methods named in ``methods`` is
    called. Reactions are registered with :meth:`before` and :meth:`after`;
    each reaction receives a ``notify`` function for emitting change events
    to the views registered on the model.
    """

    # Reactions may be callables or method-name strings; they are resolved
    # lazily by BoundControl when the descriptor is accessed on an instance.
    _before = None
    _after = None

    def __init__(self, *methods: str):
        self.methods = methods
        self.name = None  # filled in by __set_name__

    def __get__(self, obj, cls):
        # Class-level access returns the raw descriptor; instance-level
        # access returns a control bound to that particular model.
        if obj is None:
            return self
        return BoundControl(obj, self)

    def before(self, callback: Union[Callable, str]) -> "Control":
        """Register a reaction that runs before the trigger method.

        ``callback`` may be a callable, or a string naming a model method
        to look up when reacting (useful when subclassing). Returns
        ``self`` so registrations can be chained.
        """
        if isinstance(callback, Control):
            callback = callback._before
        self._before = callback
        return self

    def after(self, callback: Union[Callable, str]) -> "Control":
        """Register a reaction that runs after the trigger method.

        ``callback`` may be a callable, or a string naming a model method
        to look up when reacting (useful when subclassing). Returns
        ``self`` so registrations can be chained.
        """
        if isinstance(callback, Control):
            callback = callback._after
        self._after = callback
        return self

    def __set_name__(self, cls, name):
        # Validate the owner, remember our attribute name, and wrap every
        # watched method in a MethodSpectator so calls can be intercepted.
        if not issubclass(cls, Model):
            raise TypeError("Can only define a control on a Model, not %r" % cls)
        if self.name:
            msg = "Control was defined twice - %r and %r."
            raise RuntimeError(msg % (self.name, name))
        self.name = name
        for m in self.methods:
            setattr(cls, m, MethodSpectator(getattr(cls, m), m))
class BoundControl:
"""A :class:`Control` bound to a single model instance.

Resolves the configured before/after callbacks against the instance and
wraps them so that any ``notify()`` calls they make are batched and
forwarded to the model's views.
"""
def __init__(self, obj, ctrl):
# Capture the owning model instance and the unbound Control's settings.
self._obj = obj
self._cls = type(obj)
self._name = ctrl.name
self._before = ctrl._before
self._after = ctrl._after
self.methods = ctrl.methods
@property
def before(self):
# Resolve the "before" reaction: when none was registered, fall back
# to a model method named "<control-name>_before" if it exists.
if self._before is None:
method_name = self._name + "_before"
if hasattr(self._obj, method_name):
before = getattr(self._obj, method_name)
else:
return None
else:
before = self._before
# A string names a method on the model; a descriptor is bound to it.
if isinstance(before, str):
before = getattr(self._obj, before)
elif hasattr(before, "__get__"):
before = before.__get__(self._obj, type(self._obj))
@wraps(before)
def beforeback(value, call):
# Collect events emitted via notify() and deliver them to the
# model's views as a single batch after the reaction returns.
events = []
def notify(**event):
events.append(Data(event))
def parameters():
# Lazily bind the intercepted call's args/kwargs to the real
# method signature so reactions can inspect named parameters.
meth = getattr(value, call.name)
bound = signature(meth).bind(*call.args, **call.kwargs)
return dict(bound.arguments)
call = call["parameters":parameters]
result = before(call, notify)
if events:
self._obj._notify_model_views(tuple(events))
return result
return beforeback
@property
def after(self):
# Resolve the "after" reaction; unlike "before" there is no
# name-based fallback — None simply means "no afterback".
if self._after is None:
return None
else:
after = self._after
if isinstance(after, str):
after = getattr(self._obj, after)
elif hasattr(after, "__get__"):
after = after.__get__(self._obj, type(self._obj))
@wraps(after)
def afterback(value, answer):
# Same batching behavior as beforeback: notify() accumulates
# events which are sent to views once the reaction completes.
events = []
def notify(**event):
events.append(Data(event))
result = after(answer, notify)
if events:
self._obj._notify_model_views(tuple(events))
return result
return afterback
class Model(Watchable):
"""An object that can be :class:`controlled <Control>` and :func:`viewed <view>`.
Users should define :class:`Control` methods and then :func:`view` the change
events those controls emit. This process starts by defining controls on a subclass
of :class:`Model`.
Examples:
.. code-block:: python
from spectate import mvc
class Object(mvc.Model):
_control_attr_change = mvc.Control('__setattr__', '__delattr__')
@_control_attr_change.before
def _control_attr_change(self, call, notify):
return call.args[0], getattr(self, call.args[0], Undefined)
@_control_attr_change.after
def _control_attr_change(self, answer, notify):
attr, old = answer.before
new = getattr(self, attr, Undefined)
if new != old:
notify(attr=attr, old=old, new=new)
o = Object()
@mvc.view(o)
def printer(o, events):
for e in events:
print(e)
o.a = 1
o.b = 2
.. code-block:: text
{'attr': 'a', 'old': Undefined, 'new': 1}
{'attr': 'b', 'old': Undefined, 'new': 2}
"""
# Names of Control descriptor attributes on the class; populated
# per-subclass by __init_subclass__.
_model_controls = ()
def __init_subclass__(cls, **kwargs):
# Record which class attributes are Control descriptors so __new__
# can hook their callbacks onto every new instance.
controls = []
for k, v in members(cls):
if isinstance(v, Control):
controls.append(k)
cls._model_controls = tuple(controls)
super().__init_subclass__(**kwargs)
def __new__(cls, *args, **kwargs):
# Wrap the new instance in a spectator and attach each control's
# before/after callbacks to the methods that control watches.
self, spectator = watched(super().__new__, cls)
for name in cls._model_controls:
ctrl = getattr(self, name)
for method in ctrl.methods:
spectator.callback(method, ctrl.before, ctrl.after)
# Bypass __setattr__ (which may itself be spectated) when seeding state.
object.__setattr__(self, "_model_views", [])
return self
def _notify_model_views(self, events):
# Fan one batch of change events out to every registered view.
for view in self._model_views:
view(self, events)
# The MIT License (MIT)
# Copyright (c) 2016 Ryan S. Morshead
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
rmorshea/spectate
|
spectate/mvc/base.py
|
view
|
python
|
def view(model: "Model", *functions: Callable) -> Optional[Callable]:
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
def setup(function: Callable):
model._model_views.append(function)
return function
if functions:
for f in functions:
setup(f)
else:
return setup
|
A decorator for registering a callback to a model
Parameters:
model: the model object whose changes the callback should respond to.
Examples:
.. code-block:: python
from spectate import mvc
items = mvc.List()
@mvc.view(items)
def printer(items, events):
for e in events:
print(e)
items.append(1)
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/mvc/base.py#L24-L55
|
[
"def setup(function: Callable):\n model._model_views.append(function)\n return function\n"
] |
# See End Of File For Licensing
from inspect import signature
from functools import wraps
from typing import Union, Callable, Optional
from spectate.core import Watchable, watched, Data, MethodSpectator
from .utils import members
__all__ = ["Model", "Control", "view", "unview", "views"]
def views(model: "Model") -> list:
    """Return a shallow copy of *model*'s registered view callbacks.

    Views are added to a model by calling :func:`view` on it.
    """
    if not isinstance(model, Model):
        raise TypeError("Expected a Model, not %r." % model)
    return list(model._model_views)
def unview(model: "Model", function: Callable):
    """Remove a view callback from a model.

    Parameters:
        model: The model which contains the view function.
        function: The callable which was registered to the model as a view.

    Raises:
        ValueError: If the given ``function`` is not a view of the
            given ``model``.
    """
    model._model_views.remove(function)
class Control:
    """Descriptor used to declare control methods on a :class:`Model`.

    A control reacts when any of the model methods named in ``methods`` is
    called. Reactions are registered with :meth:`before` and :meth:`after`;
    each reaction receives a ``notify`` function for emitting change events
    to the views registered on the model.
    """

    # Reactions may be callables or method-name strings; they are resolved
    # lazily by BoundControl when the descriptor is accessed on an instance.
    _before = None
    _after = None

    def __init__(self, *methods: str):
        self.methods = methods
        self.name = None  # filled in by __set_name__

    def __get__(self, obj, cls):
        # Class-level access returns the raw descriptor; instance-level
        # access returns a control bound to that particular model.
        if obj is None:
            return self
        return BoundControl(obj, self)

    def before(self, callback: Union[Callable, str]) -> "Control":
        """Register a reaction that runs before the trigger method.

        ``callback`` may be a callable, or a string naming a model method
        to look up when reacting (useful when subclassing). Returns
        ``self`` so registrations can be chained.
        """
        if isinstance(callback, Control):
            callback = callback._before
        self._before = callback
        return self

    def after(self, callback: Union[Callable, str]) -> "Control":
        """Register a reaction that runs after the trigger method.

        ``callback`` may be a callable, or a string naming a model method
        to look up when reacting (useful when subclassing). Returns
        ``self`` so registrations can be chained.
        """
        if isinstance(callback, Control):
            callback = callback._after
        self._after = callback
        return self

    def __set_name__(self, cls, name):
        # Validate the owner, remember our attribute name, and wrap every
        # watched method in a MethodSpectator so calls can be intercepted.
        if not issubclass(cls, Model):
            raise TypeError("Can only define a control on a Model, not %r" % cls)
        if self.name:
            msg = "Control was defined twice - %r and %r."
            raise RuntimeError(msg % (self.name, name))
        self.name = name
        for m in self.methods:
            setattr(cls, m, MethodSpectator(getattr(cls, m), m))
class BoundControl:
"""A :class:`Control` bound to a single model instance.

Resolves the configured before/after callbacks against the instance and
wraps them so that any ``notify()`` calls they make are batched and
forwarded to the model's views.
"""
def __init__(self, obj, ctrl):
# Capture the owning model instance and the unbound Control's settings.
self._obj = obj
self._cls = type(obj)
self._name = ctrl.name
self._before = ctrl._before
self._after = ctrl._after
self.methods = ctrl.methods
@property
def before(self):
# Resolve the "before" reaction: when none was registered, fall back
# to a model method named "<control-name>_before" if it exists.
if self._before is None:
method_name = self._name + "_before"
if hasattr(self._obj, method_name):
before = getattr(self._obj, method_name)
else:
return None
else:
before = self._before
# A string names a method on the model; a descriptor is bound to it.
if isinstance(before, str):
before = getattr(self._obj, before)
elif hasattr(before, "__get__"):
before = before.__get__(self._obj, type(self._obj))
@wraps(before)
def beforeback(value, call):
# Collect events emitted via notify() and deliver them to the
# model's views as a single batch after the reaction returns.
events = []
def notify(**event):
events.append(Data(event))
def parameters():
# Lazily bind the intercepted call's args/kwargs to the real
# method signature so reactions can inspect named parameters.
meth = getattr(value, call.name)
bound = signature(meth).bind(*call.args, **call.kwargs)
return dict(bound.arguments)
call = call["parameters":parameters]
result = before(call, notify)
if events:
self._obj._notify_model_views(tuple(events))
return result
return beforeback
@property
def after(self):
# Resolve the "after" reaction; unlike "before" there is no
# name-based fallback — None simply means "no afterback".
if self._after is None:
return None
else:
after = self._after
if isinstance(after, str):
after = getattr(self._obj, after)
elif hasattr(after, "__get__"):
after = after.__get__(self._obj, type(self._obj))
@wraps(after)
def afterback(value, answer):
# Same batching behavior as beforeback: notify() accumulates
# events which are sent to views once the reaction completes.
events = []
def notify(**event):
events.append(Data(event))
result = after(answer, notify)
if events:
self._obj._notify_model_views(tuple(events))
return result
return afterback
class Model(Watchable):
"""An object that can be :class:`controlled <Control>` and :func:`viewed <view>`.
Users should define :class:`Control` methods and then :func:`view` the change
events those controls emit. This process starts by defining controls on a subclass
of :class:`Model`.
Examples:
.. code-block:: python
from spectate import mvc
class Object(mvc.Model):
_control_attr_change = mvc.Control('__setattr__', '__delattr__')
@_control_attr_change.before
def _control_attr_change(self, call, notify):
return call.args[0], getattr(self, call.args[0], Undefined)
@_control_attr_change.after
def _control_attr_change(self, answer, notify):
attr, old = answer.before
new = getattr(self, attr, Undefined)
if new != old:
notify(attr=attr, old=old, new=new)
o = Object()
@mvc.view(o)
def printer(o, events):
for e in events:
print(e)
o.a = 1
o.b = 2
.. code-block:: text
{'attr': 'a', 'old': Undefined, 'new': 1}
{'attr': 'b', 'old': Undefined, 'new': 2}
"""
# Names of Control descriptor attributes on the class; populated
# per-subclass by __init_subclass__.
_model_controls = ()
def __init_subclass__(cls, **kwargs):
# Record which class attributes are Control descriptors so __new__
# can hook their callbacks onto every new instance.
controls = []
for k, v in members(cls):
if isinstance(v, Control):
controls.append(k)
cls._model_controls = tuple(controls)
super().__init_subclass__(**kwargs)
def __new__(cls, *args, **kwargs):
# Wrap the new instance in a spectator and attach each control's
# before/after callbacks to the methods that control watches.
self, spectator = watched(super().__new__, cls)
for name in cls._model_controls:
ctrl = getattr(self, name)
for method in ctrl.methods:
spectator.callback(method, ctrl.before, ctrl.after)
# Bypass __setattr__ (which may itself be spectated) when seeding state.
object.__setattr__(self, "_model_views", [])
return self
def _notify_model_views(self, events):
# Fan one batch of change events out to every registered view.
for view in self._model_views:
view(self, events)
# The MIT License (MIT)
# Copyright (c) 2016 Ryan S. Morshead
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
rmorshea/spectate
|
spectate/mvc/base.py
|
Control.before
|
python
|
def before(self, callback: Union[Callable, str]) -> "Control":
if isinstance(callback, Control):
callback = callback._before
self._before = callback
return self
|
Register a control method that reacts before the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing).
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/mvc/base.py#L137-L149
| null |
class Control:
    """Descriptor used to declare control methods on a :class:`Model`.

    A control reacts when any of the model methods named in ``methods`` is
    called. Register reactions with :meth:`after`; each reaction receives
    a ``notify`` function for emitting change events to the views
    registered on the model.
    """

    # Reactions may be callables or method-name strings; they are resolved
    # lazily by BoundControl when the descriptor is accessed on an instance.
    _before = None
    _after = None

    def __init__(self, *methods: str):
        self.methods = methods
        self.name = None  # filled in by __set_name__

    def __get__(self, obj, cls):
        # Class-level access returns the raw descriptor; instance-level
        # access returns a control bound to that particular model.
        if obj is None:
            return self
        return BoundControl(obj, self)

    def after(self, callback: Union[Callable, str]) -> "Control":
        """Register a reaction that runs after the trigger method.

        ``callback`` may be a callable, or a string naming a model method
        to look up when reacting (useful when subclassing). Returns
        ``self`` so registrations can be chained.
        """
        if isinstance(callback, Control):
            callback = callback._after
        self._after = callback
        return self

    def __set_name__(self, cls, name):
        # Validate the owner, remember our attribute name, and wrap every
        # watched method in a MethodSpectator so calls can be intercepted.
        if not issubclass(cls, Model):
            raise TypeError("Can only define a control on a Model, not %r" % cls)
        if self.name:
            msg = "Control was defined twice - %r and %r."
            raise RuntimeError(msg % (self.name, name))
        self.name = name
        for m in self.methods:
            setattr(cls, m, MethodSpectator(getattr(cls, m), m))
|
rmorshea/spectate
|
spectate/mvc/base.py
|
Control.after
|
python
|
def after(self, callback: Union[Callable, str]) -> "Control":
if isinstance(callback, Control):
callback = callback._after
self._after = callback
return self
|
Register a control method that reacts after the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing).
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/mvc/base.py#L151-L163
| null |
class Control:
    """Descriptor used to declare control methods on a :class:`Model`.

    A control reacts when any of the model methods named in ``methods`` is
    called. Register reactions with :meth:`before`; each reaction receives
    a ``notify`` function for emitting change events to the views
    registered on the model.
    """

    # Reactions may be callables or method-name strings; they are resolved
    # lazily by BoundControl when the descriptor is accessed on an instance.
    _before = None
    _after = None

    def __init__(self, *methods: str):
        self.methods = methods
        self.name = None  # filled in by __set_name__

    def __get__(self, obj, cls):
        # Class-level access returns the raw descriptor; instance-level
        # access returns a control bound to that particular model.
        if obj is None:
            return self
        return BoundControl(obj, self)

    def before(self, callback: Union[Callable, str]) -> "Control":
        """Register a reaction that runs before the trigger method.

        ``callback`` may be a callable, or a string naming a model method
        to look up when reacting (useful when subclassing). Returns
        ``self`` so registrations can be chained.
        """
        if isinstance(callback, Control):
            callback = callback._before
        self._before = callback
        return self

    def __set_name__(self, cls, name):
        # Validate the owner, remember our attribute name, and wrap every
        # watched method in a MethodSpectator so calls can be intercepted.
        if not issubclass(cls, Model):
            raise TypeError("Can only define a control on a Model, not %r" % cls)
        if self.name:
            msg = "Control was defined twice - %r and %r."
            raise RuntimeError(msg % (self.name, name))
        self.name = name
        for m in self.methods:
            setattr(cls, m, MethodSpectator(getattr(cls, m), m))
|
rmorshea/spectate
|
spectate/mvc/events.py
|
hold
|
python
|
def hold(model: Model, reducer: Optional[Callable] = None) -> Iterator[list]:
    """Temporarily withhold a model's change events in a modifiable list.

    Events produced inside the context are appended to the yielded list
    instead of being sent to views. When the context exits, the (possibly
    user-edited) events are delivered to the model's views as one batch,
    after being passed through ``reducer(model, events)`` if one is given.
    """
    if not isinstance(model, Model):
        raise TypeError("Expected a Model, not %r." % model)
    captured = []
    # Remember any instance-level override so it can be restored afterwards.
    saved = model.__dict__.get("_notify_model_views")
    model._notify_model_views = captured.extend
    try:
        yield captured
    finally:
        if saved is None:
            del model._notify_model_views
        else:
            model._notify_model_views = saved
        batch = tuple(captured)
        if reducer is not None:
            batch = tuple(map(Data, reducer(model, batch)))
        model._notify_model_views(batch)
|
Temporarily withhold change events in a modifiable list.
All changes that are captured within a "hold" context are forwarded to a list
which is yielded to the user before being sent to views of the given ``model``.
If desired, the user may modify the list of events before the context is left in
order to change the events that are ultimately sent to the model's views.
Parameters:
model:
The model object whose change events will be temporarily withheld.
reducer:
A function for modifying the events list at the end of the context.
Its signature is ``(model, events) -> new_events`` where ``model`` is the
given model, ``events`` is the complete list of events produced in the
context, and the returned ``new_events`` is a list of events that will
actuall be distributed to views.
Notes:
All changes witheld from views will be sent as a single notification. For
example if you view a :class:`specate.mvc.models.List` and its ``append()``
method is called three times within a :func:`hold` context,
Examples:
Note how the event from ``l.append(1)`` is omitted from the printed statements.
.. code-block:: python
from spectate import mvc
l = mvc.List()
mvc.view(d, lambda d, e: list(map(print, e)))
with mvc.hold(l) as events:
l.append(1)
l.append(2)
del events[0]
.. code-block:: text
{'index': 1, 'old': Undefined, 'new': 2}
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/mvc/events.py#L12-L76
| null |
from contextlib import contextmanager
from typing import Iterator, Callable, Optional
from spectate import Data
from .base import Model
__all__ = ["hold", "mute", "rollback"]
@contextmanager
@contextmanager
def rollback(
model: Model, undo: Optional[Callable] = None, *args, **kwargs
) -> Iterator[list]:
"""Withold events if an error occurs.
Generall operate
Parameters:
model:
The model object whose change events may be witheld.
undo:
An optional function for reversing any changes that may have taken place.
Its signature is ``(model, events, error)`` where ``model`` is the given
model, ``event`` is a tuple of all the events that took place, and ``error``
is the exception that was riased. Any changes that you make to the model
within this function will not produce events.
Examples:
Simple supression of events:
.. code-block:: python
from spectate import mvc
d = mvc.Dict()
@mvc.view(d)
def should_not_be_called(d, events):
# we never call this view
assert False
try:
with mvc.rollback(d):
d["a"] = 1
d["b"] # key doesn't exist
except KeyError:
pass
Undo changes for a dictionary:
.. code-block:: python
from spectate import mvc
def undo_dict_changes(model, events, error):
seen = set()
for e in reversed(events):
if e.old is mvc.Undefined:
del model[e.key]
else:
model[e.key] = e.old
try:
with mvc.rollback(d, undo=undo_dict_changes):
d["a"] = 1
d["b"] = 2
print(d)
d["c"]
except KeyError:
pass
print(d)
.. code-block:: python
{'a': 1, 'b': 2}
{}
"""
with hold(model, *args, **kwargs) as events:
try:
yield events
except Exception as error:
if undo is not None:
with mute(model):
undo(model, tuple(events), error)
events.clear()
raise
@contextmanager
def mute(model: Model):
"""Block a model's views from being notified.
All changes within a "mute" context will be blocked. No content is yielded to the
user as in :func:`hold`, and the views of the model are never notified that changes
took place.
Parameters:
mode: The model whose change events will be blocked.
Examples:
The view is never called due to the :func:`mute` context:
.. code-block:: python
from spectate import mvc
l = mvc.List()
@mvc.view(l)
def raises(events):
raise ValueError("Events occured!")
with mvc.mute(l):
l.append(1)
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
restore = model.__dict__.get("_notify_model_views")
model._notify_model_views = lambda e: None
try:
yield
finally:
if restore is None:
del model._notify_model_views
else:
model._notify_model_views = restore
|
rmorshea/spectate
|
spectate/mvc/events.py
|
rollback
|
python
|
def rollback(
    model: Model, undo: Optional[Callable] = None, *args, **kwargs
) -> Iterator[list]:
    """Discard withheld events when the context body raises.

    Works like :func:`hold`, except that if an exception escapes the body the
    captured events are dropped (and, if given, ``undo`` is called to reverse
    the model's changes) before the exception propagates.

    Parameters:
        model: The model whose change events may be withheld.
        undo: Optional ``(model, events, error)`` callable used to reverse any
            changes; it runs inside :func:`mute` so it emits no events itself.
    """
    with hold(model, *args, **kwargs) as pending:
        try:
            yield pending
        except Exception as exc:
            if undo is not None:
                # Mute the model so reversing the changes emits no events.
                with mute(model):
                    undo(model, tuple(pending), exc)
            del pending[:]
            raise
|
Withhold events if an error occurs.
Generally operates like :func:`hold`, but discards the captured events if an error occurs.
Parameters:
model:
The model object whose change events may be withheld.
undo:
An optional function for reversing any changes that may have taken place.
Its signature is ``(model, events, error)`` where ``model`` is the given
model, ``event`` is a tuple of all the events that took place, and ``error``
is the exception that was raised. Any changes that you make to the model
within this function will not produce events.
Examples:
Simple supression of events:
.. code-block:: python
from spectate import mvc
d = mvc.Dict()
@mvc.view(d)
def should_not_be_called(d, events):
# we never call this view
assert False
try:
with mvc.rollback(d):
d["a"] = 1
d["b"] # key doesn't exist
except KeyError:
pass
Undo changes for a dictionary:
.. code-block:: python
from spectate import mvc
def undo_dict_changes(model, events, error):
seen = set()
for e in reversed(events):
if e.old is mvc.Undefined:
del model[e.key]
else:
model[e.key] = e.old
try:
with mvc.rollback(d, undo=undo_dict_changes):
d["a"] = 1
d["b"] = 2
print(d)
d["c"]
except KeyError:
pass
print(d)
.. code-block:: python
{'a': 1, 'b': 2}
{}
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/mvc/events.py#L80-L156
| null |
from contextlib import contextmanager
from typing import Iterator, Callable, Optional
from spectate import Data
from .base import Model
__all__ = ["hold", "mute", "rollback"]
@contextmanager
def hold(model: Model, reducer: Optional[Callable] = None) -> Iterator[list]:
"""Temporarilly withold change events in a modifiable list.
All changes that are captured within a "hold" context are forwarded to a list
which is yielded to the user before being sent to views of the given ``model``.
If desired, the user may modify the list of events before the context is left in
order to change the events that are ultimately sent to the model's views.
Parameters:
model:
The model object whose change events will be temporarilly witheld.
reducer:
A function for modifying the events list at the end of the context.
Its signature is ``(model, events) -> new_events`` where ``model`` is the
given model, ``events`` is the complete list of events produced in the
context, and the returned ``new_events`` is a list of events that will
actuall be distributed to views.
Notes:
All changes witheld from views will be sent as a single notification. For
example if you view a :class:`specate.mvc.models.List` and its ``append()``
method is called three times within a :func:`hold` context,
Examples:
Note how the event from ``l.append(1)`` is omitted from the printed statements.
.. code-block:: python
from spectate import mvc
l = mvc.List()
mvc.view(d, lambda d, e: list(map(print, e)))
with mvc.hold(l) as events:
l.append(1)
l.append(2)
del events[0]
.. code-block:: text
{'index': 1, 'old': Undefined, 'new': 2}
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
events = []
restore = model.__dict__.get("_notify_model_views")
model._notify_model_views = lambda e: events.extend(e)
try:
yield events
finally:
if restore is None:
del model._notify_model_views
else:
model._notify_model_views = restore
events = tuple(events)
if reducer is not None:
events = tuple(map(Data, reducer(model, events)))
model._notify_model_views(events)
@contextmanager
@contextmanager
def mute(model: Model):
"""Block a model's views from being notified.
All changes within a "mute" context will be blocked. No content is yielded to the
user as in :func:`hold`, and the views of the model are never notified that changes
took place.
Parameters:
mode: The model whose change events will be blocked.
Examples:
The view is never called due to the :func:`mute` context:
.. code-block:: python
from spectate import mvc
l = mvc.List()
@mvc.view(l)
def raises(events):
raise ValueError("Events occured!")
with mvc.mute(l):
l.append(1)
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
restore = model.__dict__.get("_notify_model_views")
model._notify_model_views = lambda e: None
try:
yield
finally:
if restore is None:
del model._notify_model_views
else:
model._notify_model_views = restore
|
rmorshea/spectate
|
spectate/mvc/events.py
|
mute
|
python
|
def mute(model: Model):
    """Silently drop every change event produced inside the context.

    Unlike :func:`hold`, nothing is yielded and the model's views are never
    notified of changes made while the context is active.
    """
    if not isinstance(model, Model):
        raise TypeError("Expected a Model, not %r." % model)
    saved = model.__dict__.get("_notify_model_views")

    def _discard(events):
        # Swallow the batch so no view is notified.
        pass

    model._notify_model_views = _discard
    try:
        yield
    finally:
        # Reinstate whatever notifier (if any) the instance had before.
        if saved is None:
            del model._notify_model_views
        else:
            model._notify_model_views = saved
|
Block a model's views from being notified.
All changes within a "mute" context will be blocked. No content is yielded to the
user as in :func:`hold`, and the views of the model are never notified that changes
took place.
Parameters:
mode: The model whose change events will be blocked.
Examples:
The view is never called due to the :func:`mute` context:
.. code-block:: python
from spectate import mvc
l = mvc.List()
@mvc.view(l)
def raises(events):
raise ValueError("Events occured!")
with mvc.mute(l):
l.append(1)
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/mvc/events.py#L160-L197
| null |
from contextlib import contextmanager
from typing import Iterator, Callable, Optional
from spectate import Data
from .base import Model
__all__ = ["hold", "mute", "rollback"]
@contextmanager
def hold(model: Model, reducer: Optional[Callable] = None) -> Iterator[list]:
"""Temporarilly withold change events in a modifiable list.
All changes that are captured within a "hold" context are forwarded to a list
which is yielded to the user before being sent to views of the given ``model``.
If desired, the user may modify the list of events before the context is left in
order to change the events that are ultimately sent to the model's views.
Parameters:
model:
The model object whose change events will be temporarilly witheld.
reducer:
A function for modifying the events list at the end of the context.
Its signature is ``(model, events) -> new_events`` where ``model`` is the
given model, ``events`` is the complete list of events produced in the
context, and the returned ``new_events`` is a list of events that will
actuall be distributed to views.
Notes:
All changes witheld from views will be sent as a single notification. For
example if you view a :class:`specate.mvc.models.List` and its ``append()``
method is called three times within a :func:`hold` context,
Examples:
Note how the event from ``l.append(1)`` is omitted from the printed statements.
.. code-block:: python
from spectate import mvc
l = mvc.List()
mvc.view(d, lambda d, e: list(map(print, e)))
with mvc.hold(l) as events:
l.append(1)
l.append(2)
del events[0]
.. code-block:: text
{'index': 1, 'old': Undefined, 'new': 2}
"""
if not isinstance(model, Model):
raise TypeError("Expected a Model, not %r." % model)
events = []
restore = model.__dict__.get("_notify_model_views")
model._notify_model_views = lambda e: events.extend(e)
try:
yield events
finally:
if restore is None:
del model._notify_model_views
else:
model._notify_model_views = restore
events = tuple(events)
if reducer is not None:
events = tuple(map(Data, reducer(model, events)))
model._notify_model_views(events)
@contextmanager
def rollback(
model: Model, undo: Optional[Callable] = None, *args, **kwargs
) -> Iterator[list]:
"""Withold events if an error occurs.
Generall operate
Parameters:
model:
The model object whose change events may be witheld.
undo:
An optional function for reversing any changes that may have taken place.
Its signature is ``(model, events, error)`` where ``model`` is the given
model, ``event`` is a tuple of all the events that took place, and ``error``
is the exception that was riased. Any changes that you make to the model
within this function will not produce events.
Examples:
Simple supression of events:
.. code-block:: python
from spectate import mvc
d = mvc.Dict()
@mvc.view(d)
def should_not_be_called(d, events):
# we never call this view
assert False
try:
with mvc.rollback(d):
d["a"] = 1
d["b"] # key doesn't exist
except KeyError:
pass
Undo changes for a dictionary:
.. code-block:: python
from spectate import mvc
def undo_dict_changes(model, events, error):
seen = set()
for e in reversed(events):
if e.old is mvc.Undefined:
del model[e.key]
else:
model[e.key] = e.old
try:
with mvc.rollback(d, undo=undo_dict_changes):
d["a"] = 1
d["b"] = 2
print(d)
d["c"]
except KeyError:
pass
print(d)
.. code-block:: python
{'a': 1, 'b': 2}
{}
"""
with hold(model, *args, **kwargs) as events:
try:
yield events
except Exception as error:
if undo is not None:
with mute(model):
undo(model, tuple(events), error)
events.clear()
raise
@contextmanager
|
rmorshea/spectate
|
spectate/core.py
|
expose
|
python
|
def expose(*methods):
    """Class-decorator form of :func:`expose_as`.

    Parameters
    ----------
    *methods : str
        Names of the methods that should be exposed to callbacks.

    Returns
    -------
    decorator : function
        Accepts a class and returns a new :class:`Watchable` subclass whose
        listed methods notify a :class:`Spectator` when called.
    """

    def decorator(cls):
        return expose_as(cls.__name__, cls, *methods)

    return decorator
|
A decorator for exposing the methods of a class.
Parameters
----------
*methods : str
A str representation of the methods that should be exposed to callbacks.
Returns
-------
decorator : function
A function accepting one argument - the class whose methods will be
exposed - and which returns a new :class:`Watchable` that will
notify a :class:`Spectator` when those methods are called.
Notes
-----
This is essentially a decorator version of :func:`expose_as`
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L250-L273
| null |
# See End Of File For Licensing
import sys
import inspect
import collections
from functools import wraps
try:
from inspect import signature, Parameter, Signature
except ImportError:
from funcsigs import signature, Parameter, Signature
__all__ = [
"expose",
"expose_as",
"watch",
"watched",
"unwatch",
"watcher",
"watchable",
"Watchable",
"Data",
]
def _safe_signature(func):
try:
return signature(func)
except ValueError:
# builtin methods don't have sigantures
return Signature(
[
Parameter("args", Parameter.VAR_POSITIONAL),
Parameter("kwargs", Parameter.VAR_KEYWORD),
]
)
def _signature_breakdown(func):
    """Decompose *func*'s signature.

    Returns a 5-tuple ``(sig_str, arg_names, defaults, var_positional_name,
    var_keyword_name)`` where the var-arg names are None when absent.
    """
    positional = []
    default_values = []
    star_name = None
    double_star_name = None
    sig = _safe_signature(func)
    for p in sig.parameters.values():
        if p.kind == Parameter.VAR_POSITIONAL:
            star_name = p.name
        elif p.kind == Parameter.VAR_KEYWORD:
            double_star_name = p.name
        else:
            # Collect defaults in declaration order alongside the names.
            if p.default is not Parameter.empty:
                default_values.append(p.default)
            positional.append(p.name)
    return str(sig), tuple(positional), default_values, star_name, double_star_name
class Spectator(object):
    """An object for holding before/after callbacks for watched methods."""

    def __init__(self, subclass):
        """Create a Spectator that can be registered to a :class:`Watchable` instance.

        Parameters
        ----------
        subclass: type
            The :class:`Watchable` subclass whose instances this
            :class:`Spectator` can respond to.
        """
        if not issubclass(subclass, Watchable):
            raise TypeError("Expected a Watchable, not %r." % subclass)
        self.subclass = subclass
        # Maps method name -> list of (beforeback, afterback) pairs.
        self._callback_registry = {}

    def callback(self, name, before=None, after=None):
        """Add a callback pair to this spectator.

        You can specify, with keywords, whether each callback should be triggered
        before, and/or after a given method is called - hereafter referred to as
        "beforebacks" and "afterbacks" respectively.

        Parameters
        ----------
        name: str
            The name of the method to which callbacks should respond.
        before: None or callable
            A callable of the form ``before(obj, call)`` where ``obj`` is
            the instance which called a watched method, and ``call`` is a
            :class:`Data` containing the name of the called method, along with
            its positional and keyword arguments under the attributes "name",
            "args", and "kwargs" respectively.
        after: None or callable
            A callable of the form ``after(obj, answer)`` where ``obj`` is
            the instance which called a watched method, and ``answer`` is a
            :class:`Data` containing the name of the called method, along with
            the value it returned, and data ``before`` may have returned,
            under the attributes "name", "value", and "before" respectively.
        """
        if isinstance(name, (list, tuple)):
            for name in name:
                self.callback(name, before, after)
        else:
            if not isinstance(getattr(self.subclass, name), MethodSpectator):
                raise ValueError("No method specator for '%s'" % name)
            if before is None and after is None:
                raise ValueError("No pre or post '%s' callbacks were given" % name)
            elif before is not None and not callable(before):
                raise ValueError("Expected a callable, not %r." % before)
            elif after is not None and not callable(after):
                raise ValueError("Expected a callable, not %r." % after)
            # NOTE: a second, unreachable ``before is None and after is None``
            # branch was removed here - the first check above already covers it.
            callback_list = self._callback_registry.setdefault(name, [])
            callback_list.append((before, after))

    def remove_callback(self, name, before=None, after=None):
        """Remove a beforeback, and afterback pair from this Spectator.

        If ``before`` and ``after`` are None then all callbacks for
        the given method will be removed. Otherwise, only the exact
        callback pair will be removed.

        Parameters
        ----------
        name: str
            The name of the method the callback pair is associated with.
        before: None or callable
            The beforeback that was originally registered to the given method.
        after: None or callable
            The afterback that was originally registered to the given method.
        """
        if isinstance(name, (list, tuple)):
            for name in name:
                self.remove_callback(name, before, after)
        elif before is None and after is None:
            del self._callback_registry[name]
        else:
            # NOTE(review): for an unregistered ``name`` this creates an empty
            # entry and then ``list.remove`` raises ValueError - kept as-is
            # for backward compatibility.
            if name in self._callback_registry:
                callback_list = self._callback_registry[name]
            else:
                callback_list = []
                self._callback_registry[name] = callback_list
            callback_list.remove((before, after))
            if len(callback_list) == 0:
                # cleanup if all callbacks are gone
                del self._callback_registry[name]

    def call(self, obj, name, method, args, kwargs):
        """Trigger a method along with its beforebacks and afterbacks.

        Parameters
        ----------
        obj: Watchable
            The instance whose watched method is being called.
        name: str
            The name of the method that will be called.
        method: callable
            The bound base method to invoke.
        args: tuple
            The arguments that will be passed to the base method.
        kwargs: dict
            The keyword args that will be passed to the base method.
        """
        if name in self._callback_registry:
            beforebacks, afterbacks = zip(*self._callback_registry[name])
            hold = []
            for b in beforebacks:
                if b is not None:
                    # Copy kwargs so a beforeback cannot mutate the real call.
                    call = Data(name=name, kwargs=kwargs.copy(), args=args)
                    v = b(obj, call)
                else:
                    v = None
                hold.append(v)
            out = method(*args, **kwargs)
            for a, bval in zip(afterbacks, hold):
                if a is not None:
                    a(obj, Data(before=bval, name=name, value=out))
                elif callable(bval):
                    # the beforeback's return value was an
                    # afterback that expects to be called
                    bval(out)
            return out
        else:
            return method(*args, **kwargs)
class MethodSpectator(object):
    """Notifies a :class:`Spectator` when the method this descriptor wraps is called."""
    def __init__(self, basemethod, name):
        # A MethodSpectator instance is not itself callable, so wrapping one
        # lands in this branch and we unwrap to the underlying callable
        # instead of stacking spectators.
        if not callable(basemethod):
            if isinstance(basemethod, MethodSpectator):
                basemethod = basemethod.basemethod
            else:
                raise TypeError("Expected a callable, not %r" % basemethod)
        # basemethod: the undecorated callable; name: the attribute name this
        # descriptor is assigned to on the owning class.
        self.basemethod = basemethod
        self.name = name
    def call(self, obj, cls):
        # Build a wrapper bound to ``obj`` that routes the call through the
        # instance's Spectator so registered before/after callbacks fire.
        spectator = obj._instance_spectator
        if hasattr(self.basemethod, "__get__"):
            method = self.basemethod.__get__(obj, cls)
        else:
            # Plain callables without descriptor behavior are used as-is.
            method = self.basemethod
        @wraps(method)
        def wrapper(*args, **kwargs):
            return spectator.call(obj, self.name, method, args, kwargs)
        if not hasattr(wrapper, "__wrapped__"):
            # Ensure the original is reachable even where ``wraps`` does not
            # set ``__wrapped__`` itself.
            wrapper.__wrapped__ = method
        return wrapper
    def __get__(self, obj, cls):
        # Descriptor protocol: class-level access returns the descriptor;
        # watched instances get the notifying wrapper; unwatched instances
        # fall back to the plain bound method.
        if obj is None:
            return self
        elif hasattr(obj, "_instance_spectator"):
            return self.call(obj, cls)
        elif hasattr(self.basemethod, "__get__"):
            return self.basemethod.__get__(obj, cls)
        else:
            return self.basemethod
class Watchable(object):
    """A base class for introspection.
    And in Python>=3.6 rewraps overridden methods with a :class:`MethodSpectator`
    if appropriate.
    """
    if not sys.version_info < (3, 6):
        # ``__init_subclass__`` (PEP 487) only exists on Python >= 3.6, so
        # the rewrapping hook is defined conditionally.
        def __init_subclass__(cls, **kwargs):
            # If a subclass overrides a :class:`MethodSpectator` method, then rewrap it.
            for base in cls.mro()[1:]:
                if issubclass(base, Watchable):
                    for k, v in base.__dict__.items():
                        # ``v`` is the inherited descriptor; ``k in cls.__dict__``
                        # means the subclass redefined it with a plain callable.
                        if k in cls.__dict__ and isinstance(v, MethodSpectator):
                            new = getattr(cls, k)
                            if callable(new) and not isinstance(new, MethodSpectator):
                                method_spectator = MethodSpectator(new, k)
                                setattr(cls, k, method_spectator)
            super().__init_subclass__(**kwargs)
def expose_as(name, base, *methods):
"""Return a new type with certain methods that are exposed to callback registration.
Parameters
----------
name : str
The name of the new type.
base : type
A type such as list or dict.
*methods : str
A str representation of the methods that should be exposed to callbacks.
Returns
-------
exposed : obj:
A :class:`Watchable` with methods that will notify a :class:`Spectator`.
"""
classdict = {}
for method in methods:
if not hasattr(base, method):
raise AttributeError(
"Cannot expose '%s', because '%s' "
"instances lack this method" % (method, base.__name__)
)
else:
classdict[method] = MethodSpectator(getattr(base, method), method)
return type(name, (base, Watchable), classdict)
def watchable(value):
    """Return True if *value* is a :class:`Watchable` subclass or instance."""
    if inspect.isclass(value):
        return issubclass(value, Watchable)
    return isinstance(value, Watchable)
def watch(value, spectator_type=Spectator):
    """Return the :class:`Spectator` registered to *value*, creating one if needed.

    To register callbacks to an eventful object you need a spectator that
    watches it for you.  A :class:`Spectator` is a relatively simple object
    with methods for adding, deleting, and triggering callbacks.

    Parameters
    ----------
    value : Watchable
        A :class:`Watchable` instance.
    spectator_type : Spectator
        The type used when a new spectator must be created.

    Returns
    -------
    spectator: spectator_type
        The :class:`Spectator` registered to the given instance.
    """
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    existing = getattr(value, "_instance_spectator", None)
    if isinstance(existing, Spectator):
        return existing
    # No spectator yet (or a bogus value) - attach a fresh one.
    spectator = spectator_type(type(value))
    value._instance_spectator = spectator
    return spectator
def watched(cls, *args, **kwargs):
    """Instantiate *cls* and return ``(instance, spectator)``.

    See :func:`watch` for more info on :class:`Spectator` registration.

    Parameters
    ----------
    cls: type
        A subclass of :class:`Watchable`.
    *args:
        Positional arguments used to create the instance.
    **kwargs:
        Keyword arguments used to create the instance.
    """
    instance = cls(*args, **kwargs)
    return instance, watch(instance)
def unwatch(value):
    """Detach and return the :class:`Spectator` of a :class:`Watchable` instance."""
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    current = watcher(value)
    try:
        del value._instance_spectator
    except Exception:
        # Best effort - the instance may never have been watched.
        pass
    return current
def watcher(value):
    """Return the :class:`Spectator` of a :class:`Watchable` instance (or None)."""
    if isinstance(value, Watchable):
        return getattr(value, "_instance_spectator", None)
    raise TypeError("Expected a Watchable, not %r." % value)
try:
    # ``Mapping`` lives in ``collections.abc`` since Python 3.3 and the old
    # ``collections.Mapping`` alias was removed in Python 3.10.
    from collections.abc import Mapping as _Mapping
except ImportError:  # pragma: no cover - Python 2 fallback
    from collections import Mapping as _Mapping


class Data(_Mapping):
    """An immutable mapping with attribute-access.
    Empty keys are represented with a value of ``None``.
    In order to evolve :class:`Data`, users must create copies that
    contain updates:
    .. code-block:: python
        d1 = Data(a=1)
        d2 = Data(b=2)
        assert Data(d1, **d2) == {'a': 1, 'b': 2}
    Easing this fact, is :class:`Data`'s syntactic sugar:
    .. code-block:: python
        d1 = Data(a=1)
        assert d1 == {'a': 1}
        d2 = d1['b': 2]
        assert d2 == {'a': 1, 'b': 2}
        d3 = d2['a': None, 'b': 1]
        assert d3 == {'b': 1}
        d4 = d3[{'a': 1, 'c': 3}, {'b': None}]
        assert d4 == {'a': 1, 'c': 3}
    """

    def __init__(self, *args, **kwargs):
        # Write straight into __dict__ because __setattr__ is disabled.
        self.__dict__.update(*args, **kwargs)

    def __getattr__(self, key):
        # Missing keys read as None rather than raising AttributeError.
        return None

    def __getitem__(self, key):
        if type(key) is slice:
            key = (key,)
        if type(key) is tuple:
            for x in key:
                if not isinstance(x, slice):
                    break
            else:
                # All slices: each ``start:stop`` becomes a ``key: value``
                # update in the copy.
                new = {s.start: s.stop for s in key}
                return type(self)(self, **new)
            # Mixed tuple: merge any mappings it contains into one update.
            merge = {}
            for x in key:
                if isinstance(x, _Mapping):
                    merge.update(x)
            key = merge
        if isinstance(key, _Mapping):
            return type(self)(self, **key)
        return self.__dict__.get(key)

    def __setitem__(self, key, value):
        # Fixed: the "%r" placeholder was never filled in the original.
        raise TypeError("%r is immutable" % self)

    def __setattr__(self, key, value):
        raise TypeError("%r is immutable" % self)

    def __delitem__(self, key):
        raise TypeError("%r is immutable" % self)

    def __delattr__(self, key):
        raise TypeError("%r is immutable" % self)

    def __contains__(self, key):
        return key in tuple(self)

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)
# The MIT License (MIT)
# Copyright (c) 2016 Ryan S. Morshead
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
rmorshea/spectate
|
spectate/core.py
|
expose_as
|
python
|
def expose_as(name, base, *methods):
    """Create a :class:`Watchable` subclass of *base* named *name*.

    Each listed method is wrapped in a :class:`MethodSpectator` so a
    registered :class:`Spectator` is notified whenever it is called.

    Parameters
    ----------
    name : str
        The name of the new type.
    base : type
        A type such as list or dict.
    *methods : str
        Names of the methods that should be exposed to callbacks.
    """
    namespace = {}
    for attr in methods:
        if not hasattr(base, attr):
            raise AttributeError(
                "Cannot expose '%s', because '%s' "
                "instances lack this method" % (attr, base.__name__)
            )
        namespace[attr] = MethodSpectator(getattr(base, attr), attr)
    return type(name, (base, Watchable), namespace)
|
Return a new type with certain methods that are exposed to callback registration.
Parameters
----------
name : str
The name of the new type.
base : type
A type such as list or dict.
*methods : str
A str representation of the methods that should be exposed to callbacks.
Returns
-------
exposed : obj:
A :class:`Watchable` with methods that will notify a :class:`Spectator`.
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L276-L302
| null |
# See End Of File For Licensing
import sys
import inspect
import collections
from functools import wraps
try:
from inspect import signature, Parameter, Signature
except ImportError:
from funcsigs import signature, Parameter, Signature
__all__ = [
"expose",
"expose_as",
"watch",
"watched",
"unwatch",
"watcher",
"watchable",
"Watchable",
"Data",
]
def _safe_signature(func):
try:
return signature(func)
except ValueError:
# builtin methods don't have sigantures
return Signature(
[
Parameter("args", Parameter.VAR_POSITIONAL),
Parameter("kwargs", Parameter.VAR_KEYWORD),
]
)
def _signature_breakdown(func):
args = []
defaults = []
var_keyword = None
var_positional = None
sig = _safe_signature(func)
for param in sig.parameters.values():
if param.kind == Parameter.VAR_POSITIONAL:
var_positional = param.name
elif param.kind == Parameter.VAR_KEYWORD:
var_keyword = param.name
else:
if param.default is not Parameter.empty:
defaults.append(param.default)
args.append(param.name)
return str(sig), tuple(args), defaults, var_positional, var_keyword
class Spectator(object):
"""An object for holding callbacks"""
def __init__(self, subclass):
"""Create a Spectator that can be registered to a :class:`Watchable` instance.
Parameters
----------
subclass: type
A the :class:`Watchable` subclass whose instance this
:class:`Specatator` can respond to.
"""
if not issubclass(subclass, Watchable):
raise TypeError("Expected a Watchable, not %r." % subclass)
self.subclass = subclass
self._callback_registry = {}
def callback(self, name, before=None, after=None):
"""Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
before, and/or or after a given method is called - hereafter refered to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
A callable of the form ``after(obj, answer)`` where ``obj` is
the instance which alled a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.callback(name, before, after)
else:
if not isinstance(getattr(self.subclass, name), MethodSpectator):
raise ValueError("No method specator for '%s'" % name)
if before is None and after is None:
raise ValueError("No pre or post '%s' callbacks were given" % name)
elif before is not None and not callable(before):
raise ValueError("Expected a callable, not %r." % before)
elif after is not None and not callable(after):
raise ValueError("Expected a callable, not %r." % after)
elif before is None and after is None:
raise ValueError("No callbacks were given.")
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.append((before, after))
def remove_callback(self, name, before=None, after=None):
"""Remove a beforeback, and afterback pair from this Spectator
If ``before`` and ``after`` are None then all callbacks for
the given method will be removed. Otherwise, only the exact
callback pair will be removed.
Parameters
----------
name: str
The name of the method the callback pair is associated with.
before: None or callable
The beforeback that was originally registered to the given method.
after: None or callable
The afterback that was originally registered to the given method.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.remove_callback(name, before, after)
elif before is None and after is None:
del self._callback_registry[name]
else:
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.remove((before, after))
if len(callback_list) == 0:
# cleanup if all callbacks are gone
del self._callback_registry[name]
def call(self, obj, name, method, args, kwargs):
"""Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
name: str
The name of the method that will be called
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method
"""
if name in self._callback_registry:
beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))
hold = []
for b in beforebacks:
if b is not None:
call = Data(name=name, kwargs=kwargs.copy(), args=args)
v = b(obj, call)
else:
v = None
hold.append(v)
out = method(*args, **kwargs)
for a, bval in zip(afterbacks, hold):
if a is not None:
a(obj, Data(before=bval, name=name, value=out))
elif callable(bval):
# the beforeback's return value was an
# afterback that expects to be called
bval(out)
return out
else:
return method(*args, **kwargs)
class MethodSpectator(object):
    """Descriptor that notifies a :class:`Spectator` when the wrapped method runs."""

    def __init__(self, basemethod, name):
        if callable(basemethod):
            self.basemethod = basemethod
        elif isinstance(basemethod, MethodSpectator):
            # Unwrap a nested MethodSpectator down to its raw callable.
            self.basemethod = basemethod.basemethod
        else:
            raise TypeError("Expected a callable, not %r" % basemethod)
        self.name = name

    def call(self, obj, cls):
        """Build a wrapper bound to ``obj`` that reports through its spectator."""
        spec = obj._instance_spectator
        if hasattr(self.basemethod, "__get__"):
            bound = self.basemethod.__get__(obj, cls)
        else:
            bound = self.basemethod

        @wraps(bound)
        def wrapper(*args, **kwargs):
            return spec.call(obj, self.name, bound, args, kwargs)

        if not hasattr(wrapper, "__wrapped__"):
            wrapper.__wrapped__ = bound
        return wrapper

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: expose the descriptor.
            return self
        if hasattr(obj, "_instance_spectator"):
            return self.call(obj, cls)
        # No spectator registered: behave like the plain method.
        if hasattr(self.basemethod, "__get__"):
            return self.basemethod.__get__(obj, cls)
        return self.basemethod
class Watchable(object):
    """Base class marking types whose exposed methods can be watched.

    On Python >= 3.6, overriding a method that was wrapped in a
    :class:`MethodSpectator` automatically rewraps the override so that
    it stays observable.
    """

    if sys.version_info >= (3, 6):

        def __init_subclass__(cls, **kwargs):
            # Rewrap any override of a MethodSpectator-wrapped base method.
            for base in cls.mro()[1:]:
                if not issubclass(base, Watchable):
                    continue
                for attr, descriptor in base.__dict__.items():
                    if attr not in cls.__dict__:
                        continue
                    if not isinstance(descriptor, MethodSpectator):
                        continue
                    override = getattr(cls, attr)
                    if callable(override) and not isinstance(override, MethodSpectator):
                        setattr(cls, attr, MethodSpectator(override, attr))
            super().__init_subclass__(**kwargs)
def expose(*methods):
    """Class decorator exposing the named methods to callback registration.

    Parameters
    ----------
    *methods : str
        Names of the methods that should notify a :class:`Spectator`.

    Returns
    -------
    decorator : function
        Accepts a class and returns a new :class:`Watchable` subclass whose
        listed methods are wrapped; a decorator form of :func:`expose_as`.
    """
    def decorator(base):
        return expose_as(base.__name__, base, *methods)
    return decorator
def watchable(value):
    """Return True if *value* is a :class:`Watchable` subclass or instance."""
    if inspect.isclass(value):
        return issubclass(value, Watchable)
    return isinstance(value, Watchable)
def watch(value, spectator_type=Spectator):
    """Register (or fetch) the :class:`Spectator` watching *value*.

    To register callbacks against an eventful object you need a spectator
    that watches it for you. This returns the one already stored on
    ``value``, or creates, attaches, and returns a fresh ``spectator_type``.

    Parameters
    ----------
    value : Watchable
        A :class:`Watchable` instance.
    spectator_type : Spectator
        The spectator class instantiated when none exists yet.

    Returns
    -------
    spectator : spectator_type
        The :class:`Spectator` registered to ``value``.
    """
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    spectator = getattr(value, "_instance_spectator", None)
    if not isinstance(spectator, Spectator):
        spectator = spectator_type(type(value))
        value._instance_spectator = spectator
    return spectator
def watched(cls, *args, **kwargs):
    """Instantiate ``cls`` and return ``(instance, spectator)``.

    Shorthand for constructing a :class:`Watchable` and immediately
    calling :func:`watch` on the new instance.

    Parameters
    ----------
    cls: type:
        A :class:`Watchable` subclass.
    *args:
        Positional arguments used to create the instance.
    **kwargs:
        Keyword arguments used to create the instance.
    """
    instance = cls(*args, **kwargs)
    return instance, watch(instance)
def unwatch(value):
    """Detach and return the :class:`Spectator` of a :class:`Watchable`."""
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    spectator = watcher(value)
    try:
        del value._instance_spectator
    except Exception:
        # Best effort: the instance may never have been watched.
        pass
    return spectator
def watcher(value):
    """Return the :class:`Spectator` of *value*, or None if it is unwatched."""
    if isinstance(value, Watchable):
        return getattr(value, "_instance_spectator", None)
    raise TypeError("Expected a Watchable, not %r." % value)
try:  # ``collections.Mapping`` was removed in Python 3.10
    from collections.abc import Mapping as _Mapping
except ImportError:  # Python 2 fallback, mirroring the funcsigs fallback
    from collections import Mapping as _Mapping


class Data(_Mapping):
    """An immutable mapping with attribute access.

    Missing keys read as ``None``. Because instances are immutable,
    "updates" are expressed by creating evolved copies:

    .. code-block:: python

        d1 = Data(a=1)
        d2 = d1['b': 2]           # slice syntax merges {'b': 2}
        d3 = d2[{'c': 3}]         # indexing with mappings merges them
    """

    def __init__(self, *args, **kwargs):
        # Write through __dict__ directly; __setattr__ is blocked below.
        self.__dict__.update(*args, **kwargs)

    def __getattr__(self, key):
        # Absent keys read as None instead of raising AttributeError.
        return None

    def __getitem__(self, key):
        if type(key) is slice:
            key = (key,)
        if type(key) is tuple:
            for item in key:
                if not isinstance(item, slice):
                    break
            else:
                # All slices: treat each as a key/value pair to merge.
                return type(self)(self, **{s.start: s.stop for s in key})
            merged = {}
            for item in key:
                if isinstance(item, _Mapping):
                    merged.update(item)
            key = merged
        if isinstance(key, _Mapping):
            # Indexing with a mapping produces an evolved copy.
            return type(self)(self, **key)
        return self.__dict__.get(key)

    # NOTE: the original raised TypeError("%r is immutable") without
    # interpolating anything, so users literally saw "%r"; fixed below.
    def __setitem__(self, key, value):
        raise TypeError("%r is immutable" % self)

    def __setattr__(self, key, value):
        raise TypeError("%r is immutable" % self)

    def __delitem__(self, key):
        raise TypeError("%r is immutable" % self)

    def __delattr__(self, key):
        raise TypeError("%r is immutable" % self)

    def __contains__(self, key):
        # O(1) dict lookup instead of scanning tuple(self).
        return key in self.__dict__

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)
# The MIT License (MIT)
# Copyright (c) 2016 Ryan S. Morshead
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
rmorshea/spectate
|
spectate/core.py
|
watchable
|
python
|
def watchable(value):
check = issubclass if inspect.isclass(value) else isinstance
return check(value, Watchable)
|
Returns True if the given value is a :class:`Watchable` subclass or instance.
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L305-L308
| null |
# See End Of File For Licensing
import sys
import inspect
import collections
from functools import wraps
try:
from inspect import signature, Parameter, Signature
except ImportError:
from funcsigs import signature, Parameter, Signature
__all__ = [
"expose",
"expose_as",
"watch",
"watched",
"unwatch",
"watcher",
"watchable",
"Watchable",
"Data",
]
def _safe_signature(func):
try:
return signature(func)
except ValueError:
# builtin methods don't have sigantures
return Signature(
[
Parameter("args", Parameter.VAR_POSITIONAL),
Parameter("kwargs", Parameter.VAR_KEYWORD),
]
)
def _signature_breakdown(func):
    """Split ``func``'s signature into its component parts.

    Returns a 5-tuple: the signature string, a tuple of the plain
    parameter names, their default values, and the names of the
    ``*args`` and ``**kwargs`` parameters (None when absent).
    """
    names = []
    defaults = []
    star_args = None
    star_kwargs = None
    sig = _safe_signature(func)
    for param in sig.parameters.values():
        if param.kind == Parameter.VAR_POSITIONAL:
            star_args = param.name
        elif param.kind == Parameter.VAR_KEYWORD:
            star_kwargs = param.name
        else:
            if param.default is not Parameter.empty:
                defaults.append(param.default)
            names.append(param.name)
    return str(sig), tuple(names), defaults, star_args, star_kwargs
class Spectator(object):
    """Holds the before/after callbacks registered for one Watchable type."""

    def __init__(self, subclass):
        """Create a Spectator for instances of the given Watchable subclass.

        Parameters
        ----------
        subclass: type
            The :class:`Watchable` subclass whose instances this
            :class:`Spectator` can respond to.
        """
        if not issubclass(subclass, Watchable):
            raise TypeError("Expected a Watchable, not %r." % subclass)
        self.subclass = subclass
        self._callback_registry = {}

    def callback(self, name, before=None, after=None):
        """Register a beforeback/afterback pair for the named method(s).

        Parameters
        ----------
        name: str or list of str
            Name(s) of the method(s) the callbacks respond to.
        before: None or callable
            ``before(obj, call)`` where ``call`` is a :class:`Data` with
            the attributes "name", "args" and "kwargs".
        after: None or callable
            ``after(obj, answer)`` where ``answer`` is a :class:`Data`
            with the attributes "name", "value" and "before".
        """
        if isinstance(name, (list, tuple)):
            for each in name:
                self.callback(each, before, after)
            return
        if not isinstance(getattr(self.subclass, name), MethodSpectator):
            raise ValueError("No method specator for '%s'" % name)
        if before is None and after is None:
            raise ValueError("No pre or post '%s' callbacks were given" % name)
        if before is not None and not callable(before):
            raise ValueError("Expected a callable, not %r." % before)
        if after is not None and not callable(after):
            raise ValueError("Expected a callable, not %r." % after)
        # NOTE: a second "before is None and after is None" branch existed
        # here originally; it was unreachable dead code and was removed.
        self._callback_registry.setdefault(name, []).append((before, after))

    def remove_callback(self, name, before=None, after=None):
        """Remove callbacks registered for the named method(s).

        If both ``before`` and ``after`` are None, every callback for the
        method is dropped; otherwise only the exact pair is removed.
        """
        if isinstance(name, (list, tuple)):
            for each in name:
                self.remove_callback(each, before, after)
        elif before is None and after is None:
            del self._callback_registry[name]
        else:
            # Look up without mutating the registry, so a failed removal
            # no longer leaves a spurious empty callback list behind.
            callback_list = self._callback_registry.get(name, [])
            callback_list.remove((before, after))
            if not callback_list:
                # Cleanup once all callbacks are gone.
                del self._callback_registry[name]

    def call(self, obj, name, method, args, kwargs):
        """Invoke ``method`` wrapped by its registered callbacks.

        Parameters
        ----------
        obj:
            The watched instance the method belongs to.
        name: str
            The registered method name.
        method: callable
            The bound method to execute.
        args: tuple
            Positional arguments forwarded to ``method``.
        kwargs: dict
            Keyword arguments forwarded to ``method``.
        """
        if name not in self._callback_registry:
            # Fast path: nothing is watching this method.
            return method(*args, **kwargs)
        beforebacks, afterbacks = zip(*self._callback_registry[name])
        staged = []
        for beforeback in beforebacks:
            if beforeback is not None:
                info = Data(name=name, args=args, kwargs=kwargs.copy())
                staged.append(beforeback(obj, info))
            else:
                staged.append(None)
        result = method(*args, **kwargs)
        for afterback, staged_value in zip(afterbacks, staged):
            if afterback is not None:
                afterback(obj, Data(before=staged_value, name=name, value=result))
            elif callable(staged_value):
                # A beforeback may return a callable acting as its afterback.
                staged_value(result)
        return result
class MethodSpectator(object):
    """Descriptor that notifies a :class:`Spectator` when the wrapped method runs."""

    def __init__(self, basemethod, name):
        if callable(basemethod):
            self.basemethod = basemethod
        elif isinstance(basemethod, MethodSpectator):
            # Unwrap a nested MethodSpectator down to its raw callable.
            self.basemethod = basemethod.basemethod
        else:
            raise TypeError("Expected a callable, not %r" % basemethod)
        self.name = name

    def call(self, obj, cls):
        """Build a wrapper bound to ``obj`` that reports through its spectator."""
        spec = obj._instance_spectator
        if hasattr(self.basemethod, "__get__"):
            bound = self.basemethod.__get__(obj, cls)
        else:
            bound = self.basemethod

        @wraps(bound)
        def wrapper(*args, **kwargs):
            return spec.call(obj, self.name, bound, args, kwargs)

        if not hasattr(wrapper, "__wrapped__"):
            wrapper.__wrapped__ = bound
        return wrapper

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: expose the descriptor.
            return self
        if hasattr(obj, "_instance_spectator"):
            return self.call(obj, cls)
        # No spectator registered: behave like the plain method.
        if hasattr(self.basemethod, "__get__"):
            return self.basemethod.__get__(obj, cls)
        return self.basemethod
class Watchable(object):
    """Base class marking types whose exposed methods can be watched.

    On Python >= 3.6, overriding a method that was wrapped in a
    :class:`MethodSpectator` automatically rewraps the override so that
    it stays observable.
    """

    if sys.version_info >= (3, 6):

        def __init_subclass__(cls, **kwargs):
            # Rewrap any override of a MethodSpectator-wrapped base method.
            for base in cls.mro()[1:]:
                if not issubclass(base, Watchable):
                    continue
                for attr, descriptor in base.__dict__.items():
                    if attr not in cls.__dict__:
                        continue
                    if not isinstance(descriptor, MethodSpectator):
                        continue
                    override = getattr(cls, attr)
                    if callable(override) and not isinstance(override, MethodSpectator):
                        setattr(cls, attr, MethodSpectator(override, attr))
            super().__init_subclass__(**kwargs)
def expose(*methods):
    """Class decorator exposing the named methods to callback registration.

    Parameters
    ----------
    *methods : str
        Names of the methods that should notify a :class:`Spectator`.

    Returns
    -------
    decorator : function
        Accepts a class and returns a new :class:`Watchable` subclass whose
        listed methods are wrapped; a decorator form of :func:`expose_as`.
    """
    def decorator(base):
        return expose_as(base.__name__, base, *methods)
    return decorator
def expose_as(name, base, *methods):
    """Build a :class:`Watchable` subtype of ``base`` with watchable methods.

    Parameters
    ----------
    name : str
        Name of the generated type.
    base : type
        The type to extend (e.g. list or dict).
    *methods : str
        Names of the ``base`` methods exposed to callbacks.

    Returns
    -------
    exposed : type
        A subclass of ``base`` and :class:`Watchable` whose listed methods
        notify a :class:`Spectator` when called.
    """
    namespace = {}
    for method_name in methods:
        if not hasattr(base, method_name):
            raise AttributeError(
                "Cannot expose '%s', because '%s' "
                "instances lack this method" % (method_name, base.__name__)
            )
        namespace[method_name] = MethodSpectator(getattr(base, method_name), method_name)
    return type(name, (base, Watchable), namespace)
def watch(value, spectator_type=Spectator):
    """Register (or fetch) the :class:`Spectator` watching *value*.

    To register callbacks against an eventful object you need a spectator
    that watches it for you. This returns the one already stored on
    ``value``, or creates, attaches, and returns a fresh ``spectator_type``.

    Parameters
    ----------
    value : Watchable
        A :class:`Watchable` instance.
    spectator_type : Spectator
        The spectator class instantiated when none exists yet.

    Returns
    -------
    spectator : spectator_type
        The :class:`Spectator` registered to ``value``.
    """
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    spectator = getattr(value, "_instance_spectator", None)
    if not isinstance(spectator, Spectator):
        spectator = spectator_type(type(value))
        value._instance_spectator = spectator
    return spectator
def watched(cls, *args, **kwargs):
    """Instantiate ``cls`` and return ``(instance, spectator)``.

    Shorthand for constructing a :class:`Watchable` and immediately
    calling :func:`watch` on the new instance.

    Parameters
    ----------
    cls: type:
        A :class:`Watchable` subclass.
    *args:
        Positional arguments used to create the instance.
    **kwargs:
        Keyword arguments used to create the instance.
    """
    instance = cls(*args, **kwargs)
    return instance, watch(instance)
def unwatch(value):
    """Detach and return the :class:`Spectator` of a :class:`Watchable`."""
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    spectator = watcher(value)
    try:
        del value._instance_spectator
    except Exception:
        # Best effort: the instance may never have been watched.
        pass
    return spectator
def watcher(value):
    """Return the :class:`Spectator` of *value*, or None if it is unwatched."""
    if isinstance(value, Watchable):
        return getattr(value, "_instance_spectator", None)
    raise TypeError("Expected a Watchable, not %r." % value)
try:  # ``collections.Mapping`` was removed in Python 3.10
    from collections.abc import Mapping as _Mapping
except ImportError:  # Python 2 fallback, mirroring the funcsigs fallback
    from collections import Mapping as _Mapping


class Data(_Mapping):
    """An immutable mapping with attribute access.

    Missing keys read as ``None``. Because instances are immutable,
    "updates" are expressed by creating evolved copies:

    .. code-block:: python

        d1 = Data(a=1)
        d2 = d1['b': 2]           # slice syntax merges {'b': 2}
        d3 = d2[{'c': 3}]         # indexing with mappings merges them
    """

    def __init__(self, *args, **kwargs):
        # Write through __dict__ directly; __setattr__ is blocked below.
        self.__dict__.update(*args, **kwargs)

    def __getattr__(self, key):
        # Absent keys read as None instead of raising AttributeError.
        return None

    def __getitem__(self, key):
        if type(key) is slice:
            key = (key,)
        if type(key) is tuple:
            for item in key:
                if not isinstance(item, slice):
                    break
            else:
                # All slices: treat each as a key/value pair to merge.
                return type(self)(self, **{s.start: s.stop for s in key})
            merged = {}
            for item in key:
                if isinstance(item, _Mapping):
                    merged.update(item)
            key = merged
        if isinstance(key, _Mapping):
            # Indexing with a mapping produces an evolved copy.
            return type(self)(self, **key)
        return self.__dict__.get(key)

    # NOTE: the original raised TypeError("%r is immutable") without
    # interpolating anything, so users literally saw "%r"; fixed below.
    def __setitem__(self, key, value):
        raise TypeError("%r is immutable" % self)

    def __setattr__(self, key, value):
        raise TypeError("%r is immutable" % self)

    def __delitem__(self, key):
        raise TypeError("%r is immutable" % self)

    def __delattr__(self, key):
        raise TypeError("%r is immutable" % self)

    def __contains__(self, key):
        # O(1) dict lookup instead of scanning tuple(self).
        return key in self.__dict__

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)
# The MIT License (MIT)
# Copyright (c) 2016 Ryan S. Morshead
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
rmorshea/spectate
|
spectate/core.py
|
watch
|
python
|
def watch(value, spectator_type=Spectator):
if isinstance(value, Watchable):
wtype = type(value)
else:
raise TypeError("Expected a Watchable, not %r." % value)
spectator = getattr(value, "_instance_spectator", None)
if not isinstance(spectator, Spectator):
spectator = spectator_type(wtype)
value._instance_spectator = spectator
return spectator
|
Register a :class:`Specatator` to a :class:`Watchable` and return it.
In order to register callbacks to an eventful object, you need to create
a Spectator that will watch it for you. A :class:`Specatator` is a relatively simple
object that has methods for adding, deleting, and triggering callbacks. To
create a spectator we call ``spectator = watch(x)``, where x is a Watchable
instance.
Parameters
----------
value : Watchable
A :class:`Watchable` instance.
spectator_type : Spectator
The type of spectator that will be returned.
Returns
-------
spectator: spectator_type
The :class:`Specatator` (specified by ``spectator_type``) that is
was registered to the given instance.
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L311-L341
| null |
# See End Of File For Licensing
import sys
import inspect
import collections
from functools import wraps
try:
from inspect import signature, Parameter, Signature
except ImportError:
from funcsigs import signature, Parameter, Signature
__all__ = [
"expose",
"expose_as",
"watch",
"watched",
"unwatch",
"watcher",
"watchable",
"Watchable",
"Data",
]
def _safe_signature(func):
try:
return signature(func)
except ValueError:
# builtin methods don't have sigantures
return Signature(
[
Parameter("args", Parameter.VAR_POSITIONAL),
Parameter("kwargs", Parameter.VAR_KEYWORD),
]
)
def _signature_breakdown(func):
    """Split ``func``'s signature into its component parts.

    Returns a 5-tuple: the signature string, a tuple of the plain
    parameter names, their default values, and the names of the
    ``*args`` and ``**kwargs`` parameters (None when absent).
    """
    names = []
    defaults = []
    star_args = None
    star_kwargs = None
    sig = _safe_signature(func)
    for param in sig.parameters.values():
        if param.kind == Parameter.VAR_POSITIONAL:
            star_args = param.name
        elif param.kind == Parameter.VAR_KEYWORD:
            star_kwargs = param.name
        else:
            if param.default is not Parameter.empty:
                defaults.append(param.default)
            names.append(param.name)
    return str(sig), tuple(names), defaults, star_args, star_kwargs
class Spectator(object):
    """Holds the before/after callbacks registered for one Watchable type."""

    def __init__(self, subclass):
        """Create a Spectator for instances of the given Watchable subclass.

        Parameters
        ----------
        subclass: type
            The :class:`Watchable` subclass whose instances this
            :class:`Spectator` can respond to.
        """
        if not issubclass(subclass, Watchable):
            raise TypeError("Expected a Watchable, not %r." % subclass)
        self.subclass = subclass
        self._callback_registry = {}

    def callback(self, name, before=None, after=None):
        """Register a beforeback/afterback pair for the named method(s).

        Parameters
        ----------
        name: str or list of str
            Name(s) of the method(s) the callbacks respond to.
        before: None or callable
            ``before(obj, call)`` where ``call`` is a :class:`Data` with
            the attributes "name", "args" and "kwargs".
        after: None or callable
            ``after(obj, answer)`` where ``answer`` is a :class:`Data`
            with the attributes "name", "value" and "before".
        """
        if isinstance(name, (list, tuple)):
            for each in name:
                self.callback(each, before, after)
            return
        if not isinstance(getattr(self.subclass, name), MethodSpectator):
            raise ValueError("No method specator for '%s'" % name)
        if before is None and after is None:
            raise ValueError("No pre or post '%s' callbacks were given" % name)
        if before is not None and not callable(before):
            raise ValueError("Expected a callable, not %r." % before)
        if after is not None and not callable(after):
            raise ValueError("Expected a callable, not %r." % after)
        # NOTE: a second "before is None and after is None" branch existed
        # here originally; it was unreachable dead code and was removed.
        self._callback_registry.setdefault(name, []).append((before, after))

    def remove_callback(self, name, before=None, after=None):
        """Remove callbacks registered for the named method(s).

        If both ``before`` and ``after`` are None, every callback for the
        method is dropped; otherwise only the exact pair is removed.
        """
        if isinstance(name, (list, tuple)):
            for each in name:
                self.remove_callback(each, before, after)
        elif before is None and after is None:
            del self._callback_registry[name]
        else:
            # Look up without mutating the registry, so a failed removal
            # no longer leaves a spurious empty callback list behind.
            callback_list = self._callback_registry.get(name, [])
            callback_list.remove((before, after))
            if not callback_list:
                # Cleanup once all callbacks are gone.
                del self._callback_registry[name]

    def call(self, obj, name, method, args, kwargs):
        """Invoke ``method`` wrapped by its registered callbacks.

        Parameters
        ----------
        obj:
            The watched instance the method belongs to.
        name: str
            The registered method name.
        method: callable
            The bound method to execute.
        args: tuple
            Positional arguments forwarded to ``method``.
        kwargs: dict
            Keyword arguments forwarded to ``method``.
        """
        if name not in self._callback_registry:
            # Fast path: nothing is watching this method.
            return method(*args, **kwargs)
        beforebacks, afterbacks = zip(*self._callback_registry[name])
        staged = []
        for beforeback in beforebacks:
            if beforeback is not None:
                info = Data(name=name, args=args, kwargs=kwargs.copy())
                staged.append(beforeback(obj, info))
            else:
                staged.append(None)
        result = method(*args, **kwargs)
        for afterback, staged_value in zip(afterbacks, staged):
            if afterback is not None:
                afterback(obj, Data(before=staged_value, name=name, value=result))
            elif callable(staged_value):
                # A beforeback may return a callable acting as its afterback.
                staged_value(result)
        return result
class MethodSpectator(object):
    """Descriptor that notifies a :class:`Spectator` when the wrapped method runs."""

    def __init__(self, basemethod, name):
        if callable(basemethod):
            self.basemethod = basemethod
        elif isinstance(basemethod, MethodSpectator):
            # Unwrap a nested MethodSpectator down to its raw callable.
            self.basemethod = basemethod.basemethod
        else:
            raise TypeError("Expected a callable, not %r" % basemethod)
        self.name = name

    def call(self, obj, cls):
        """Build a wrapper bound to ``obj`` that reports through its spectator."""
        spec = obj._instance_spectator
        if hasattr(self.basemethod, "__get__"):
            bound = self.basemethod.__get__(obj, cls)
        else:
            bound = self.basemethod

        @wraps(bound)
        def wrapper(*args, **kwargs):
            return spec.call(obj, self.name, bound, args, kwargs)

        if not hasattr(wrapper, "__wrapped__"):
            wrapper.__wrapped__ = bound
        return wrapper

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: expose the descriptor.
            return self
        if hasattr(obj, "_instance_spectator"):
            return self.call(obj, cls)
        # No spectator registered: behave like the plain method.
        if hasattr(self.basemethod, "__get__"):
            return self.basemethod.__get__(obj, cls)
        return self.basemethod
class Watchable(object):
    """Base class marking types whose exposed methods can be watched.

    On Python >= 3.6, overriding a method that was wrapped in a
    :class:`MethodSpectator` automatically rewraps the override so that
    it stays observable.
    """

    if sys.version_info >= (3, 6):

        def __init_subclass__(cls, **kwargs):
            # Rewrap any override of a MethodSpectator-wrapped base method.
            for base in cls.mro()[1:]:
                if not issubclass(base, Watchable):
                    continue
                for attr, descriptor in base.__dict__.items():
                    if attr not in cls.__dict__:
                        continue
                    if not isinstance(descriptor, MethodSpectator):
                        continue
                    override = getattr(cls, attr)
                    if callable(override) and not isinstance(override, MethodSpectator):
                        setattr(cls, attr, MethodSpectator(override, attr))
            super().__init_subclass__(**kwargs)
def expose(*methods):
    """Class decorator exposing the named methods to callback registration.

    Parameters
    ----------
    *methods : str
        Names of the methods that should notify a :class:`Spectator`.

    Returns
    -------
    decorator : function
        Accepts a class and returns a new :class:`Watchable` subclass whose
        listed methods are wrapped; a decorator form of :func:`expose_as`.
    """
    def decorator(base):
        return expose_as(base.__name__, base, *methods)
    return decorator
def expose_as(name, base, *methods):
    """Build a :class:`Watchable` subtype of ``base`` with watchable methods.

    Parameters
    ----------
    name : str
        Name of the generated type.
    base : type
        The type to extend (e.g. list or dict).
    *methods : str
        Names of the ``base`` methods exposed to callbacks.

    Returns
    -------
    exposed : type
        A subclass of ``base`` and :class:`Watchable` whose listed methods
        notify a :class:`Spectator` when called.
    """
    namespace = {}
    for method_name in methods:
        if not hasattr(base, method_name):
            raise AttributeError(
                "Cannot expose '%s', because '%s' "
                "instances lack this method" % (method_name, base.__name__)
            )
        namespace[method_name] = MethodSpectator(getattr(base, method_name), method_name)
    return type(name, (base, Watchable), namespace)
def watchable(value):
    """Return True if *value* is a :class:`Watchable` subclass or instance."""
    if inspect.isclass(value):
        return issubclass(value, Watchable)
    return isinstance(value, Watchable)
def watched(cls, *args, **kwargs):
    """Instantiate ``cls`` and return ``(instance, spectator)``.

    Shorthand for constructing a :class:`Watchable` and immediately
    calling :func:`watch` on the new instance.

    Parameters
    ----------
    cls: type:
        A :class:`Watchable` subclass.
    *args:
        Positional arguments used to create the instance.
    **kwargs:
        Keyword arguments used to create the instance.
    """
    instance = cls(*args, **kwargs)
    return instance, watch(instance)
def unwatch(value):
    """Detach and return the :class:`Spectator` of a :class:`Watchable`."""
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    spectator = watcher(value)
    try:
        del value._instance_spectator
    except Exception:
        # Best effort: the instance may never have been watched.
        pass
    return spectator
def watcher(value):
    """Return the :class:`Spectator` of *value*, or None if it is unwatched."""
    if isinstance(value, Watchable):
        return getattr(value, "_instance_spectator", None)
    raise TypeError("Expected a Watchable, not %r." % value)
try:  # ``collections.Mapping`` was removed in Python 3.10
    from collections.abc import Mapping as _Mapping
except ImportError:  # Python 2 fallback, mirroring the funcsigs fallback
    from collections import Mapping as _Mapping


class Data(_Mapping):
    """An immutable mapping with attribute access.

    Missing keys read as ``None``. Because instances are immutable,
    "updates" are expressed by creating evolved copies:

    .. code-block:: python

        d1 = Data(a=1)
        d2 = d1['b': 2]           # slice syntax merges {'b': 2}
        d3 = d2[{'c': 3}]         # indexing with mappings merges them
    """

    def __init__(self, *args, **kwargs):
        # Write through __dict__ directly; __setattr__ is blocked below.
        self.__dict__.update(*args, **kwargs)

    def __getattr__(self, key):
        # Absent keys read as None instead of raising AttributeError.
        return None

    def __getitem__(self, key):
        if type(key) is slice:
            key = (key,)
        if type(key) is tuple:
            for item in key:
                if not isinstance(item, slice):
                    break
            else:
                # All slices: treat each as a key/value pair to merge.
                return type(self)(self, **{s.start: s.stop for s in key})
            merged = {}
            for item in key:
                if isinstance(item, _Mapping):
                    merged.update(item)
            key = merged
        if isinstance(key, _Mapping):
            # Indexing with a mapping produces an evolved copy.
            return type(self)(self, **key)
        return self.__dict__.get(key)

    # NOTE: the original raised TypeError("%r is immutable") without
    # interpolating anything, so users literally saw "%r"; fixed below.
    def __setitem__(self, key, value):
        raise TypeError("%r is immutable" % self)

    def __setattr__(self, key, value):
        raise TypeError("%r is immutable" % self)

    def __delitem__(self, key):
        raise TypeError("%r is immutable" % self)

    def __delattr__(self, key):
        raise TypeError("%r is immutable" % self)

    def __contains__(self, key):
        # O(1) dict lookup instead of scanning tuple(self).
        return key in self.__dict__

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)
# The MIT License (MIT)
# Copyright (c) 2016 Ryan S. Morshead
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
rmorshea/spectate
|
spectate/core.py
|
watched
|
python
|
def watched(cls, *args, **kwargs):
value = cls(*args, **kwargs)
return value, watch(value)
|
Create and return a :class:`Watchable` with its :class:`Specatator`.
See :func:`watch` for more info on :class:`Specatator` registration.
Parameters
----------
cls: type:
A subclass of :class:`Watchable`
*args:
Positional arguments used to create the instance
**kwargs:
Keyword arguments used to create the instance.
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L344-L359
|
[
"def watch(value, spectator_type=Spectator):\n \"\"\"Register a :class:`Specatator` to a :class:`Watchable` and return it.\n\n In order to register callbacks to an eventful object, you need to create\n a Spectator that will watch it for you. A :class:`Specatator` is a relatively simple\n object that has methods for adding, deleting, and triggering callbacks. To\n create a spectator we call ``spectator = watch(x)``, where x is a Watchable\n instance.\n\n Parameters\n ----------\n value : Watchable\n A :class:`Watchable` instance.\n spectator_type : Spectator\n The type of spectator that will be returned.\n\n Returns\n -------\n spectator: spectator_type\n The :class:`Specatator` (specified by ``spectator_type``) that is\n was registered to the given instance.\n \"\"\"\n if isinstance(value, Watchable):\n wtype = type(value)\n else:\n raise TypeError(\"Expected a Watchable, not %r.\" % value)\n spectator = getattr(value, \"_instance_spectator\", None)\n if not isinstance(spectator, Spectator):\n spectator = spectator_type(wtype)\n value._instance_spectator = spectator\n return spectator\n"
] |
# See End Of File For Licensing
import sys
import inspect
import collections
from functools import wraps
try:
from inspect import signature, Parameter, Signature
except ImportError:
from funcsigs import signature, Parameter, Signature
__all__ = [
"expose",
"expose_as",
"watch",
"watched",
"unwatch",
"watcher",
"watchable",
"Watchable",
"Data",
]
def _safe_signature(func):
try:
return signature(func)
except ValueError:
# builtin methods don't have sigantures
return Signature(
[
Parameter("args", Parameter.VAR_POSITIONAL),
Parameter("kwargs", Parameter.VAR_KEYWORD),
]
)
def _signature_breakdown(func):
args = []
defaults = []
var_keyword = None
var_positional = None
sig = _safe_signature(func)
for param in sig.parameters.values():
if param.kind == Parameter.VAR_POSITIONAL:
var_positional = param.name
elif param.kind == Parameter.VAR_KEYWORD:
var_keyword = param.name
else:
if param.default is not Parameter.empty:
defaults.append(param.default)
args.append(param.name)
return str(sig), tuple(args), defaults, var_positional, var_keyword
class Spectator(object):
    """An object for holding callbacks registered against a Watchable type."""

    def __init__(self, subclass):
        """Create a Spectator that can be registered to a :class:`Watchable` instance.

        Parameters
        ----------
        subclass: type
            The :class:`Watchable` subclass whose instances this
            :class:`Spectator` can respond to.
        """
        if not issubclass(subclass, Watchable):
            raise TypeError("Expected a Watchable, not %r." % subclass)
        self.subclass = subclass
        # Maps a method name to a list of (beforeback, afterback) pairs.
        self._callback_registry = {}

    def callback(self, name, before=None, after=None):
        """Add a callback pair to this spectator.

        You can specify, with keywords, whether each callback should be triggered
        before, and/or after a given method is called - hereafter referred to as
        "beforebacks" and "afterbacks" respectively.

        Parameters
        ----------
        name: str or list or tuple
            The name(s) of the method(s) to which callbacks should respond.
        before: None or callable
            A callable of the form ``before(obj, call)`` where ``obj`` is
            the instance which called a watched method, and ``call`` is a
            :class:`Data` containing the name of the called method, along with
            its positional and keyword arguments under the attributes "name"
            "args", and "kwargs" respectively.
        after: None or callable
            A callable of the form ``after(obj, answer)`` where ``obj`` is
            the instance which called a watched method, and ``answer`` is a
            :class:`Data` containing the name of the called method, along with
            the value it returned, and data ``before`` may have returned
            under the attributes "name", "value", and "before" respectively.

        Raises
        ------
        ValueError
            If ``name`` has no :class:`MethodSpectator`, if neither callback
            is given, or if a given callback is not callable.
        """
        if isinstance(name, (list, tuple)):
            # Register the same pair under every listed method name.
            for name in name:
                self.callback(name, before, after)
        else:
            if not isinstance(getattr(self.subclass, name), MethodSpectator):
                raise ValueError("No method specator for '%s'" % name)
            if before is None and after is None:
                raise ValueError("No pre or post '%s' callbacks were given" % name)
            elif before is not None and not callable(before):
                raise ValueError("Expected a callable, not %r." % before)
            elif after is not None and not callable(after):
                raise ValueError("Expected a callable, not %r." % after)
            # BUG FIX: a second, unreachable "before is None and after is None"
            # branch used to follow here; it has been removed.
            self._callback_registry.setdefault(name, []).append((before, after))

    def remove_callback(self, name, before=None, after=None):
        """Remove a beforeback, and afterback pair from this Spectator.

        If ``before`` and ``after`` are None then all callbacks for
        the given method will be removed. Otherwise, only the exact
        callback pair will be removed.

        Parameters
        ----------
        name: str or list or tuple
            The name(s) of the method(s) the callback pair is associated with.
        before: None or callable
            The beforeback that was originally registered to the given method.
        after: None or callable
            The afterback that was originally registered to the given method.

        Raises
        ------
        KeyError
            If ``before`` and ``after`` are both None and no callbacks are
            registered under ``name``.
        ValueError
            If the exact ``(before, after)`` pair is not registered.
        """
        if isinstance(name, (list, tuple)):
            for name in name:
                self.remove_callback(name, before, after)
        elif before is None and after is None:
            # Drop every callback registered under this method name.
            del self._callback_registry[name]
        else:
            # BUG FIX: previously a missing ``name`` inserted an empty list
            # into the registry before ``list.remove`` raised, leaving a
            # stale empty entry behind. ``get`` keeps the registry clean
            # while preserving the ValueError for unknown pairs.
            callback_list = self._callback_registry.get(name, [])
            callback_list.remove((before, after))
            if len(callback_list) == 0 and name in self._callback_registry:
                # cleanup if all callbacks are gone
                del self._callback_registry[name]

    def call(self, obj, name, method, args, kwargs):
        """Trigger a method along with its beforebacks and afterbacks.

        Parameters
        ----------
        obj: Watchable
            The instance whose method is being called.
        name: str
            The name of the method that will be called.
        method: callable
            The underlying (bound) base method to invoke.
        args: tuple
            The arguments that will be passed to the base method.
        kwargs: dict
            The keyword args that will be passed to the base method.
        """
        if name in self._callback_registry:
            beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))
            hold = []
            for b in beforebacks:
                if b is not None:
                    # Copy kwargs so a beforeback cannot mutate the real call.
                    call = Data(name=name, kwargs=kwargs.copy(), args=args)
                    v = b(obj, call)
                else:
                    v = None
                hold.append(v)
            out = method(*args, **kwargs)
            for a, bval in zip(afterbacks, hold):
                if a is not None:
                    a(obj, Data(before=bval, name=name, value=out))
                elif callable(bval):
                    # the beforeback's return value was an
                    # afterback that expects to be called
                    bval(out)
            return out
        else:
            return method(*args, **kwargs)
class MethodSpectator(object):
    """Descriptor that notifies a :class:`Spectator` when its wrapped method is called."""

    def __init__(self, basemethod, name):
        if not callable(basemethod):
            # Allow re-wrapping an existing MethodSpectator by unwrapping it.
            if isinstance(basemethod, MethodSpectator):
                basemethod = basemethod.basemethod
            else:
                raise TypeError("Expected a callable, not %r" % basemethod)
        self.basemethod = basemethod
        self.name = name

    def call(self, obj, cls):
        """Build a wrapper that routes calls through ``obj``'s spectator."""
        spectator = obj._instance_spectator
        base = self.basemethod
        # Bind descriptor-style callables (e.g. plain functions) to the instance.
        bound = base.__get__(obj, cls) if hasattr(base, "__get__") else base

        @wraps(bound)
        def wrapper(*args, **kwargs):
            return spectator.call(obj, self.name, bound, args, kwargs)

        if not hasattr(wrapper, "__wrapped__"):
            wrapper.__wrapped__ = bound
        return wrapper

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        # Watched instances get the notifying wrapper...
        if hasattr(obj, "_instance_spectator"):
            return self.call(obj, cls)
        # ...everyone else gets the plain (possibly bound) base method.
        if hasattr(self.basemethod, "__get__"):
            return self.basemethod.__get__(obj, cls)
        return self.basemethod
class Watchable(object):
    """A base class for introspection.

    On Python >= 3.6 it rewraps overridden methods with a
    :class:`MethodSpectator` if appropriate.
    """

    # ``__init_subclass__`` only exists on Python >= 3.6, so the hook is
    # defined conditionally; older interpreters simply skip the rewrapping.
    if not sys.version_info < (3, 6):

        def __init_subclass__(cls, **kwargs):
            # If a subclass overrides a :class:`MethodSpectator` method, then rewrap it.
            for base in cls.mro()[1:]:
                if issubclass(base, Watchable):
                    for k, v in base.__dict__.items():
                        # Only rewrap plain callables that shadow a watched
                        # method; an explicit MethodSpectator override is
                        # left untouched.
                        if k in cls.__dict__ and isinstance(v, MethodSpectator):
                            new = getattr(cls, k)
                            if callable(new) and not isinstance(new, MethodSpectator):
                                method_spectator = MethodSpectator(new, k)
                                setattr(cls, k, method_spectator)
            super().__init_subclass__(**kwargs)
def expose(*methods):
    """A class decorator exposing the named methods to callback registration.

    Parameters
    ----------
    *methods : str
        Names of the methods that should be exposed to callbacks.

    Returns
    -------
    decorator : function
        Accepts a single class and returns a new :class:`Watchable` type
        whose listed methods notify a :class:`Spectator` when called.

    Notes
    -----
    This is essentially a decorator version of :func:`expose_as`.
    """

    def decorator(base):
        # Reuse the decorated class's own name for the exposed type.
        return expose_as(base.__name__, base, *methods)

    return decorator
def expose_as(name, base, *methods):
    """Return a new type whose listed methods are exposed to callback registration.

    Parameters
    ----------
    name : str
        The name of the new type.
    base : type
        A type such as list or dict.
    *methods : str
        Names of the methods that should be exposed to callbacks.

    Returns
    -------
    exposed : type
        A :class:`Watchable` subclass of ``base`` whose listed methods
        notify a :class:`Spectator` when called.
    """
    namespace = {}
    for attr in methods:
        if not hasattr(base, attr):
            raise AttributeError(
                "Cannot expose '%s', because '%s' "
                "instances lack this method" % (attr, base.__name__)
            )
        # Shadow the base method with a spectator-notifying descriptor.
        namespace[attr] = MethodSpectator(getattr(base, attr), attr)
    return type(name, (base, Watchable), namespace)
def watchable(value):
    """Returns True if the given value is a :class:`Watchable` subclass or instance."""
    if inspect.isclass(value):
        return issubclass(value, Watchable)
    return isinstance(value, Watchable)
def watch(value, spectator_type=Spectator):
    """Register a :class:`Spectator` to a :class:`Watchable` and return it.

    To register callbacks against an eventful object you need a
    :class:`Spectator` watching it - a simple object with methods for
    adding, deleting, and triggering callbacks. Calling
    ``spectator = watch(x)`` creates (or retrieves) that spectator for
    the :class:`Watchable` instance ``x``.

    Parameters
    ----------
    value : Watchable
        A :class:`Watchable` instance.
    spectator_type : Spectator
        The type of spectator that will be created when none exists yet.

    Returns
    -------
    spectator: spectator_type
        The :class:`Spectator` registered to the given instance.
    """
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    # Reuse an existing Spectator when one is already registered.
    spectator = getattr(value, "_instance_spectator", None)
    if not isinstance(spectator, Spectator):
        spectator = spectator_type(type(value))
        value._instance_spectator = spectator
    return spectator
def unwatch(value):
    """Detach and return the :class:`Spectator` of a :class:`Watchable` instance."""
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    spectator = watcher(value)
    # Best-effort removal; the instance may never have been watched.
    try:
        del value._instance_spectator
    except Exception:
        pass
    return spectator
def watcher(value):
    """Return the :class:`Spectator` of a :class:`Watchable` instance."""
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    # ``None`` when no Spectator has been registered via ``watch``.
    return getattr(value, "_instance_spectator", None)
# ``Mapping`` lives in ``collections.abc`` on Python 3, and the old
# top-level alias was removed in Python 3.10; resolve whichever exists.
try:
    from collections.abc import Mapping as _Mapping
except ImportError:  # Python 2
    from collections import Mapping as _Mapping


class Data(_Mapping):
    """An immutable mapping with attribute-access.

    Empty keys are represented with a value of ``None``.

    In order to evolve :class:`Data`, users must create copies that
    contain updates:

    .. code-block:: python

        d1 = Data(a=1)
        d2 = Data(b=2)
        assert Data(d1, **d2) == {'a': 1, 'b': 2}

    Easing this fact, is :class:`Data`'s syntactic sugar:

    .. code-block:: python

        d1 = Data(a=1)
        assert d1 == {'a': 1}
        d2 = d1['b': 2]
        assert d2 == {'a': 1, 'b': 2}
        d3 = d2['a': None, 'b': 1]
        assert d3 == {'b': 1}
        d4 = d3[{'a': 1, 'c': 3}, {'b': None}]
        assert d4 == {'a': 1, 'c': 3}

    .. note::

        NOTE(review): ``__init__`` stores ``None``-valued keys rather than
        dropping them, so the ``d3``/``d4`` examples above do not hold as
        written -- confirm the intended semantics for ``None`` values.
    """

    def __init__(self, *args, **kwargs):
        # Write directly into __dict__ because __setattr__ always raises.
        self.__dict__.update(*args, **kwargs)

    def __getattr__(self, key):
        # Missing attributes read as None instead of raising AttributeError.
        return None

    def __getitem__(self, key):
        """Look up a key, or build an updated copy from slices/mappings."""
        if type(key) is slice:
            # Normalize a single slice into a one-element tuple.
            key = (key,)
        if type(key) is tuple:
            for x in key:
                if not isinstance(x, slice):
                    break
            else:
                # All slices: d['a': 1] -> updated copy with {'a': 1}.
                new = {s.start: s.stop for s in key}
                return type(self)(self, **new)
            # Otherwise merge any mappings in the tuple, left to right.
            merge = {}
            for x in key:
                if isinstance(x, _Mapping):
                    merge.update(x)
            key = merge
        if isinstance(key, _Mapping):
            # A mapping key also produces an updated copy.
            return type(self)(self, **key)
        # Plain keys behave like ``dict.get``.
        return self.__dict__.get(key)

    def __setitem__(self, key, value):
        # BUG FIX: the message was never %-formatted before.
        raise TypeError("%r is immutable" % self)

    def __setattr__(self, key, value):
        raise TypeError("%r is immutable" % self)

    def __delitem__(self, key):
        raise TypeError("%r is immutable" % self)

    def __delattr__(self, key):
        raise TypeError("%r is immutable" % self)

    def __contains__(self, key):
        # Compare against the keys without hashing so unhashable
        # candidates simply compare unequal instead of raising.
        return key in tuple(self)

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)
# The MIT License (MIT)
# Copyright (c) 2016 Ryan S. Morshead
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
rmorshea/spectate
|
spectate/core.py
|
unwatch
|
python
|
def unwatch(value):
if not isinstance(value, Watchable):
raise TypeError("Expected a Watchable, not %r." % value)
spectator = watcher(value)
try:
del value._instance_spectator
except Exception:
pass
return spectator
|
Return the :class:`Specatator` of a :class:`Watchable` instance.
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L362-L371
|
[
"def watcher(value):\n \"\"\"Return the :class:`Specatator` of a :class:`Watchable` instance.\"\"\"\n if not isinstance(value, Watchable):\n raise TypeError(\"Expected a Watchable, not %r.\" % value)\n return getattr(value, \"_instance_spectator\", None)\n"
] |
# See End Of File For Licensing
import sys
import inspect
import collections
from functools import wraps
try:
from inspect import signature, Parameter, Signature
except ImportError:
from funcsigs import signature, Parameter, Signature
__all__ = [
"expose",
"expose_as",
"watch",
"watched",
"unwatch",
"watcher",
"watchable",
"Watchable",
"Data",
]
def _safe_signature(func):
    # Like ``inspect.signature`` but never raises: C-level builtins that
    # lack signature metadata fall back to a generic (*args, **kwargs).
    try:
        return signature(func)
    except ValueError:
        # builtin methods don't have signatures
        return Signature(
            [
                Parameter("args", Parameter.VAR_POSITIONAL),
                Parameter("kwargs", Parameter.VAR_KEYWORD),
            ]
        )
def _signature_breakdown(func):
    """Split ``func``'s signature into its parts.

    Returns a 5-tuple: the signature as a string, a tuple of named
    parameter names, a list of default values in declaration order, the
    ``*args``-style name (or None), and the ``**kwargs``-style name (or None).
    """
    args = []
    defaults = []
    var_keyword = None
    var_positional = None
    sig = _safe_signature(func)
    for param in sig.parameters.values():
        if param.kind == Parameter.VAR_POSITIONAL:
            var_positional = param.name
        elif param.kind == Parameter.VAR_KEYWORD:
            var_keyword = param.name
        else:
            # Named (non-star) parameters; record a default when present.
            if param.default is not Parameter.empty:
                defaults.append(param.default)
            args.append(param.name)
    return str(sig), tuple(args), defaults, var_positional, var_keyword
class Spectator(object):
    """An object for holding callbacks registered against a Watchable type."""

    def __init__(self, subclass):
        """Create a Spectator that can be registered to a :class:`Watchable` instance.

        Parameters
        ----------
        subclass: type
            The :class:`Watchable` subclass whose instances this
            :class:`Spectator` can respond to.
        """
        if not issubclass(subclass, Watchable):
            raise TypeError("Expected a Watchable, not %r." % subclass)
        self.subclass = subclass
        # Maps a method name to a list of (beforeback, afterback) pairs.
        self._callback_registry = {}

    def callback(self, name, before=None, after=None):
        """Add a callback pair to this spectator.

        You can specify, with keywords, whether each callback should be triggered
        before, and/or after a given method is called - hereafter referred to as
        "beforebacks" and "afterbacks" respectively.

        Parameters
        ----------
        name: str or list or tuple
            The name(s) of the method(s) to which callbacks should respond.
        before: None or callable
            A callable of the form ``before(obj, call)`` where ``obj`` is
            the instance which called a watched method, and ``call`` is a
            :class:`Data` containing the name of the called method, along with
            its positional and keyword arguments under the attributes "name"
            "args", and "kwargs" respectively.
        after: None or callable
            A callable of the form ``after(obj, answer)`` where ``obj`` is
            the instance which called a watched method, and ``answer`` is a
            :class:`Data` containing the name of the called method, along with
            the value it returned, and data ``before`` may have returned
            under the attributes "name", "value", and "before" respectively.
        """
        if isinstance(name, (list, tuple)):
            # Register the same pair under every listed method name.
            for name in name:
                self.callback(name, before, after)
        else:
            if not isinstance(getattr(self.subclass, name), MethodSpectator):
                raise ValueError("No method specator for '%s'" % name)
            if before is None and after is None:
                raise ValueError("No pre or post '%s' callbacks were given" % name)
            elif before is not None and not callable(before):
                raise ValueError("Expected a callable, not %r." % before)
            elif after is not None and not callable(after):
                raise ValueError("Expected a callable, not %r." % after)
            elif before is None and after is None:
                # NOTE(review): unreachable -- the same condition is already
                # checked (and raises) above.
                raise ValueError("No callbacks were given.")
            if name in self._callback_registry:
                callback_list = self._callback_registry[name]
            else:
                callback_list = []
                self._callback_registry[name] = callback_list
            callback_list.append((before, after))

    def remove_callback(self, name, before=None, after=None):
        """Remove a beforeback, and afterback pair from this Spectator.

        If ``before`` and ``after`` are None then all callbacks for
        the given method will be removed. Otherwise, only the exact
        callback pair will be removed.

        Parameters
        ----------
        name: str or list or tuple
            The name(s) of the method(s) the callback pair is associated with.
        before: None or callable
            The beforeback that was originally registered to the given method.
        after: None or callable
            The afterback that was originally registered to the given method.
        """
        if isinstance(name, (list, tuple)):
            for name in name:
                self.remove_callback(name, before, after)
        elif before is None and after is None:
            # Drop every callback registered under this method name
            # (KeyError if none exist).
            del self._callback_registry[name]
        else:
            if name in self._callback_registry:
                callback_list = self._callback_registry[name]
            else:
                # NOTE(review): registering an empty list here means a
                # failing ``remove`` below leaves a stale empty entry behind.
                callback_list = []
                self._callback_registry[name] = callback_list
            callback_list.remove((before, after))
            if len(callback_list) == 0:
                # cleanup if all callbacks are gone
                del self._callback_registry[name]

    def call(self, obj, name, method, args, kwargs):
        """Trigger a method along with its beforebacks and afterbacks.

        Parameters
        ----------
        obj: Watchable
            The instance whose method is being called.
        name: str
            The name of the method that will be called
        method: callable
            The underlying (bound) base method to invoke.
        args: tuple
            The arguments that will be passed to the base method
        kwargs: dict
            The keyword args that will be passed to the base method
        """
        if name in self._callback_registry:
            beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))
            hold = []
            for b in beforebacks:
                if b is not None:
                    # Copy kwargs so a beforeback cannot mutate the real call.
                    call = Data(name=name, kwargs=kwargs.copy(), args=args)
                    v = b(obj, call)
                else:
                    v = None
                hold.append(v)
            out = method(*args, **kwargs)
            for a, bval in zip(afterbacks, hold):
                if a is not None:
                    a(obj, Data(before=bval, name=name, value=out))
                elif callable(bval):
                    # the beforeback's return value was an
                    # afterback that expects to be called
                    bval(out)
            return out
        else:
            return method(*args, **kwargs)
class MethodSpectator(object):
    """Notifies a :class:`Spectator` when the method this descriptor wraps is called."""

    def __init__(self, basemethod, name):
        # basemethod: the underlying callable to wrap; name: the attribute
        # name it is exposed under on the watched class.
        if not callable(basemethod):
            # Allow re-wrapping an existing MethodSpectator by unwrapping it.
            if isinstance(basemethod, MethodSpectator):
                basemethod = basemethod.basemethod
            else:
                raise TypeError("Expected a callable, not %r" % basemethod)
        self.basemethod = basemethod
        self.name = name

    def call(self, obj, cls):
        """Return a wrapper that routes calls through ``obj``'s spectator."""
        spectator = obj._instance_spectator
        if hasattr(self.basemethod, "__get__"):
            # Bind descriptor-style callables (e.g. plain functions) to obj.
            method = self.basemethod.__get__(obj, cls)
        else:
            method = self.basemethod

        @wraps(method)
        def wrapper(*args, **kwargs):
            return spectator.call(obj, self.name, method, args, kwargs)

        if not hasattr(wrapper, "__wrapped__"):
            wrapper.__wrapped__ = method
        return wrapper

    def __get__(self, obj, cls):
        # Class access -> the descriptor itself; watched instance -> the
        # notifying wrapper; unwatched instance -> the plain base method.
        if obj is None:
            return self
        elif hasattr(obj, "_instance_spectator"):
            return self.call(obj, cls)
        elif hasattr(self.basemethod, "__get__"):
            return self.basemethod.__get__(obj, cls)
        else:
            return self.basemethod
class Watchable(object):
    """A base class for introspection.

    On Python >= 3.6 it rewraps overridden methods with a
    :class:`MethodSpectator` if appropriate.
    """

    # ``__init_subclass__`` only exists on Python >= 3.6, so the hook is
    # defined conditionally; older interpreters simply skip the rewrapping.
    if not sys.version_info < (3, 6):

        def __init_subclass__(cls, **kwargs):
            # If a subclass overrides a :class:`MethodSpectator` method, then rewrap it.
            for base in cls.mro()[1:]:
                if issubclass(base, Watchable):
                    for k, v in base.__dict__.items():
                        # Only rewrap plain callables that shadow a watched
                        # method; an explicit MethodSpectator override is
                        # left untouched.
                        if k in cls.__dict__ and isinstance(v, MethodSpectator):
                            new = getattr(cls, k)
                            if callable(new) and not isinstance(new, MethodSpectator):
                                method_spectator = MethodSpectator(new, k)
                                setattr(cls, k, method_spectator)
            super().__init_subclass__(**kwargs)
def expose(*methods):
    """A decorator for exposing the methods of a class.

    Parameters
    ----------
    *methods : str
        A str representation of the methods that should be exposed to callbacks.

    Returns
    -------
    decorator : function
        A function accepting one argument - the class whose methods will be
        exposed - and which returns a new :class:`Watchable` that will
        notify a :class:`Spectator` when those methods are called.

    Notes
    -----
    This is essentially a decorator version of :func:`expose_as`
    """

    def setup(base):
        # Reuse the decorated class's own name for the exposed type.
        return expose_as(base.__name__, base, *methods)

    return setup
def expose_as(name, base, *methods):
    """Return a new type with certain methods that are exposed to callback registration.

    Parameters
    ----------
    name : str
        The name of the new type.
    base : type
        A type such as list or dict.
    *methods : str
        A str representation of the methods that should be exposed to callbacks.

    Returns
    -------
    exposed : type
        A :class:`Watchable` with methods that will notify a :class:`Spectator`.
    """
    classdict = {}
    for method in methods:
        if not hasattr(base, method):
            raise AttributeError(
                "Cannot expose '%s', because '%s' "
                "instances lack this method" % (method, base.__name__)
            )
        else:
            # Shadow the base method with a spectator-notifying descriptor.
            classdict[method] = MethodSpectator(getattr(base, method), method)
    return type(name, (base, Watchable), classdict)
def watchable(value):
    """Returns True if the given value is a :class:`Watchable` subclass or instance."""
    # Classes are tested with issubclass, instances with isinstance.
    check = issubclass if inspect.isclass(value) else isinstance
    return check(value, Watchable)
def watch(value, spectator_type=Spectator):
    """Register a :class:`Spectator` to a :class:`Watchable` and return it.

    In order to register callbacks to an eventful object, you need to create
    a Spectator that will watch it for you. A :class:`Spectator` is a relatively simple
    object that has methods for adding, deleting, and triggering callbacks. To
    create a spectator we call ``spectator = watch(x)``, where x is a Watchable
    instance.

    Parameters
    ----------
    value : Watchable
        A :class:`Watchable` instance.
    spectator_type : Spectator
        The type of spectator that will be returned.

    Returns
    -------
    spectator: spectator_type
        The :class:`Spectator` (specified by ``spectator_type``) that
        was registered to the given instance.
    """
    if isinstance(value, Watchable):
        wtype = type(value)
    else:
        raise TypeError("Expected a Watchable, not %r." % value)
    # Reuse an existing Spectator when one is already registered.
    spectator = getattr(value, "_instance_spectator", None)
    if not isinstance(spectator, Spectator):
        spectator = spectator_type(wtype)
        value._instance_spectator = spectator
    return spectator
def watched(cls, *args, **kwargs):
    """Instantiate ``cls`` and return ``(instance, spectator)``.

    See :func:`watch` for more info on :class:`Spectator` registration.

    Parameters
    ----------
    cls: type:
        A subclass of :class:`Watchable`
    *args:
        Positional arguments used to create the instance
    **kwargs:
        Keyword arguments used to create the instance.
    """
    instance = cls(*args, **kwargs)
    spectator = watch(instance)
    return instance, spectator
def watcher(value):
    """Return the :class:`Spectator` of a :class:`Watchable` instance."""
    if not isinstance(value, Watchable):
        raise TypeError("Expected a Watchable, not %r." % value)
    # ``None`` when no Spectator has been registered via ``watch``.
    return getattr(value, "_instance_spectator", None)
class Data(collections.Mapping):
    """An immutable mapping with attribute-access.

    Empty keys are represented with a value of ``None``.

    In order to evolve :class:`Data`, users must create copies that
    contain updates:

    .. code-block:: python

        d1 = Data(a=1)
        d2 = Data(b=2)
        assert Data(d1, **d2) == {'a': 1, 'b': 2}

    Easing this fact, is :class:`Data`'s syntactic sugar:

    .. code-block:: python

        d1 = Data(a=1)
        assert d1 == {'a': 1}
        d2 = d1['b': 2]
        assert d2 == {'a': 1, 'b': 2}
        d3 = d2['a': None, 'b': 1]
        assert d3 == {'b': 1}
        d4 = d3[{'a': 1, 'c': 3}, {'b': None}]
        assert d4 == {'a': 1, 'c': 3}

    .. note::

        NOTE(review): ``__init__`` stores ``None``-valued keys rather than
        dropping them, so the ``d3``/``d4`` examples above do not hold as
        written -- confirm the intended semantics for ``None`` values.
        Also, ``collections.Mapping`` was removed in Python 3.10; on modern
        interpreters this base must come from ``collections.abc``.
    """

    def __init__(self, *args, **kwargs):
        # Write directly into __dict__ because __setattr__ always raises.
        self.__dict__.update(*args, **kwargs)

    def __getattr__(self, key):
        # Missing attributes read as None instead of raising AttributeError.
        return None

    def __getitem__(self, key):
        # Normalize a single slice into a one-element tuple.
        if type(key) is slice:
            key = (key,)
        if type(key) is tuple:
            for x in key:
                if not isinstance(x, slice):
                    break
            else:
                # All slices: d['a': 1] -> updated copy with {'a': 1}.
                new = {s.start: s.stop for s in key}
                return type(self)(self, **new)
            # Otherwise merge any mappings in the tuple, left to right.
            merge = {}
            for x in key:
                if isinstance(x, collections.Mapping):
                    merge.update(x)
            key = merge
        if isinstance(key, collections.Mapping):
            # A mapping key also produces an updated copy.
            return type(self)(self, **key)
        # Plain keys behave like ``dict.get``.
        return self.__dict__.get(key)

    def __setitem__(self, key, value):
        # NOTE(review): "%r" is never %-formatted, so the raised message is
        # literally "%r is immutable" (same for the three methods below).
        raise TypeError("%r is immutable")

    def __setattr__(self, key, value):
        raise TypeError("%r is immutable")

    def __delitem__(self, key):
        raise TypeError("%r is immutable")

    def __delattr__(self, key):
        raise TypeError("%r is immutable")

    def __contains__(self, key):
        # Membership over keys; tuple(self) iterates __dict__ keys.
        return key in tuple(self)

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)
# The MIT License (MIT)
# Copyright (c) 2016 Ryan S. Morshead
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
rmorshea/spectate
|
spectate/core.py
|
Spectator.callback
|
python
|
def callback(self, name, before=None, after=None):
if isinstance(name, (list, tuple)):
for name in name:
self.callback(name, before, after)
else:
if not isinstance(getattr(self.subclass, name), MethodSpectator):
raise ValueError("No method specator for '%s'" % name)
if before is None and after is None:
raise ValueError("No pre or post '%s' callbacks were given" % name)
elif before is not None and not callable(before):
raise ValueError("Expected a callable, not %r." % before)
elif after is not None and not callable(after):
raise ValueError("Expected a callable, not %r." % after)
elif before is None and after is None:
raise ValueError("No callbacks were given.")
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.append((before, after))
|
Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
before, and/or or after a given method is called - hereafter refered to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
A callable of the form ``after(obj, answer)`` where ``obj` is
the instance which alled a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively.
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L74-L117
|
[
"def callback(self, name, before=None, after=None):\n \"\"\"Add a callback pair to this spectator.\n\n You can specify, with keywords, whether each callback should be triggered\n before, and/or or after a given method is called - hereafter refered to as\n \"beforebacks\" and \"afterbacks\" respectively.\n\n Parameters\n ----------\n name: str\n The name of the method to which callbacks should respond.\n before: None or callable\n A callable of the form ``before(obj, call)`` where ``obj`` is\n the instance which called a watched method, and ``call`` is a\n :class:`Data` containing the name of the called method, along with\n its positional and keyword arguments under the attributes \"name\"\n \"args\", and \"kwargs\" respectively.\n after: None or callable\n A callable of the form ``after(obj, answer)`` where ``obj` is\n the instance which alled a watched method, and ``answer`` is a\n :class:`Data` containing the name of the called method, along with\n the value it returned, and data ``before`` may have returned\n under the attributes \"name\", \"value\", and \"before\" respectively.\n \"\"\"\n if isinstance(name, (list, tuple)):\n for name in name:\n self.callback(name, before, after)\n else:\n if not isinstance(getattr(self.subclass, name), MethodSpectator):\n raise ValueError(\"No method specator for '%s'\" % name)\n if before is None and after is None:\n raise ValueError(\"No pre or post '%s' callbacks were given\" % name)\n elif before is not None and not callable(before):\n raise ValueError(\"Expected a callable, not %r.\" % before)\n elif after is not None and not callable(after):\n raise ValueError(\"Expected a callable, not %r.\" % after)\n elif before is None and after is None:\n raise ValueError(\"No callbacks were given.\")\n if name in self._callback_registry:\n callback_list = self._callback_registry[name]\n else:\n callback_list = []\n self._callback_registry[name] = callback_list\n callback_list.append((before, after))\n"
] |
class Spectator(object):
"""An object for holding callbacks"""
def __init__(self, subclass):
"""Create a Spectator that can be registered to a :class:`Watchable` instance.
Parameters
----------
subclass: type
A the :class:`Watchable` subclass whose instance this
:class:`Specatator` can respond to.
"""
if not issubclass(subclass, Watchable):
raise TypeError("Expected a Watchable, not %r." % subclass)
self.subclass = subclass
self._callback_registry = {}
def remove_callback(self, name, before=None, after=None):
"""Remove a beforeback, and afterback pair from this Spectator
If ``before`` and ``after`` are None then all callbacks for
the given method will be removed. Otherwise, only the exact
callback pair will be removed.
Parameters
----------
name: str
The name of the method the callback pair is associated with.
before: None or callable
The beforeback that was originally registered to the given method.
after: None or callable
The afterback that was originally registered to the given method.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.remove_callback(name, before, after)
elif before is None and after is None:
del self._callback_registry[name]
else:
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.remove((before, after))
if len(callback_list) == 0:
# cleanup if all callbacks are gone
del self._callback_registry[name]
def call(self, obj, name, method, args, kwargs):
"""Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
name: str
The name of the method that will be called
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method
"""
if name in self._callback_registry:
beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))
hold = []
for b in beforebacks:
if b is not None:
call = Data(name=name, kwargs=kwargs.copy(), args=args)
v = b(obj, call)
else:
v = None
hold.append(v)
out = method(*args, **kwargs)
for a, bval in zip(afterbacks, hold):
if a is not None:
a(obj, Data(before=bval, name=name, value=out))
elif callable(bval):
# the beforeback's return value was an
# afterback that expects to be called
bval(out)
return out
else:
return method(*args, **kwargs)
|
rmorshea/spectate
|
spectate/core.py
|
Spectator.remove_callback
|
python
|
def remove_callback(self, name, before=None, after=None):
if isinstance(name, (list, tuple)):
for name in name:
self.remove_callback(name, before, after)
elif before is None and after is None:
del self._callback_registry[name]
else:
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.remove((before, after))
if len(callback_list) == 0:
# cleanup if all callbacks are gone
del self._callback_registry[name]
|
Remove a beforeback, and afterback pair from this Spectator
If ``before`` and ``after`` are None then all callbacks for
the given method will be removed. Otherwise, only the exact
callback pair will be removed.
Parameters
----------
name: str
The name of the method the callback pair is associated with.
before: None or callable
The beforeback that was originally registered to the given method.
after: None or callable
The afterback that was originally registered to the given method.
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L119-L149
|
[
"def remove_callback(self, name, before=None, after=None):\n \"\"\"Remove a beforeback, and afterback pair from this Spectator\n\n If ``before`` and ``after`` are None then all callbacks for\n the given method will be removed. Otherwise, only the exact\n callback pair will be removed.\n\n Parameters\n ----------\n name: str\n The name of the method the callback pair is associated with.\n before: None or callable\n The beforeback that was originally registered to the given method.\n after: None or callable\n The afterback that was originally registered to the given method.\n \"\"\"\n if isinstance(name, (list, tuple)):\n for name in name:\n self.remove_callback(name, before, after)\n elif before is None and after is None:\n del self._callback_registry[name]\n else:\n if name in self._callback_registry:\n callback_list = self._callback_registry[name]\n else:\n callback_list = []\n self._callback_registry[name] = callback_list\n callback_list.remove((before, after))\n if len(callback_list) == 0:\n # cleanup if all callbacks are gone\n del self._callback_registry[name]\n"
] |
class Spectator(object):
"""An object for holding callbacks"""
def __init__(self, subclass):
"""Create a Spectator that can be registered to a :class:`Watchable` instance.
Parameters
----------
subclass: type
A the :class:`Watchable` subclass whose instance this
:class:`Specatator` can respond to.
"""
if not issubclass(subclass, Watchable):
raise TypeError("Expected a Watchable, not %r." % subclass)
self.subclass = subclass
self._callback_registry = {}
def callback(self, name, before=None, after=None):
"""Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
before, and/or or after a given method is called - hereafter refered to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
A callable of the form ``after(obj, answer)`` where ``obj` is
the instance which alled a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.callback(name, before, after)
else:
if not isinstance(getattr(self.subclass, name), MethodSpectator):
raise ValueError("No method specator for '%s'" % name)
if before is None and after is None:
raise ValueError("No pre or post '%s' callbacks were given" % name)
elif before is not None and not callable(before):
raise ValueError("Expected a callable, not %r." % before)
elif after is not None and not callable(after):
raise ValueError("Expected a callable, not %r." % after)
elif before is None and after is None:
raise ValueError("No callbacks were given.")
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.append((before, after))
def call(self, obj, name, method, args, kwargs):
"""Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
name: str
The name of the method that will be called
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method
"""
if name in self._callback_registry:
beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))
hold = []
for b in beforebacks:
if b is not None:
call = Data(name=name, kwargs=kwargs.copy(), args=args)
v = b(obj, call)
else:
v = None
hold.append(v)
out = method(*args, **kwargs)
for a, bval in zip(afterbacks, hold):
if a is not None:
a(obj, Data(before=bval, name=name, value=out))
elif callable(bval):
# the beforeback's return value was an
# afterback that expects to be called
bval(out)
return out
else:
return method(*args, **kwargs)
|
rmorshea/spectate
|
spectate/core.py
|
Spectator.call
|
python
|
def call(self, obj, name, method, args, kwargs):
if name in self._callback_registry:
beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))
hold = []
for b in beforebacks:
if b is not None:
call = Data(name=name, kwargs=kwargs.copy(), args=args)
v = b(obj, call)
else:
v = None
hold.append(v)
out = method(*args, **kwargs)
for a, bval in zip(afterbacks, hold):
if a is not None:
a(obj, Data(before=bval, name=name, value=out))
elif callable(bval):
# the beforeback's return value was an
# afterback that expects to be called
bval(out)
return out
else:
return method(*args, **kwargs)
|
Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
name: str
The name of the method that will be called
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method
|
train
|
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L151-L186
| null |
class Spectator(object):
"""An object for holding callbacks"""
def __init__(self, subclass):
"""Create a Spectator that can be registered to a :class:`Watchable` instance.
Parameters
----------
subclass: type
A the :class:`Watchable` subclass whose instance this
:class:`Specatator` can respond to.
"""
if not issubclass(subclass, Watchable):
raise TypeError("Expected a Watchable, not %r." % subclass)
self.subclass = subclass
self._callback_registry = {}
def callback(self, name, before=None, after=None):
"""Add a callback pair to this spectator.
You can specify, with keywords, whether each callback should be triggered
before, and/or or after a given method is called - hereafter refered to as
"beforebacks" and "afterbacks" respectively.
Parameters
----------
name: str
The name of the method to which callbacks should respond.
before: None or callable
A callable of the form ``before(obj, call)`` where ``obj`` is
the instance which called a watched method, and ``call`` is a
:class:`Data` containing the name of the called method, along with
its positional and keyword arguments under the attributes "name"
"args", and "kwargs" respectively.
after: None or callable
A callable of the form ``after(obj, answer)`` where ``obj` is
the instance which alled a watched method, and ``answer`` is a
:class:`Data` containing the name of the called method, along with
the value it returned, and data ``before`` may have returned
under the attributes "name", "value", and "before" respectively.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.callback(name, before, after)
else:
if not isinstance(getattr(self.subclass, name), MethodSpectator):
raise ValueError("No method specator for '%s'" % name)
if before is None and after is None:
raise ValueError("No pre or post '%s' callbacks were given" % name)
elif before is not None and not callable(before):
raise ValueError("Expected a callable, not %r." % before)
elif after is not None and not callable(after):
raise ValueError("Expected a callable, not %r." % after)
elif before is None and after is None:
raise ValueError("No callbacks were given.")
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.append((before, after))
def remove_callback(self, name, before=None, after=None):
"""Remove a beforeback, and afterback pair from this Spectator
If ``before`` and ``after`` are None then all callbacks for
the given method will be removed. Otherwise, only the exact
callback pair will be removed.
Parameters
----------
name: str
The name of the method the callback pair is associated with.
before: None or callable
The beforeback that was originally registered to the given method.
after: None or callable
The afterback that was originally registered to the given method.
"""
if isinstance(name, (list, tuple)):
for name in name:
self.remove_callback(name, before, after)
elif before is None and after is None:
del self._callback_registry[name]
else:
if name in self._callback_registry:
callback_list = self._callback_registry[name]
else:
callback_list = []
self._callback_registry[name] = callback_list
callback_list.remove((before, after))
if len(callback_list) == 0:
# cleanup if all callbacks are gone
del self._callback_registry[name]
|
zmathew/django-backbone
|
backbone/__init__.py
|
autodiscover
|
python
|
def autodiscover():
# This code is based off django.contrib.admin.__init__
from django.conf import settings
try:
# Django versions >= 1.9
from django.utils.module_loading import import_module
except ImportError:
# Django versions < 1.9
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from backbone.views import BackboneAPIView # This is to prevent a circular import issue
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's backbone module.
try:
import_module('%s.backbone_api' % app)
except:
# Decide whether to bubble up this error. If the app just
# doesn't have an backbone module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, 'backbone_api'):
raise
|
Auto-discover INSTALLED_APPS backbone_api.py modules.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/__init__.py#L16-L41
| null |
"""Provides a Backbone.js compatible REST API for your models using Django Admin style registration."""
from __future__ import unicode_literals
from backbone.sites import BackboneSite
VERSION = (0, 3, 2)
__version__ = '.'.join(map(str, VERSION))
site = BackboneSite()
|
zmathew/django-backbone
|
backbone/sites.py
|
BackboneSite.register
|
python
|
def register(self, backbone_view_class):
if backbone_view_class not in self._registry:
self._registry.append(backbone_view_class)
|
Registers the given backbone view class.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/sites.py#L10-L15
| null |
class BackboneSite(object):
def __init__(self, name='backbone'):
self._registry = []
self.name = name
def unregister(self, backbone_view_class):
if backbone_view_class in self._registry:
self._registry.remove(backbone_view_class)
def get_urls(self):
from django.conf.urls import url
urlpatterns = []
for view_class in self._registry:
app_label = view_class.model._meta.app_label
opts = view_class.model._meta
url_slug = view_class.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_path_prefix = r'^%s/%s' % (app_label, url_slug)
base_url_name = '%s_%s' % (app_label, url_slug)
urlpatterns = urlpatterns + [
url(url_path_prefix + '$', view_class.as_view(), name=base_url_name),
url(url_path_prefix + '/(?P<id>\d+)$', view_class.as_view(),
name=base_url_name + '_detail')
]
return urlpatterns
@property
def urls(self):
return (self.get_urls(), 'backbone', self.name)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.queryset
|
python
|
def queryset(self, request, **kwargs):
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
|
Returns the queryset (along with ordering) to be used when retrieving object(s).
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L29-L36
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.get
|
python
|
def get(self, request, id=None, **kwargs):
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
|
Handles get requests for either the collection or an object detail.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L38-L49
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.get_object_detail
|
python
|
def get_object_detail(self, request, obj):
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
|
Handles get requests for the details of the given object.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L51-L61
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.get_collection
|
python
|
def get_collection(self, request, **kwargs):
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
|
Handles get requests for the list of objects.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L63-L88
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.post
|
python
|
def post(self, request, id=None, **kwargs):
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
|
Handles post requests.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L90-L101
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.add_object
|
python
|
def add_object(self, request):
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
|
Adds an object.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L103-L133
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.put
|
python
|
def put(self, request, id=None, **kwargs):
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
|
Handles put requests.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L135-L147
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.update_object
|
python
|
def update_object(self, request, obj):
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
|
Updates an object.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L149-L169
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.get_form_instance
|
python
|
def get_form_instance(self, request, data=None, instance=None):
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
|
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L171-L183
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.delete
|
python
|
def delete(self, request, id=None):
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
|
Handles delete requests.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L185-L197
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.has_add_permission
|
python
|
def has_add_permission(self, request):
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
|
Returns True if the requesting user is allowed to add an object, False otherwise.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L212-L219
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.has_update_permission
|
python
|
def has_update_permission(self, request, obj):
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
|
Returns True if the requesting user is allowed to update the given object, False otherwise.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L231-L238
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.serialize
|
python
|
def serialize(self, obj, fields):
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
|
Serializes a single model instance to a Python dict, based on the specified list of fields.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L259-L295
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
zmathew/django-backbone
|
backbone/views.py
|
BackboneAPIView.json_dumps
|
python
|
def json_dumps(self, data, **options):
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params)
|
Wrapper around `json.dumps` that uses a special JSON encoder.
|
train
|
https://github.com/zmathew/django-backbone/blob/53505a247fb058e64a103c4f11da66993037bd6b/backbone/views.py#L297-L307
| null |
class BackboneAPIView(View):
model = None # The model to be used for this API definition
display_fields = [] # Fields to return for read (GET) requests,
display_collection_fields = [] # Specific fields to return for a read (GET) request of a model collection
display_detail_fields = [] # Specific fields to return for read (GET) requests for a specific model
fields = [] # Fields to allow when adding (POST) or editing (PUT) objects.
form = None # The form class to be used for adding or editing objects.
ordering = None # Ordering used when retrieving the collection
paginate_by = None # The max number of objects per page (enables use of the ``page`` GET parameter).
url_slug = None # The slug to be used when constructing the url (and url name) for this view.
# Defaults to lowercase model name. Change this if you have multiple views for the same model.
def queryset(self, request, **kwargs):
"""
Returns the queryset (along with ordering) to be used when retrieving object(s).
"""
qs = self.model._default_manager.all()
if self.ordering:
qs = qs.order_by(*self.ordering)
return qs
def get(self, request, id=None, **kwargs):
"""
Handles get requests for either the collection or an object detail.
"""
if not self.has_get_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
if id:
obj = get_object_or_404(self.queryset(request, **kwargs), id=id)
return self.get_object_detail(request, obj)
else:
return self.get_collection(request, **kwargs)
def get_object_detail(self, request, obj):
"""
Handles get requests for the details of the given object.
"""
if self.display_detail_fields:
display_fields = self.display_detail_fields
else:
display_fields = self.display_fields
data = self.serialize(obj, ['id'] + list(display_fields))
return HttpResponse(self.json_dumps(data), content_type='application/json')
def get_collection(self, request, **kwargs):
"""
Handles get requests for the list of objects.
"""
qs = self.queryset(request, **kwargs)
if self.display_collection_fields:
display_fields = self.display_collection_fields
else:
display_fields = self.display_fields
if self.paginate_by is not None:
page = request.GET.get('page', 1)
paginator = Paginator(qs, self.paginate_by)
try:
qs = paginator.page(page).object_list
except PageNotAnInteger:
data = _('Invalid `page` parameter: Not a valid integer.')
return HttpResponseBadRequest(data)
except EmptyPage:
data = _('Invalid `page` parameter: Out of range.')
return HttpResponseBadRequest(data)
data = [
self.serialize(obj, ['id'] + list(display_fields)) for obj in qs
]
return HttpResponse(self.json_dumps(data), content_type='application/json')
def post(self, request, id=None, **kwargs):
"""
Handles post requests.
"""
if id:
# No posting to an object detail page
return HttpResponseForbidden()
else:
if not self.has_add_permission(request):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.add_object(request)
def add_object(self, request):
"""
Adds an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data)
if form.is_valid():
if not self.has_add_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
obj = form.save()
# We return the newly created object's details and a Location header with it's url
response = self.get_object_detail(request, obj)
response.status_code = 201
opts = self.model._meta
url_slug = self.url_slug or (
opts.model_name if hasattr(opts, 'model_name') else opts.module_name
)
url_name = 'backbone:%s_%s_detail' % (self.model._meta.app_label, url_slug)
response['Location'] = reverse(url_name, args=[obj.id])
return response
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def put(self, request, id=None, **kwargs):
"""
Handles put requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_update_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.update_object(request, obj)
else:
# No putting on a collection.
return HttpResponseForbidden()
def update_object(self, request, obj):
"""
Updates an object.
"""
try:
# backbone sends data in the body in json format
# Conditional statement is for backwards compatibility with Django <= 1.3
data = json.loads(request.body if hasattr(request, 'body') else request.raw_post_data)
except ValueError:
return HttpResponseBadRequest(_('Unable to parse JSON request body.'))
form = self.get_form_instance(request, data=data, instance=obj)
if form.is_valid():
if not self.has_update_permission_for_data(request, form.cleaned_data):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
form.save()
# We return the updated object details
return self.get_object_detail(request, obj)
else:
return HttpResponseBadRequest(self.json_dumps(form.errors), content_type='application/json')
def get_form_instance(self, request, data=None, instance=None):
"""
Returns an instantiated form to be used for adding or editing an object.
The `instance` argument is the model instance (passed only if this form
is going to be used for editing an existing object).
"""
defaults = {}
if self.form:
defaults['form'] = self.form
if self.fields:
defaults['fields'] = self.fields
return modelform_factory(self.model, **defaults)(data=data, instance=instance)
def delete(self, request, id=None):
"""
Handles delete requests.
"""
if id:
obj = get_object_or_404(self.queryset(request), id=id)
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(_('You do not have permission to perform this action.'))
else:
return self.delete_object(request, obj)
else:
# No delete requests allowed on collection view
return HttpResponseForbidden()
def delete_object(self, request, obj):
"""
Deletes the the given object.
"""
obj.delete()
return HttpResponse(status=204)
def has_get_permission(self, request):
"""
Returns True if the requesting user is allowed to retrieve objects.
"""
return True
def has_add_permission(self, request):
"""
Returns True if the requesting user is allowed to add an object, False otherwise.
"""
perm_string = '%s.add_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_add_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to add an object with the
given data, False otherwise.
If the add permission does not depend on the data being submitted,
use `has_add_permission` instead.
"""
return True
def has_update_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to update the given object, False otherwise.
"""
perm_string = '%s.change_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def has_update_permission_for_data(self, request, cleaned_data):
"""
Returns True if the requesting user is allowed to update the object with the
given data, False otherwise.
If the update permission does not depend on the data being submitted,
use `has_update_permission` instead.
"""
return True
def has_delete_permission(self, request, obj):
"""
Returns True if the requesting user is allowed to delete the given object, False otherwise.
"""
perm_string = '%s.delete_%s' % (self.model._meta.app_label,
self.model._meta.object_name.lower()
)
return request.user.has_perm(perm_string)
def serialize(self, obj, fields):
"""
Serializes a single model instance to a Python dict, based on the specified list of fields.
"""
data = {}
remaining_fields = []
for field in fields:
if callable(field): # Callable
data[field.__name__] = field(obj)
elif hasattr(self, field) and callable(getattr(self, field)): # Method on the view
data[field] = getattr(self, field)(obj)
elif hasattr(obj, field): # Callable/property/field on the model
attr = getattr(obj, field)
if isinstance(attr, Model):
data[field] = attr.pk
elif isinstance(attr, Manager):
data[field] = [item['pk'] for item in attr.values('pk')]
elif callable(attr): # Callable on the model
data[field] = attr()
else:
remaining_fields.append(field)
else:
raise AttributeError('Invalid field: %s' % field)
# Add on db fields
serializer = Serializer()
serializer.serialize([obj], fields=list(remaining_fields))
data.update(serializer.getvalue()[0]['fields'])
# Any remaining fields should be properties on the model
remaining_fields = set(remaining_fields) - set(data.keys())
for field in remaining_fields:
data[field] = getattr(obj, field)
return data
|
nakagami/pyfirebirdsql
|
firebirdsql/utils.py
|
hex_to_bytes
|
python
|
def hex_to_bytes(s):
if len(s) % 2:
s = b'0' + s
ia = [int(s[i:i+2], 16) for i in range(0, len(s), 2)] # int array
return bs(ia) if PYTHON_MAJOR_VER == 3 else b''.join([chr(c) for c in ia])
|
convert hex string to bytes
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/utils.py#L43-L50
|
[
"def bs(byte_array):\n if PYTHON_MAJOR_VER == 2:\n return ''.join([chr(c) for c in byte_array])\n else:\n return bytes(byte_array)\n"
] |
##############################################################################
# Copyright (c) 2014-2018, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
import sys
import binascii
import struct
from firebirdsql import InternalError
PYTHON_MAJOR_VER = sys.version_info[0]
def bs(byte_array):
if PYTHON_MAJOR_VER == 2:
return ''.join([chr(c) for c in byte_array])
else:
return bytes(byte_array)
def bytes_to_hex(b):
"""
convert bytes to hex string
"""
s = binascii.b2a_hex(b)
return s
def bytes_to_bint(b, u=False): # Read as big endian
if u:
fmtmap = {1: 'B', 2: '>H', 4: '>L', 8: '>Q'}
else:
fmtmap = {1: 'b', 2: '>h', 4: '>l', 8: '>q'}
fmt = fmtmap.get(len(b))
if fmt is None:
raise InternalError("Invalid bytes length:%d" % (len(b), ))
return struct.unpack(fmt, b)[0]
def bytes_to_int(b): # Read as little endian.
fmtmap = {1: 'b', 2: '<h', 4: '<l', 8: '<q'}
fmt = fmtmap.get(len(b))
if fmt is None:
raise InternalError("Invalid bytes length:%d" % (len(b), ))
return struct.unpack(fmt, b)[0]
def bytes_to_uint(b): # Read as little endian unsigned int.
fmtmap = {1: 'B', 2: '<H', 4: '<L', 8: '<Q'}
fmt = fmtmap.get(len(b))
if fmt is None:
raise InternalError("Invalid bytes length:%d" % (len(b), ))
return struct.unpack(fmt, b)[0]
def bint_to_bytes(val, nbytes): # Convert int value to big endian bytes.
v = abs(val)
b = []
for n in range(nbytes):
b.append((v >> (8 * (nbytes - n - 1)) & 0xff))
if val < 0:
for i in range(nbytes):
b[i] = ~b[i] + 256
b[-1] += 1
for i in range(nbytes):
if b[nbytes - i - 1] == 256:
b[nbytes - i - 1] = 0
b[nbytes - i - 2] += 1
return bs(b)
def int_to_bytes(val, nbytes): # Convert int value to little endian bytes.
v = abs(val)
b = []
for n in range(nbytes):
b.append((v >> (8 * n)) & 0xff)
if val < 0:
for i in range(nbytes):
b[i] = ~b[i] + 256
b[0] += 1
for i in range(nbytes):
if b[i] == 256:
b[i] = 0
b[i+1] += 1
return bs(b)
def byte_to_int(b):
"byte to int"
if PYTHON_MAJOR_VER == 3:
return b
else:
return ord(b)
|
nakagami/pyfirebirdsql
|
firebirdsql/srp.py
|
client_seed
|
python
|
def client_seed(a=random.randrange(0, 1 << SRP_KEY_SIZE)):
if DEBUG:
a = DEBUG_PRIVATE_KEY
N, g, k = get_prime()
A = pow(g, a, N)
if DEBUG_PRINT:
print('a=', binascii.b2a_hex(long2bytes(a)), end='\n')
print('A=', binascii.b2a_hex(long2bytes(A)), end='\n')
return A, a
|
A: Client public key
a: Client private key
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/srp.py#L181-L193
|
[
"def get_prime():\n N = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7\n g = 2\n\n #k = bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))\n k = 1277432915985975349439481660349303019122249719989\n\n return N, g, k\n"
] |
##############################################################################
# Copyright (c) 2014-2016, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
# This SRP implementation is in reference to
'''
Following document was copied from <http://srp.stanford.edu/design.html>.
-----
SRP Protocol Design
SRP is the newest addition to a new class of strong authentication protocols
that resist all the well-known passive and active attacks over the network. SRP
borrows some elements from other key-exchange and identification protcols and
adds some subtlee modifications and refinements. The result is a protocol that
preserves the strength and efficiency of the EKE family protocols while fixing
some of their shortcomings.
The following is a description of SRP-6 and 6a, the latest versions of SRP:
N A large safe prime (N = 2q+1, where q is prime)
All arithmetic is done modulo N.
g A generator modulo N
k Multiplier parameter (k = H(N, g) in SRP-6a, k = 3 for legacy SRP-6)
s User's salt
I Username
p Cleartext Password
H() One-way hash function
^ (Modular) Exponentiation
u Random scrambling parameter
a,b Secret ephemeral values
A,B Public ephemeral values
x Private key (derived from p and s)
v Password verifier
The host stores passwords using the following formula:
x = H(s, p) (s is chosen randomly)
v = g^x (computes password verifier)
The host then keeps {I, s, v} in its password database. The authentication
protocol itself goes as follows:
User -> Host: I, A = g^a (identifies self, a = random number)
Host -> User: s, B = kv + g^b (sends salt, b = random number)
Both: u = H(A, B)
User: x = H(s, p) (user enters password)
User: S = (B - kg^x) ^ (a + ux) (computes session key)
User: K = H(S)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
Now the two parties have a shared, strong session key K. To complete
authentication, they need to prove to each other that their keys match.
One possible way:
User -> Host: M = H(H(N) xor H(g), H(I), s, A, B, K)
Host -> User: H(A, M, K)
The two parties also employ the following safeguards:
1. The user will abort if he receives B == 0 (mod N) or u == 0.
2. The host will abort if it detects that A == 0 (mod N).
3. The user must show his proof of K first. If the server detects that the user's proof is incorrect, it must abort without showing its own proof of K.
See http://srp.stanford.edu/ for more information.
'''
from __future__ import print_function
import sys
import hashlib
import random
import binascii
DEBUG = False
DEBUG_PRINT = False
DEBUG_PRIVATE_KEY = 0x60975527035CF2AD1989806F0407210BC81EDC04E2762A56AFD529DDDA2D4393
DEBUG_SALT = binascii.unhexlify('02E268803000000079A478A700000002D1A6979000000026E1601C000000054F')
PYTHON_MAJOR_VER = sys.version_info[0]
if PYTHON_MAJOR_VER == 3:
def ord(c):
return c
SRP_KEY_SIZE = 128
SRP_SALT_SIZE = 32
def get_prime():
N = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7
g = 2
#k = bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))
k = 1277432915985975349439481660349303019122249719989
return N, g, k
def bytes2long(s):
n = 0
for c in s:
n <<= 8
n += ord(c)
return n
def long2bytes(n):
s = []
while n > 0:
s.insert(0, n & 255)
n >>= 8
if PYTHON_MAJOR_VER == 3:
return bytes(s)
else:
return b''.join([chr(c) for c in s])
def hash_digest(hash_algo, *args):
algo = hash_algo()
for v in args:
if not isinstance(v, bytes):
v = long2bytes(v)
algo.update(v)
return algo.digest()
def pad(n):
s = []
for x in range(SRP_KEY_SIZE):
s.insert(0, n & 255)
n >>= 8
if n == 0:
break
if PYTHON_MAJOR_VER == 3:
return bytes(s)
else:
return b''.join([chr(c) for c in s])
def get_scramble(x, y):
return bytes2long(hash_digest(hashlib.sha1, pad(x), pad(y)))
def getUserHash(salt, user, password):
assert isinstance(user, bytes)
assert isinstance(password, bytes)
hash1 = hash_digest(hashlib.sha1, user, b':', password)
hash2 = hash_digest(hashlib.sha1, salt, hash1)
rc = bytes2long(hash2)
return rc
def server_seed(v, b=random.randrange(0, 1 << SRP_KEY_SIZE)):
"""
B: Server public key
b: Server private key
"""
N, g, k = get_prime()
if DEBUG:
b = DEBUG_PRIVATE_KEY
gb = pow(g, b, N)
kv = (k * v) % N
B = (kv + gb) % N
if DEBUG_PRINT:
print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
print('b=', binascii.b2a_hex(long2bytes(b)), end='\n')
print("gb", binascii.b2a_hex(long2bytes(gb)), end='\n')
print("k", binascii.b2a_hex(long2bytes(k)), end='\n')
print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
print("kv", binascii.b2a_hex(long2bytes(kv)), end='\n')
print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
return B, b
def client_session(user, password, salt, A, B, a):
"""
Client session secret
Both: u = H(A, B)
User: x = H(s, p) (user enters password)
User: S = (B - kg^x) ^ (a + ux) (computes session key)
User: K = H(S)
"""
N, g, k = get_prime()
u = get_scramble(A, B)
x = getUserHash(salt, user, password) # x
gx = pow(g, x, N) # g^x
kgx = (k * gx) % N # kg^x
diff = (B - kgx) % N # B - kg^x
ux = (u * x) % N
aux = (a + ux) % N
session_secret = pow(diff, aux, N) # (B - kg^x) ^ (a + ux)
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
print('u=', binascii.b2a_hex(long2bytes(u)), end='\n')
print('x=', binascii.b2a_hex(long2bytes(x)), end='\n')
print('gx=', binascii.b2a_hex(long2bytes(gx)), end='\n')
print('kgx=', binascii.b2a_hex(long2bytes(kgx)), end='\n')
print('diff=', binascii.b2a_hex(long2bytes(diff)), end='\n')
print('ux=', binascii.b2a_hex(long2bytes(ux)), end='\n')
print('aux=', binascii.b2a_hex(long2bytes(aux)), end='\n')
print('session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('session_key:K=', binascii.b2a_hex(K))
return K
def server_session(user, password, salt, A, B, b):
"""
Server session secret
Both: u = H(A, B)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
"""
N, g, k = get_prime()
u = get_scramble(A, B)
v = get_verifier(user, password, salt)
vu = pow(v, u, N) # v^u
Avu = (A * vu) % N # Av^u
session_secret = pow(Avu, b, N) # (Av^u) ^ b
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('server session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('server session hash K=', binascii.b2a_hex(K))
return K
def client_proof(user, password, salt, A, B, a, hash_algo):
"""
M = H(H(N) xor H(g), H(I), s, A, B, K)
"""
N, g, k = get_prime()
K = client_session(user, password, salt, A, B, a)
n1 = bytes2long(hash_digest(hashlib.sha1, N))
n2 = bytes2long(hash_digest(hashlib.sha1, g))
if DEBUG_PRINT:
print('n1-1=', binascii.b2a_hex(long2bytes(n1)), end='\n')
print('n2-1=', binascii.b2a_hex(long2bytes(n2)), end='\n')
n1 = pow(n1, n2, N)
n2 = bytes2long(hash_digest(hashlib.sha1, user))
M = hash_digest(hash_algo, n1, n2, salt, A, B, K)
if DEBUG_PRINT:
print('n1-2=', binascii.b2a_hex(long2bytes(n1)), end='\n')
print('n2-2=', binascii.b2a_hex(long2bytes(n2)), end='\n')
print('client_proof:M=', binascii.b2a_hex(M), end='\n')
return M, K
def get_salt():
if PYTHON_MAJOR_VER == 3:
salt = bytes([random.randrange(0, 256) for x in range(SRP_SALT_SIZE)])
else:
salt = b''.join([chr(random.randrange(0, 256)) for x in range(SRP_SALT_SIZE)])
if DEBUG:
salt = DEBUG_SALT
if DEBUG_PRINT:
print('salt=', binascii.b2a_hex(salt), end='\n')
return salt
def get_verifier(user, password, salt):
N, g, k = get_prime()
x = getUserHash(salt, user, password)
return pow(g, x, N)
if __name__ == '__main__':
"""
A, a, B, b are long.
salt, M are bytes.
client_key, serverKey are bytes.
"""
# Both
user = b'SYSDBA'
password = b'masterkey'
# Client send A to Server
A, a = client_seed()
# Server send B, salt to Client
salt = get_salt()
v = get_verifier(user, password, salt)
B, b = server_seed(v)
serverKey = server_session(user, password, salt, A, B, b)
# sha1
M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha1)
assert clientKey == serverKey
# sha256
M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha256)
assert clientKey == serverKey
|
nakagami/pyfirebirdsql
|
firebirdsql/srp.py
|
server_seed
|
python
|
def server_seed(v, b=random.randrange(0, 1 << SRP_KEY_SIZE)):
N, g, k = get_prime()
if DEBUG:
b = DEBUG_PRIVATE_KEY
gb = pow(g, b, N)
kv = (k * v) % N
B = (kv + gb) % N
if DEBUG_PRINT:
print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
print('b=', binascii.b2a_hex(long2bytes(b)), end='\n')
print("gb", binascii.b2a_hex(long2bytes(gb)), end='\n')
print("k", binascii.b2a_hex(long2bytes(k)), end='\n')
print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
print("kv", binascii.b2a_hex(long2bytes(kv)), end='\n')
print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
return B, b
|
B: Server public key
b: Server private key
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/srp.py#L196-L215
|
[
"def get_prime():\n N = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7\n g = 2\n\n #k = bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))\n k = 1277432915985975349439481660349303019122249719989\n\n return N, g, k\n"
] |
##############################################################################
# Copyright (c) 2014-2016, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
# This SRP implementation is in reference to
'''
Following document was copied from <http://srp.stanford.edu/design.html>.
-----
SRP Protocol Design
SRP is the newest addition to a new class of strong authentication protocols
that resist all the well-known passive and active attacks over the network. SRP
borrows some elements from other key-exchange and identification protcols and
adds some subtlee modifications and refinements. The result is a protocol that
preserves the strength and efficiency of the EKE family protocols while fixing
some of their shortcomings.
The following is a description of SRP-6 and 6a, the latest versions of SRP:
N A large safe prime (N = 2q+1, where q is prime)
All arithmetic is done modulo N.
g A generator modulo N
k Multiplier parameter (k = H(N, g) in SRP-6a, k = 3 for legacy SRP-6)
s User's salt
I Username
p Cleartext Password
H() One-way hash function
^ (Modular) Exponentiation
u Random scrambling parameter
a,b Secret ephemeral values
A,B Public ephemeral values
x Private key (derived from p and s)
v Password verifier
The host stores passwords using the following formula:
x = H(s, p) (s is chosen randomly)
v = g^x (computes password verifier)
The host then keeps {I, s, v} in its password database. The authentication
protocol itself goes as follows:
User -> Host: I, A = g^a (identifies self, a = random number)
Host -> User: s, B = kv + g^b (sends salt, b = random number)
Both: u = H(A, B)
User: x = H(s, p) (user enters password)
User: S = (B - kg^x) ^ (a + ux) (computes session key)
User: K = H(S)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
Now the two parties have a shared, strong session key K. To complete
authentication, they need to prove to each other that their keys match.
One possible way:
User -> Host: M = H(H(N) xor H(g), H(I), s, A, B, K)
Host -> User: H(A, M, K)
The two parties also employ the following safeguards:
1. The user will abort if he receives B == 0 (mod N) or u == 0.
2. The host will abort if it detects that A == 0 (mod N).
3. The user must show his proof of K first. If the server detects that the user's proof is incorrect, it must abort without showing its own proof of K.
See http://srp.stanford.edu/ for more information.
'''
from __future__ import print_function
import sys
import hashlib
import random
import binascii
DEBUG = False
DEBUG_PRINT = False
DEBUG_PRIVATE_KEY = 0x60975527035CF2AD1989806F0407210BC81EDC04E2762A56AFD529DDDA2D4393
DEBUG_SALT = binascii.unhexlify('02E268803000000079A478A700000002D1A6979000000026E1601C000000054F')
PYTHON_MAJOR_VER = sys.version_info[0]
if PYTHON_MAJOR_VER == 3:
def ord(c):
return c
SRP_KEY_SIZE = 128
SRP_SALT_SIZE = 32
def get_prime():
N = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7
g = 2
#k = bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))
k = 1277432915985975349439481660349303019122249719989
return N, g, k
def bytes2long(s):
n = 0
for c in s:
n <<= 8
n += ord(c)
return n
def long2bytes(n):
s = []
while n > 0:
s.insert(0, n & 255)
n >>= 8
if PYTHON_MAJOR_VER == 3:
return bytes(s)
else:
return b''.join([chr(c) for c in s])
def hash_digest(hash_algo, *args):
algo = hash_algo()
for v in args:
if not isinstance(v, bytes):
v = long2bytes(v)
algo.update(v)
return algo.digest()
def pad(n):
s = []
for x in range(SRP_KEY_SIZE):
s.insert(0, n & 255)
n >>= 8
if n == 0:
break
if PYTHON_MAJOR_VER == 3:
return bytes(s)
else:
return b''.join([chr(c) for c in s])
def get_scramble(x, y):
return bytes2long(hash_digest(hashlib.sha1, pad(x), pad(y)))
def getUserHash(salt, user, password):
assert isinstance(user, bytes)
assert isinstance(password, bytes)
hash1 = hash_digest(hashlib.sha1, user, b':', password)
hash2 = hash_digest(hashlib.sha1, salt, hash1)
rc = bytes2long(hash2)
return rc
def client_seed(a=random.randrange(0, 1 << SRP_KEY_SIZE)):
"""
A: Client public key
a: Client private key
"""
if DEBUG:
a = DEBUG_PRIVATE_KEY
N, g, k = get_prime()
A = pow(g, a, N)
if DEBUG_PRINT:
print('a=', binascii.b2a_hex(long2bytes(a)), end='\n')
print('A=', binascii.b2a_hex(long2bytes(A)), end='\n')
return A, a
def client_session(user, password, salt, A, B, a):
"""
Client session secret
Both: u = H(A, B)
User: x = H(s, p) (user enters password)
User: S = (B - kg^x) ^ (a + ux) (computes session key)
User: K = H(S)
"""
N, g, k = get_prime()
u = get_scramble(A, B)
x = getUserHash(salt, user, password) # x
gx = pow(g, x, N) # g^x
kgx = (k * gx) % N # kg^x
diff = (B - kgx) % N # B - kg^x
ux = (u * x) % N
aux = (a + ux) % N
session_secret = pow(diff, aux, N) # (B - kg^x) ^ (a + ux)
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
print('u=', binascii.b2a_hex(long2bytes(u)), end='\n')
print('x=', binascii.b2a_hex(long2bytes(x)), end='\n')
print('gx=', binascii.b2a_hex(long2bytes(gx)), end='\n')
print('kgx=', binascii.b2a_hex(long2bytes(kgx)), end='\n')
print('diff=', binascii.b2a_hex(long2bytes(diff)), end='\n')
print('ux=', binascii.b2a_hex(long2bytes(ux)), end='\n')
print('aux=', binascii.b2a_hex(long2bytes(aux)), end='\n')
print('session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('session_key:K=', binascii.b2a_hex(K))
return K
def server_session(user, password, salt, A, B, b):
"""
Server session secret
Both: u = H(A, B)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
"""
N, g, k = get_prime()
u = get_scramble(A, B)
v = get_verifier(user, password, salt)
vu = pow(v, u, N) # v^u
Avu = (A * vu) % N # Av^u
session_secret = pow(Avu, b, N) # (Av^u) ^ b
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('server session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('server session hash K=', binascii.b2a_hex(K))
return K
def client_proof(user, password, salt, A, B, a, hash_algo):
"""
M = H(H(N) xor H(g), H(I), s, A, B, K)
"""
N, g, k = get_prime()
K = client_session(user, password, salt, A, B, a)
n1 = bytes2long(hash_digest(hashlib.sha1, N))
n2 = bytes2long(hash_digest(hashlib.sha1, g))
if DEBUG_PRINT:
print('n1-1=', binascii.b2a_hex(long2bytes(n1)), end='\n')
print('n2-1=', binascii.b2a_hex(long2bytes(n2)), end='\n')
n1 = pow(n1, n2, N)
n2 = bytes2long(hash_digest(hashlib.sha1, user))
M = hash_digest(hash_algo, n1, n2, salt, A, B, K)
if DEBUG_PRINT:
print('n1-2=', binascii.b2a_hex(long2bytes(n1)), end='\n')
print('n2-2=', binascii.b2a_hex(long2bytes(n2)), end='\n')
print('client_proof:M=', binascii.b2a_hex(M), end='\n')
return M, K
def get_salt():
if PYTHON_MAJOR_VER == 3:
salt = bytes([random.randrange(0, 256) for x in range(SRP_SALT_SIZE)])
else:
salt = b''.join([chr(random.randrange(0, 256)) for x in range(SRP_SALT_SIZE)])
if DEBUG:
salt = DEBUG_SALT
if DEBUG_PRINT:
print('salt=', binascii.b2a_hex(salt), end='\n')
return salt
def get_verifier(user, password, salt):
N, g, k = get_prime()
x = getUserHash(salt, user, password)
return pow(g, x, N)
if __name__ == '__main__':
"""
A, a, B, b are long.
salt, M are bytes.
client_key, serverKey are bytes.
"""
# Both
user = b'SYSDBA'
password = b'masterkey'
# Client send A to Server
A, a = client_seed()
# Server send B, salt to Client
salt = get_salt()
v = get_verifier(user, password, salt)
B, b = server_seed(v)
serverKey = server_session(user, password, salt, A, B, b)
# sha1
M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha1)
assert clientKey == serverKey
# sha256
M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha256)
assert clientKey == serverKey
|
nakagami/pyfirebirdsql
|
firebirdsql/srp.py
|
client_session
|
python
|
def client_session(user, password, salt, A, B, a):
N, g, k = get_prime()
u = get_scramble(A, B)
x = getUserHash(salt, user, password) # x
gx = pow(g, x, N) # g^x
kgx = (k * gx) % N # kg^x
diff = (B - kgx) % N # B - kg^x
ux = (u * x) % N
aux = (a + ux) % N
session_secret = pow(diff, aux, N) # (B - kg^x) ^ (a + ux)
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
print('u=', binascii.b2a_hex(long2bytes(u)), end='\n')
print('x=', binascii.b2a_hex(long2bytes(x)), end='\n')
print('gx=', binascii.b2a_hex(long2bytes(gx)), end='\n')
print('kgx=', binascii.b2a_hex(long2bytes(kgx)), end='\n')
print('diff=', binascii.b2a_hex(long2bytes(diff)), end='\n')
print('ux=', binascii.b2a_hex(long2bytes(ux)), end='\n')
print('aux=', binascii.b2a_hex(long2bytes(aux)), end='\n')
print('session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('session_key:K=', binascii.b2a_hex(K))
return K
|
Client session secret
Both: u = H(A, B)
User: x = H(s, p) (user enters password)
User: S = (B - kg^x) ^ (a + ux) (computes session key)
User: K = H(S)
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/srp.py#L218-L249
|
[
"def get_prime():\n N = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7\n g = 2\n\n #k = bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))\n k = 1277432915985975349439481660349303019122249719989\n\n return N, g, k\n",
"def hash_digest(hash_algo, *args):\n algo = hash_algo()\n for v in args:\n if not isinstance(v, bytes):\n v = long2bytes(v)\n algo.update(v)\n return algo.digest()\n",
"def get_scramble(x, y):\n return bytes2long(hash_digest(hashlib.sha1, pad(x), pad(y)))\n",
"def getUserHash(salt, user, password):\n assert isinstance(user, bytes)\n assert isinstance(password, bytes)\n hash1 = hash_digest(hashlib.sha1, user, b':', password)\n hash2 = hash_digest(hashlib.sha1, salt, hash1)\n rc = bytes2long(hash2)\n\n return rc\n"
] |
##############################################################################
# Copyright (c) 2014-2016, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
# This SRP implementation is in reference to
'''
Following document was copied from <http://srp.stanford.edu/design.html>.
-----
SRP Protocol Design
SRP is the newest addition to a new class of strong authentication protocols
that resist all the well-known passive and active attacks over the network. SRP
borrows some elements from other key-exchange and identification protcols and
adds some subtlee modifications and refinements. The result is a protocol that
preserves the strength and efficiency of the EKE family protocols while fixing
some of their shortcomings.
The following is a description of SRP-6 and 6a, the latest versions of SRP:
N A large safe prime (N = 2q+1, where q is prime)
All arithmetic is done modulo N.
g A generator modulo N
k Multiplier parameter (k = H(N, g) in SRP-6a, k = 3 for legacy SRP-6)
s User's salt
I Username
p Cleartext Password
H() One-way hash function
^ (Modular) Exponentiation
u Random scrambling parameter
a,b Secret ephemeral values
A,B Public ephemeral values
x Private key (derived from p and s)
v Password verifier
The host stores passwords using the following formula:
x = H(s, p) (s is chosen randomly)
v = g^x (computes password verifier)
The host then keeps {I, s, v} in its password database. The authentication
protocol itself goes as follows:
User -> Host: I, A = g^a (identifies self, a = random number)
Host -> User: s, B = kv + g^b (sends salt, b = random number)
Both: u = H(A, B)
User: x = H(s, p) (user enters password)
User: S = (B - kg^x) ^ (a + ux) (computes session key)
User: K = H(S)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
Now the two parties have a shared, strong session key K. To complete
authentication, they need to prove to each other that their keys match.
One possible way:
User -> Host: M = H(H(N) xor H(g), H(I), s, A, B, K)
Host -> User: H(A, M, K)
The two parties also employ the following safeguards:
1. The user will abort if he receives B == 0 (mod N) or u == 0.
2. The host will abort if it detects that A == 0 (mod N).
3. The user must show his proof of K first. If the server detects that the user's proof is incorrect, it must abort without showing its own proof of K.
See http://srp.stanford.edu/ for more information.
'''
from __future__ import print_function
import sys
import hashlib
import random
import binascii
DEBUG = False
DEBUG_PRINT = False
DEBUG_PRIVATE_KEY = 0x60975527035CF2AD1989806F0407210BC81EDC04E2762A56AFD529DDDA2D4393
DEBUG_SALT = binascii.unhexlify('02E268803000000079A478A700000002D1A6979000000026E1601C000000054F')
PYTHON_MAJOR_VER = sys.version_info[0]
if PYTHON_MAJOR_VER == 3:
def ord(c):
return c
SRP_KEY_SIZE = 128
SRP_SALT_SIZE = 32
def get_prime():
N = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7
g = 2
#k = bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))
k = 1277432915985975349439481660349303019122249719989
return N, g, k
def bytes2long(s):
n = 0
for c in s:
n <<= 8
n += ord(c)
return n
def long2bytes(n):
s = []
while n > 0:
s.insert(0, n & 255)
n >>= 8
if PYTHON_MAJOR_VER == 3:
return bytes(s)
else:
return b''.join([chr(c) for c in s])
def hash_digest(hash_algo, *args):
algo = hash_algo()
for v in args:
if not isinstance(v, bytes):
v = long2bytes(v)
algo.update(v)
return algo.digest()
def pad(n):
s = []
for x in range(SRP_KEY_SIZE):
s.insert(0, n & 255)
n >>= 8
if n == 0:
break
if PYTHON_MAJOR_VER == 3:
return bytes(s)
else:
return b''.join([chr(c) for c in s])
def get_scramble(x, y):
return bytes2long(hash_digest(hashlib.sha1, pad(x), pad(y)))
def getUserHash(salt, user, password):
assert isinstance(user, bytes)
assert isinstance(password, bytes)
hash1 = hash_digest(hashlib.sha1, user, b':', password)
hash2 = hash_digest(hashlib.sha1, salt, hash1)
rc = bytes2long(hash2)
return rc
def client_seed(a=random.randrange(0, 1 << SRP_KEY_SIZE)):
"""
A: Client public key
a: Client private key
"""
if DEBUG:
a = DEBUG_PRIVATE_KEY
N, g, k = get_prime()
A = pow(g, a, N)
if DEBUG_PRINT:
print('a=', binascii.b2a_hex(long2bytes(a)), end='\n')
print('A=', binascii.b2a_hex(long2bytes(A)), end='\n')
return A, a
def server_seed(v, b=random.randrange(0, 1 << SRP_KEY_SIZE)):
"""
B: Server public key
b: Server private key
"""
N, g, k = get_prime()
if DEBUG:
b = DEBUG_PRIVATE_KEY
gb = pow(g, b, N)
kv = (k * v) % N
B = (kv + gb) % N
if DEBUG_PRINT:
print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
print('b=', binascii.b2a_hex(long2bytes(b)), end='\n')
print("gb", binascii.b2a_hex(long2bytes(gb)), end='\n')
print("k", binascii.b2a_hex(long2bytes(k)), end='\n')
print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
print("kv", binascii.b2a_hex(long2bytes(kv)), end='\n')
print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
return B, b
def server_session(user, password, salt, A, B, b):
"""
Server session secret
Both: u = H(A, B)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
"""
N, g, k = get_prime()
u = get_scramble(A, B)
v = get_verifier(user, password, salt)
vu = pow(v, u, N) # v^u
Avu = (A * vu) % N # Av^u
session_secret = pow(Avu, b, N) # (Av^u) ^ b
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('server session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('server session hash K=', binascii.b2a_hex(K))
return K
def client_proof(user, password, salt, A, B, a, hash_algo):
"""
M = H(H(N) xor H(g), H(I), s, A, B, K)
"""
N, g, k = get_prime()
K = client_session(user, password, salt, A, B, a)
n1 = bytes2long(hash_digest(hashlib.sha1, N))
n2 = bytes2long(hash_digest(hashlib.sha1, g))
if DEBUG_PRINT:
print('n1-1=', binascii.b2a_hex(long2bytes(n1)), end='\n')
print('n2-1=', binascii.b2a_hex(long2bytes(n2)), end='\n')
n1 = pow(n1, n2, N)
n2 = bytes2long(hash_digest(hashlib.sha1, user))
M = hash_digest(hash_algo, n1, n2, salt, A, B, K)
if DEBUG_PRINT:
print('n1-2=', binascii.b2a_hex(long2bytes(n1)), end='\n')
print('n2-2=', binascii.b2a_hex(long2bytes(n2)), end='\n')
print('client_proof:M=', binascii.b2a_hex(M), end='\n')
return M, K
def get_salt():
if PYTHON_MAJOR_VER == 3:
salt = bytes([random.randrange(0, 256) for x in range(SRP_SALT_SIZE)])
else:
salt = b''.join([chr(random.randrange(0, 256)) for x in range(SRP_SALT_SIZE)])
if DEBUG:
salt = DEBUG_SALT
if DEBUG_PRINT:
print('salt=', binascii.b2a_hex(salt), end='\n')
return salt
def get_verifier(user, password, salt):
N, g, k = get_prime()
x = getUserHash(salt, user, password)
return pow(g, x, N)
if __name__ == '__main__':
"""
A, a, B, b are long.
salt, M are bytes.
client_key, serverKey are bytes.
"""
# Both
user = b'SYSDBA'
password = b'masterkey'
# Client send A to Server
A, a = client_seed()
# Server send B, salt to Client
salt = get_salt()
v = get_verifier(user, password, salt)
B, b = server_seed(v)
serverKey = server_session(user, password, salt, A, B, b)
# sha1
M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha1)
assert clientKey == serverKey
# sha256
M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha256)
assert clientKey == serverKey
|
nakagami/pyfirebirdsql
|
firebirdsql/srp.py
|
server_session
|
python
|
def server_session(user, password, salt, A, B, b):
N, g, k = get_prime()
u = get_scramble(A, B)
v = get_verifier(user, password, salt)
vu = pow(v, u, N) # v^u
Avu = (A * vu) % N # Av^u
session_secret = pow(Avu, b, N) # (Av^u) ^ b
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('server session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('server session hash K=', binascii.b2a_hex(K))
return K
|
Server session secret
Both: u = H(A, B)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/srp.py#L252-L271
|
[
"def get_prime():\n N = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7\n g = 2\n\n #k = bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))\n k = 1277432915985975349439481660349303019122249719989\n\n return N, g, k\n",
"def hash_digest(hash_algo, *args):\n algo = hash_algo()\n for v in args:\n if not isinstance(v, bytes):\n v = long2bytes(v)\n algo.update(v)\n return algo.digest()\n",
"def get_scramble(x, y):\n return bytes2long(hash_digest(hashlib.sha1, pad(x), pad(y)))\n",
"def get_verifier(user, password, salt):\n N, g, k = get_prime()\n x = getUserHash(salt, user, password)\n return pow(g, x, N)\n"
] |
##############################################################################
# Copyright (c) 2014-2016, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
# This SRP implementation is in reference to
'''
Following document was copied from <http://srp.stanford.edu/design.html>.
-----
SRP Protocol Design
SRP is the newest addition to a new class of strong authentication protocols
that resist all the well-known passive and active attacks over the network. SRP
borrows some elements from other key-exchange and identification protcols and
adds some subtlee modifications and refinements. The result is a protocol that
preserves the strength and efficiency of the EKE family protocols while fixing
some of their shortcomings.
The following is a description of SRP-6 and 6a, the latest versions of SRP:
N A large safe prime (N = 2q+1, where q is prime)
All arithmetic is done modulo N.
g A generator modulo N
k Multiplier parameter (k = H(N, g) in SRP-6a, k = 3 for legacy SRP-6)
s User's salt
I Username
p Cleartext Password
H() One-way hash function
^ (Modular) Exponentiation
u Random scrambling parameter
a,b Secret ephemeral values
A,B Public ephemeral values
x Private key (derived from p and s)
v Password verifier
The host stores passwords using the following formula:
x = H(s, p) (s is chosen randomly)
v = g^x (computes password verifier)
The host then keeps {I, s, v} in its password database. The authentication
protocol itself goes as follows:
User -> Host: I, A = g^a (identifies self, a = random number)
Host -> User: s, B = kv + g^b (sends salt, b = random number)
Both: u = H(A, B)
User: x = H(s, p) (user enters password)
User: S = (B - kg^x) ^ (a + ux) (computes session key)
User: K = H(S)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
Now the two parties have a shared, strong session key K. To complete
authentication, they need to prove to each other that their keys match.
One possible way:
User -> Host: M = H(H(N) xor H(g), H(I), s, A, B, K)
Host -> User: H(A, M, K)
The two parties also employ the following safeguards:
1. The user will abort if he receives B == 0 (mod N) or u == 0.
2. The host will abort if it detects that A == 0 (mod N).
3. The user must show his proof of K first. If the server detects that the user's proof is incorrect, it must abort without showing its own proof of K.
See http://srp.stanford.edu/ for more information.
'''
from __future__ import print_function
import sys
import hashlib
import random
import binascii
DEBUG = False
DEBUG_PRINT = False
DEBUG_PRIVATE_KEY = 0x60975527035CF2AD1989806F0407210BC81EDC04E2762A56AFD529DDDA2D4393
DEBUG_SALT = binascii.unhexlify('02E268803000000079A478A700000002D1A6979000000026E1601C000000054F')
PYTHON_MAJOR_VER = sys.version_info[0]
if PYTHON_MAJOR_VER == 3:
def ord(c):
return c
SRP_KEY_SIZE = 128
SRP_SALT_SIZE = 32
def get_prime():
N = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7
g = 2
#k = bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))
k = 1277432915985975349439481660349303019122249719989
return N, g, k
def bytes2long(s):
n = 0
for c in s:
n <<= 8
n += ord(c)
return n
def long2bytes(n):
s = []
while n > 0:
s.insert(0, n & 255)
n >>= 8
if PYTHON_MAJOR_VER == 3:
return bytes(s)
else:
return b''.join([chr(c) for c in s])
def hash_digest(hash_algo, *args):
algo = hash_algo()
for v in args:
if not isinstance(v, bytes):
v = long2bytes(v)
algo.update(v)
return algo.digest()
def pad(n):
s = []
for x in range(SRP_KEY_SIZE):
s.insert(0, n & 255)
n >>= 8
if n == 0:
break
if PYTHON_MAJOR_VER == 3:
return bytes(s)
else:
return b''.join([chr(c) for c in s])
def get_scramble(x, y):
return bytes2long(hash_digest(hashlib.sha1, pad(x), pad(y)))
def getUserHash(salt, user, password):
assert isinstance(user, bytes)
assert isinstance(password, bytes)
hash1 = hash_digest(hashlib.sha1, user, b':', password)
hash2 = hash_digest(hashlib.sha1, salt, hash1)
rc = bytes2long(hash2)
return rc
def client_seed(a=None):
    """
    Generate the client's ephemeral SRP key pair.

    A: Client public key (A = g^a mod N)
    a: Client private key

    Bug fix: ``a`` previously defaulted to ``random.randrange(...)``, which
    Python evaluates only once at import time, so every call without an
    explicit ``a`` reused the same "random" private key.  The key is now
    drawn per call.
    """
    if a is None:
        a = random.randrange(0, 1 << SRP_KEY_SIZE)
    if DEBUG:
        a = DEBUG_PRIVATE_KEY
    N, g, k = get_prime()
    A = pow(g, a, N)
    if DEBUG_PRINT:
        print('a=', binascii.b2a_hex(long2bytes(a)), end='\n')
        print('A=', binascii.b2a_hex(long2bytes(A)), end='\n')
    return A, a
def server_seed(v, b=None):
    """
    Generate the server's ephemeral SRP key pair.

    B: Server public key (B = kv + g^b mod N)
    b: Server private key
    v: the user's password verifier.

    Bug fix: ``b`` previously defaulted to ``random.randrange(...)``, which
    Python evaluates only once at import time, so every call without an
    explicit ``b`` reused the same "random" private key.  The key is now
    drawn per call.
    """
    if b is None:
        b = random.randrange(0, 1 << SRP_KEY_SIZE)
    N, g, k = get_prime()
    if DEBUG:
        b = DEBUG_PRIVATE_KEY
    gb = pow(g, b, N)
    kv = (k * v) % N
    B = (kv + gb) % N
    if DEBUG_PRINT:
        print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
        print('b=', binascii.b2a_hex(long2bytes(b)), end='\n')
        print("gb", binascii.b2a_hex(long2bytes(gb)), end='\n')
        print("k", binascii.b2a_hex(long2bytes(k)), end='\n')
        print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
        print("kv", binascii.b2a_hex(long2bytes(kv)), end='\n')
        print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
    return B, b
def client_session(user, password, salt, A, B, a):
"""
Client session secret
Both: u = H(A, B)
User: x = H(s, p) (user enters password)
User: S = (B - kg^x) ^ (a + ux) (computes session key)
User: K = H(S)
"""
N, g, k = get_prime()
u = get_scramble(A, B)
x = getUserHash(salt, user, password) # x
gx = pow(g, x, N) # g^x
kgx = (k * gx) % N # kg^x
diff = (B - kgx) % N # B - kg^x
ux = (u * x) % N
aux = (a + ux) % N
session_secret = pow(diff, aux, N) # (B - kg^x) ^ (a + ux)
K = hash_digest(hashlib.sha1, session_secret)
if DEBUG_PRINT:
print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
print('u=', binascii.b2a_hex(long2bytes(u)), end='\n')
print('x=', binascii.b2a_hex(long2bytes(x)), end='\n')
print('gx=', binascii.b2a_hex(long2bytes(gx)), end='\n')
print('kgx=', binascii.b2a_hex(long2bytes(kgx)), end='\n')
print('diff=', binascii.b2a_hex(long2bytes(diff)), end='\n')
print('ux=', binascii.b2a_hex(long2bytes(ux)), end='\n')
print('aux=', binascii.b2a_hex(long2bytes(aux)), end='\n')
print('session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
print('session_key:K=', binascii.b2a_hex(K))
return K
def client_proof(user, password, salt, A, B, a, hash_algo):
"""
M = H(H(N) xor H(g), H(I), s, A, B, K)
"""
N, g, k = get_prime()
K = client_session(user, password, salt, A, B, a)
n1 = bytes2long(hash_digest(hashlib.sha1, N))
n2 = bytes2long(hash_digest(hashlib.sha1, g))
if DEBUG_PRINT:
print('n1-1=', binascii.b2a_hex(long2bytes(n1)), end='\n')
print('n2-1=', binascii.b2a_hex(long2bytes(n2)), end='\n')
n1 = pow(n1, n2, N)
n2 = bytes2long(hash_digest(hashlib.sha1, user))
M = hash_digest(hash_algo, n1, n2, salt, A, B, K)
if DEBUG_PRINT:
print('n1-2=', binascii.b2a_hex(long2bytes(n1)), end='\n')
print('n2-2=', binascii.b2a_hex(long2bytes(n2)), end='\n')
print('client_proof:M=', binascii.b2a_hex(M), end='\n')
return M, K
def get_salt():
if PYTHON_MAJOR_VER == 3:
salt = bytes([random.randrange(0, 256) for x in range(SRP_SALT_SIZE)])
else:
salt = b''.join([chr(random.randrange(0, 256)) for x in range(SRP_SALT_SIZE)])
if DEBUG:
salt = DEBUG_SALT
if DEBUG_PRINT:
print('salt=', binascii.b2a_hex(salt), end='\n')
return salt
def get_verifier(user, password, salt):
N, g, k = get_prime()
x = getUserHash(salt, user, password)
return pow(g, x, N)
if __name__ == '__main__':
"""
A, a, B, b are long.
salt, M are bytes.
client_key, serverKey are bytes.
"""
# Both
user = b'SYSDBA'
password = b'masterkey'
# Client send A to Server
A, a = client_seed()
# Server send B, salt to Client
salt = get_salt()
v = get_verifier(user, password, salt)
B, b = server_seed(v)
serverKey = server_session(user, password, salt, A, B, b)
# sha1
M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha1)
assert clientKey == serverKey
# sha256
M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha256)
assert clientKey == serverKey
|
nakagami/pyfirebirdsql
|
firebirdsql/srp.py
|
client_proof
|
python
|
def client_proof(user, password, salt, A, B, a, hash_algo):
    """Compute the client's proof M = H(H(N) xor H(g), H(I), s, A, B, K).

    Returns ``(M, K)`` where K is the shared session key digest.

    NOTE(review): instead of XOR-ing H(N) and H(g) as in standard SRP, this
    combines them with ``pow(n1, n2, N)`` - presumably to match Firebird's
    server implementation; confirm against the server source.
    """
    N, g, k = get_prime()
    K = client_session(user, password, salt, A, B, a)
    n1 = bytes2long(hash_digest(hashlib.sha1, N))
    n2 = bytes2long(hash_digest(hashlib.sha1, g))
    if DEBUG_PRINT:
        print('n1-1=', binascii.b2a_hex(long2bytes(n1)), end='\n')
        print('n2-1=', binascii.b2a_hex(long2bytes(n2)), end='\n')
    # Combine the two hashes (Firebird-specific; standard SRP XORs them).
    n1 = pow(n1, n2, N)
    n2 = bytes2long(hash_digest(hashlib.sha1, user))
    M = hash_digest(hash_algo, n1, n2, salt, A, B, K)
    if DEBUG_PRINT:
        print('n1-2=', binascii.b2a_hex(long2bytes(n1)), end='\n')
        print('n2-2=', binascii.b2a_hex(long2bytes(n2)), end='\n')
        print('client_proof:M=', binascii.b2a_hex(M), end='\n')
    return M, K
|
M = H(H(N) xor H(g), H(I), s, A, B, K)
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/srp.py#L274-L296
|
[
"def bytes2long(s):\n n = 0\n for c in s:\n n <<= 8\n n += ord(c)\n return n\n",
"def get_prime():\n N = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7\n g = 2\n\n #k = bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))\n k = 1277432915985975349439481660349303019122249719989\n\n return N, g, k\n",
"def hash_digest(hash_algo, *args):\n algo = hash_algo()\n for v in args:\n if not isinstance(v, bytes):\n v = long2bytes(v)\n algo.update(v)\n return algo.digest()\n",
"def client_session(user, password, salt, A, B, a):\n \"\"\"\n Client session secret\n Both: u = H(A, B)\n\n User: x = H(s, p) (user enters password)\n User: S = (B - kg^x) ^ (a + ux) (computes session key)\n User: K = H(S)\n \"\"\"\n N, g, k = get_prime()\n u = get_scramble(A, B)\n x = getUserHash(salt, user, password) # x\n gx = pow(g, x, N) # g^x\n kgx = (k * gx) % N # kg^x\n diff = (B - kgx) % N # B - kg^x\n ux = (u * x) % N\n aux = (a + ux) % N\n session_secret = pow(diff, aux, N) # (B - kg^x) ^ (a + ux)\n K = hash_digest(hashlib.sha1, session_secret)\n if DEBUG_PRINT:\n print('B=', binascii.b2a_hex(long2bytes(B)), end='\\n')\n print('u=', binascii.b2a_hex(long2bytes(u)), end='\\n')\n print('x=', binascii.b2a_hex(long2bytes(x)), end='\\n')\n print('gx=', binascii.b2a_hex(long2bytes(gx)), end='\\n')\n print('kgx=', binascii.b2a_hex(long2bytes(kgx)), end='\\n')\n print('diff=', binascii.b2a_hex(long2bytes(diff)), end='\\n')\n print('ux=', binascii.b2a_hex(long2bytes(ux)), end='\\n')\n print('aux=', binascii.b2a_hex(long2bytes(aux)), end='\\n')\n print('session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\\n')\n print('session_key:K=', binascii.b2a_hex(K))\n\n return K\n"
] |
##############################################################################
# Copyright (c) 2014-2016, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
# This SRP implementation is in reference to
'''
Following document was copied from <http://srp.stanford.edu/design.html>.
-----
SRP Protocol Design
SRP is the newest addition to a new class of strong authentication protocols
that resist all the well-known passive and active attacks over the network. SRP
borrows some elements from other key-exchange and identification protocols and
adds some subtle modifications and refinements. The result is a protocol that
preserves the strength and efficiency of the EKE family protocols while fixing
some of their shortcomings.
The following is a description of SRP-6 and 6a, the latest versions of SRP:
N A large safe prime (N = 2q+1, where q is prime)
All arithmetic is done modulo N.
g A generator modulo N
k Multiplier parameter (k = H(N, g) in SRP-6a, k = 3 for legacy SRP-6)
s User's salt
I Username
p Cleartext Password
H() One-way hash function
^ (Modular) Exponentiation
u Random scrambling parameter
a,b Secret ephemeral values
A,B Public ephemeral values
x Private key (derived from p and s)
v Password verifier
The host stores passwords using the following formula:
x = H(s, p) (s is chosen randomly)
v = g^x (computes password verifier)
The host then keeps {I, s, v} in its password database. The authentication
protocol itself goes as follows:
User -> Host: I, A = g^a (identifies self, a = random number)
Host -> User: s, B = kv + g^b (sends salt, b = random number)
Both: u = H(A, B)
User: x = H(s, p) (user enters password)
User: S = (B - kg^x) ^ (a + ux) (computes session key)
User: K = H(S)
Host: S = (Av^u) ^ b (computes session key)
Host: K = H(S)
Now the two parties have a shared, strong session key K. To complete
authentication, they need to prove to each other that their keys match.
One possible way:
User -> Host: M = H(H(N) xor H(g), H(I), s, A, B, K)
Host -> User: H(A, M, K)
The two parties also employ the following safeguards:
1. The user will abort if he receives B == 0 (mod N) or u == 0.
2. The host will abort if it detects that A == 0 (mod N).
3. The user must show his proof of K first. If the server detects that the user's proof is incorrect, it must abort without showing its own proof of K.
See http://srp.stanford.edu/ for more information.
'''
from __future__ import print_function
import sys
import hashlib
import random
import binascii
DEBUG = False
DEBUG_PRINT = False
DEBUG_PRIVATE_KEY = 0x60975527035CF2AD1989806F0407210BC81EDC04E2762A56AFD529DDDA2D4393
DEBUG_SALT = binascii.unhexlify('02E268803000000079A478A700000002D1A6979000000026E1601C000000054F')
PYTHON_MAJOR_VER = sys.version_info[0]
if PYTHON_MAJOR_VER == 3:
def ord(c):
return c
SRP_KEY_SIZE = 128
SRP_SALT_SIZE = 32
def get_prime():
    """Return the SRP group parameters ``(N, g, k)``.

    N is a 1024-bit prime modulus, g a generator modulo N, and k the
    SRP-6a multiplier H(N, g), hard-coded to avoid rehashing.
    """
    modulus = 0xE67D2E994B2F900C3F41F08F5BB2627ED0D49EE1FE767A52EFCD565CD6E768812C3E1E9CE8F0A8BEA6CB13CD29DDEBF7A96D4A93B55D488DF099A15C89DCB0640738EB2CBDD9A8F7BAB561AB1B0DC1C6CDABF303264A08D1BCA932D1F1EE428B619D970F342ABA9A65793B8B2F041AE5364350C16F735F56ECBCA87BD57B29E7
    generator = 2
    # Precomputed: bytes2long(sha1(pad(N, SRP_KEY_SIZE), pad(g, SRP_KEY_SIZE)))
    multiplier = 1277432915985975349439481660349303019122249719989
    return modulus, generator, multiplier
def bytes2long(s):
    """Interpret a byte/character string as a big-endian unsigned integer."""
    value = 0
    for ch in s:
        value = (value << 8) + ord(ch)
    return value
def long2bytes(n):
    """Big-endian byte representation of a non-negative integer (b'' for 0)."""
    out = []
    while n > 0:
        out.insert(0, n & 0xFF)
        n >>= 8
    if PYTHON_MAJOR_VER == 3:
        return bytes(out)
    return b''.join([chr(v) for v in out])
def hash_digest(hash_algo, *args):
    """Digest of the concatenation of *args*; ints go through long2bytes."""
    h = hash_algo()
    for item in args:
        h.update(item if isinstance(item, bytes) else long2bytes(item))
    return h.digest()
def pad(n):
    """Big-endian bytes of ``n``, capped at SRP_KEY_SIZE bytes.

    Always emits at least one byte (pad(0) yields a single zero byte).
    """
    out = []
    for _ in range(SRP_KEY_SIZE):
        out.insert(0, n & 0xFF)
        n >>= 8
        if n == 0:
            break
    if PYTHON_MAJOR_VER == 3:
        return bytes(out)
    return b''.join([chr(v) for v in out])
def get_scramble(x, y):
    """Compute the random scrambling parameter u = H(pad(x), pad(y))."""
    return bytes2long(hash_digest(hashlib.sha1, pad(x), pad(y)))
def getUserHash(salt, user, password):
    """Derive the private key x = H(salt, H(user ':' password)) as an int."""
    assert isinstance(user, bytes)
    assert isinstance(password, bytes)
    inner = hash_digest(hashlib.sha1, user, b':', password)
    outer = hash_digest(hashlib.sha1, salt, inner)
    return bytes2long(outer)
def client_seed(a=None):
    """
    Generate the client's ephemeral SRP key pair.

    A: Client public key (A = g^a mod N)
    a: Client private key

    Bug fix: ``a`` previously defaulted to ``random.randrange(...)``, which
    Python evaluates only once at import time, so every call without an
    explicit ``a`` reused the same "random" private key.  The key is now
    drawn per call.
    """
    if a is None:
        a = random.randrange(0, 1 << SRP_KEY_SIZE)
    if DEBUG:
        a = DEBUG_PRIVATE_KEY
    N, g, k = get_prime()
    A = pow(g, a, N)
    if DEBUG_PRINT:
        print('a=', binascii.b2a_hex(long2bytes(a)), end='\n')
        print('A=', binascii.b2a_hex(long2bytes(A)), end='\n')
    return A, a
def server_seed(v, b=None):
    """
    Generate the server's ephemeral SRP key pair.

    B: Server public key (B = kv + g^b mod N)
    b: Server private key
    v: the user's password verifier.

    Bug fix: ``b`` previously defaulted to ``random.randrange(...)``, which
    Python evaluates only once at import time, so every call without an
    explicit ``b`` reused the same "random" private key.  The key is now
    drawn per call.
    """
    if b is None:
        b = random.randrange(0, 1 << SRP_KEY_SIZE)
    N, g, k = get_prime()
    if DEBUG:
        b = DEBUG_PRIVATE_KEY
    gb = pow(g, b, N)
    kv = (k * v) % N
    B = (kv + gb) % N
    if DEBUG_PRINT:
        print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
        print('b=', binascii.b2a_hex(long2bytes(b)), end='\n')
        print("gb", binascii.b2a_hex(long2bytes(gb)), end='\n')
        print("k", binascii.b2a_hex(long2bytes(k)), end='\n')
        print("v", binascii.b2a_hex(long2bytes(v)), end='\n')
        print("kv", binascii.b2a_hex(long2bytes(kv)), end='\n')
        print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
    return B, b
def client_session(user, password, salt, A, B, a):
    """
    Compute the client-side shared session key K.

    Both: u = H(A, B)
    User: x = H(s, p) (user enters password)
    User: S = (B - kg^x) ^ (a + ux) (computes session key)
    User: K = H(S)

    Returns the SHA-1 digest of the session secret (bytes).
    """
    N, g, k = get_prime()
    u = get_scramble(A, B)
    x = getUserHash(salt, user, password)  # private key x
    gx = pow(g, x, N)  # g^x
    kgx = (k * gx) % N  # kg^x
    diff = (B - kgx) % N  # B - kg^x
    # NOTE(review): the exponent parts are reduced mod N rather than mod N-1;
    # presumably this mirrors the server side - confirm before changing.
    ux = (u * x) % N
    aux = (a + ux) % N
    session_secret = pow(diff, aux, N)  # (B - kg^x) ^ (a + ux)
    K = hash_digest(hashlib.sha1, session_secret)
    if DEBUG_PRINT:
        print('B=', binascii.b2a_hex(long2bytes(B)), end='\n')
        print('u=', binascii.b2a_hex(long2bytes(u)), end='\n')
        print('x=', binascii.b2a_hex(long2bytes(x)), end='\n')
        print('gx=', binascii.b2a_hex(long2bytes(gx)), end='\n')
        print('kgx=', binascii.b2a_hex(long2bytes(kgx)), end='\n')
        print('diff=', binascii.b2a_hex(long2bytes(diff)), end='\n')
        print('ux=', binascii.b2a_hex(long2bytes(ux)), end='\n')
        print('aux=', binascii.b2a_hex(long2bytes(aux)), end='\n')
        print('session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
        print('session_key:K=', binascii.b2a_hex(K))
    return K
def server_session(user, password, salt, A, B, b):
    """
    Compute the server-side shared session key K.

    Both: u = H(A, B)
    Host: S = (Av^u) ^ b (computes session key)
    Host: K = H(S)

    Returns the SHA-1 digest of the session secret (bytes); must equal the
    client's K for a successful authentication.
    """
    N, g, k = get_prime()
    u = get_scramble(A, B)
    v = get_verifier(user, password, salt)
    vu = pow(v, u, N)  # v^u
    Avu = (A * vu) % N  # Av^u
    session_secret = pow(Avu, b, N)  # (Av^u) ^ b
    K = hash_digest(hashlib.sha1, session_secret)
    if DEBUG_PRINT:
        print('server session_secret=', binascii.b2a_hex(long2bytes(session_secret)), end='\n')
        print('server session hash K=', binascii.b2a_hex(K))
    return K
def get_salt():
    """Produce a fresh random salt of SRP_SALT_SIZE bytes (fixed under DEBUG).

    NOTE(review): draws from the non-cryptographic ``random`` module;
    ``os.urandom`` would be a stronger source for salts - confirm intent.
    """
    values = [random.randrange(0, 256) for _ in range(SRP_SALT_SIZE)]
    salt = bytes(values) if PYTHON_MAJOR_VER == 3 else b''.join([chr(v) for v in values])
    if DEBUG:
        salt = DEBUG_SALT
    if DEBUG_PRINT:
        print('salt=', binascii.b2a_hex(salt), end='\n')
    return salt
def get_verifier(user, password, salt):
    """Compute the password verifier v = g^x mod N kept by the host."""
    N, g, k = get_prime()
    x = getUserHash(salt, user, password)
    return pow(g, x, N)
if __name__ == '__main__':
    """
    A, a, B, b are long.
    salt, M are bytes.
    client_key, serverKey are bytes.
    """
    # Self-test: both sides know the credentials.
    user = b'SYSDBA'
    password = b'masterkey'
    # Client sends A to Server.
    A, a = client_seed()
    # Server sends B and the salt to Client.
    salt = get_salt()
    v = get_verifier(user, password, salt)
    B, b = server_seed(v)
    serverKey = server_session(user, password, salt, A, B, b)
    # Proof using sha1: both sides must derive the same session key.
    M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha1)
    assert clientKey == serverKey
    # Proof using sha256.
    M, clientKey = client_proof(user, password, salt, A, B, a, hashlib.sha256)
    assert clientKey == serverKey
|
nakagami/pyfirebirdsql
|
firebirdsql/decfloat.py
|
dpd_to_int
|
python
|
def dpd_to_int(dpd):
    """Convert a DPD-encoded value to an int in 0-999.

    dpd: densely packed decimal group, a 10-bit unsigned int.
    """
    # Unpack the ten bits; b[0] is the least significant.
    b = [None] * 10
    b[9] = 1 if dpd & 0b1000000000 else 0
    b[8] = 1 if dpd & 0b0100000000 else 0
    b[7] = 1 if dpd & 0b0010000000 else 0
    b[6] = 1 if dpd & 0b0001000000 else 0
    b[5] = 1 if dpd & 0b0000100000 else 0
    b[4] = 1 if dpd & 0b0000010000 else 0
    b[3] = 1 if dpd & 0b0000001000 else 0
    b[2] = 1 if dpd & 0b0000000100 else 0
    b[1] = 1 if dpd & 0b0000000010 else 0
    b[0] = 1 if dpd & 0b0000000001 else 0
    d = [None] * 3
    if b[3] == 0:
        # Three small digits (0-7).
        d[2] = b[9] * 4 + b[8] * 2 + b[7]
        d[1] = b[6] * 4 + b[5] * 2 + b[4]
        d[0] = b[2] * 4 + b[1] * 2 + b[0]
    elif (b[3], b[2], b[1]) == (1, 0, 0):
        # Ones digit is large (8/9).
        d[2] = b[9] * 4 + b[8] * 2 + b[7]
        d[1] = b[6] * 4 + b[5] * 2 + b[4]
        d[0] = 8 + b[0]
    elif (b[3], b[2], b[1]) == (1, 0, 1):
        # Tens digit is large.
        d[2] = b[9] * 4 + b[8] * 2 + b[7]
        d[1] = 8 + b[4]
        d[0] = b[6] * 4 + b[5] * 2 + b[0]
    elif (b[3], b[2], b[1]) == (1, 1, 0):
        # Hundreds digit is large.
        d[2] = 8 + b[7]
        d[1] = b[6] * 4 + b[5] * 2 + b[4]
        d[0] = b[9] * 4 + b[8] * 2 + b[0]
    elif (b[6], b[5], b[3], b[2], b[1]) == (0, 0, 1, 1, 1):
        # Hundreds and tens digits are large.
        d[2] = 8 + b[7]
        d[1] = 8 + b[4]
        d[0] = b[9] * 4 + b[8] * 2 + b[0]
    elif (b[6], b[5], b[3], b[2], b[1]) == (0, 1, 1, 1, 1):
        # Hundreds and ones digits are large.
        d[2] = 8 + b[7]
        d[1] = b[9] * 4 + b[8] * 2 + b[4]
        d[0] = 8 + b[0]
    elif (b[6], b[5], b[3], b[2], b[1]) == (1, 0, 1, 1, 1):
        # Tens and ones digits are large.
        d[2] = b[9] * 4 + b[8] * 2 + b[7]
        d[1] = 8 + b[4]
        d[0] = 8 + b[0]
    elif (b[6], b[5], b[3], b[2], b[1]) == (1, 1, 1, 1, 1):
        # All three digits are large.
        d[2] = 8 + b[7]
        d[1] = 8 + b[4]
        d[0] = 8 + b[0]
    else:
        # Unreachable: the cases above cover every 10-bit pattern.
        raise ValueError('Invalid DPD encoding')
    return d[2] * 100 + d[1] * 10 + d[0]
|
Convert DPD encoded value to int (0-999)
dpd: DPD encoded value. 10bit unsigned int
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/decfloat.py#L47-L100
| null |
##############################################################################
# Copyright (c) 2018, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
import sys
from decimal import Decimal
PYTHON_MAJOR_VER = sys.version_info[0]
if PYTHON_MAJOR_VER == 3:
def ord(c):
return c
def bytes2long(b):
n = 0
for c in b:
n <<= 8
n += ord(c)
return n
def calc_significand(prefix, dpd_bits, num_bits):
"""
prefix: High bits integer value
dpd_bits: dpd encoded bits
num_bits: bit length of dpd_bits
"""
# https://en.wikipedia.org/wiki/Decimal128_floating-point_format#Densely_packed_decimal_significand_field
num_segments = num_bits // 10
segments = []
for i in range(num_segments):
segments.append(dpd_bits & 0b1111111111)
dpd_bits >>= 10
segments.reverse()
v = prefix
for dpd in segments:
v = v * 1000 + dpd_to_int(dpd)
return v
def decimal128_to_sign_digits_exponent(b):
# https://en.wikipedia.org/wiki/Decimal128_floating-point_format
sign = 1 if ord(b[0]) & 0x80 else 0
combination_field = ((ord(b[0]) & 0x7f) << 10) + (ord(b[1]) << 2) + (ord(b[2]) >> 6)
if (combination_field & 0b11111000000000000) == 0b11111000000000000:
if sign:
return Decimal('-NaN')
else:
return Decimal('NaN')
elif (combination_field & 0b11111000000000000) == 0b11110000000000000:
if sign:
return Decimal('-Infinity')
else:
return Decimal('Infinity')
elif (combination_field & 0b11000000000000000) == 0b00000000000000000:
exponent = 0b00000000000000 + (combination_field & 0b111111111111)
significand_prefix = (combination_field >> 12) & 0b111
elif (combination_field & 0b11000000000000000) == 0b01000000000000000:
exponent = 0b01000000000000 + (combination_field & 0b111111111111)
significand_prefix = (combination_field >> 12) & 0b111
elif (combination_field & 0b11000000000000000) == 0b10000000000000000:
exponent = 0b10000000000000 + (combination_field & 0b111111111111)
significand_prefix = (combination_field >> 12) & 0b111
elif (combination_field & 0b11110000000000000) == 0b11000000000000000:
exponent = 0b00000000000000 + (combination_field & 0b111111111111)
significand_prefix = 8 + (combination_field >> 12) & 0b1
elif (combination_field & 0b11110000000000000) == 0b11010000000000000:
exponent = 0b01000000000000 + (combination_field & 0b111111111111)
significand_prefix = 8 + (combination_field >> 12) & 0b1
elif (combination_field & 0b11110000000000000) == 0b11100000000000000:
exponent = 0b10000000000000 + (combination_field & 0b111111111111)
significand_prefix = 8 + (combination_field >> 12) & 0b1
else:
raise ValueError('decimal128 value error')
exponent -= 6176
dpd_bits = bytes2long(b) & 0b11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111
digits = calc_significand(significand_prefix, dpd_bits, 110)
return sign, digits, exponent
def decimal_fixed_to_decimal(b, scale):
v = decimal128_to_sign_digits_exponent(b)
if isinstance(v, Decimal):
return v
sign, digits, _ = v
return Decimal((sign, Decimal(digits).as_tuple()[1], scale))
def decimal64_to_decimal(b):
"decimal64 bytes to Decimal"
# https://en.wikipedia.org/wiki/Decimal64_floating-point_format
sign = 1 if ord(b[0]) & 0x80 else 0
combination_field = (ord(b[0]) >> 2) & 0b11111
exponent = ((ord(b[0]) & 0b11) << 6) + ((ord(b[1]) >> 2) & 0b111111)
dpd_bits = bytes2long(b) & 0b11111111111111111111111111111111111111111111111111
if combination_field == 0b11111:
if sign:
return Decimal('-NaN')
else:
return Decimal('NaN')
elif combination_field == 0b11110:
if sign:
return Decimal('-Infinity')
else:
return Decimal('Infinity')
elif (combination_field & 0b11000) == 0b00000:
exponent = 0b0000000000 + exponent
significand_prefix = combination_field & 0b111
elif (combination_field & 0b11000) == 0b01000:
exponent = 0b0100000000 + exponent
significand_prefix = combination_field & 0b111
elif (combination_field & 0b11000) == 0b10000:
exponent = 0b1000000000 + exponent
significand_prefix = combination_field & 0b111
elif (combination_field & 0b11110) == 0b11000:
exponent = 0b0000000000 + exponent
significand_prefix = 8 + combination_field & 0b1
elif (combination_field & 0b11110) == 0b11010:
exponent = 0b0100000000 + exponent
significand_prefix = 8 + combination_field & 0b1
elif (combination_field & 0b11110) == 0b11100:
exponent = 0b1000000000 + exponent
significand_prefix = 8 + combination_field & 0b1
else:
raise ValueError('decimal64 value error')
digits = calc_significand(significand_prefix, dpd_bits, 50)
exponent -= 398
return Decimal((sign, Decimal(digits).as_tuple()[1], exponent))
def decimal128_to_decimal(b):
"decimal128 bytes to Decimal"
v = decimal128_to_sign_digits_exponent(b)
if isinstance(v, Decimal):
return v
sign, digits, exponent = v
return Decimal((sign, Decimal(digits).as_tuple()[1], exponent))
|
nakagami/pyfirebirdsql
|
firebirdsql/decfloat.py
|
calc_significand
|
python
|
def calc_significand(prefix, dpd_bits, num_bits):
    """Assemble the integer significand from a leading digit and DPD groups.

    prefix: high (leading) digit already decoded from the combination field
    dpd_bits: the densely-packed-decimal continuation bits
    num_bits: bit length of dpd_bits (a multiple of 10)
    """
    # https://en.wikipedia.org/wiki/Decimal128_floating-point_format#Densely_packed_decimal_significand_field
    groups = []
    remaining = dpd_bits
    for _ in range(num_bits // 10):
        groups.append(remaining & 0b1111111111)
        remaining >>= 10
    # Groups were collected least-significant first; fold them in msb order.
    value = prefix
    for group in reversed(groups):
        value = value * 1000 + dpd_to_int(group)
    return value
|
prefix: High bits integer value
dpd_bits: dpd encoded bits
num_bits: bit length of dpd_bits
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/decfloat.py#L103-L121
|
[
"def dpd_to_int(dpd):\n \"\"\"\n Convert DPD encodined value to int (0-999)\n dpd: DPD encoded value. 10bit unsigned int\n \"\"\"\n b = [None] * 10\n b[9] = 1 if dpd & 0b1000000000 else 0\n b[8] = 1 if dpd & 0b0100000000 else 0\n b[7] = 1 if dpd & 0b0010000000 else 0\n b[6] = 1 if dpd & 0b0001000000 else 0\n b[5] = 1 if dpd & 0b0000100000 else 0\n b[4] = 1 if dpd & 0b0000010000 else 0\n b[3] = 1 if dpd & 0b0000001000 else 0\n b[2] = 1 if dpd & 0b0000000100 else 0\n b[1] = 1 if dpd & 0b0000000010 else 0\n b[0] = 1 if dpd & 0b0000000001 else 0\n\n d = [None] * 3\n if b[3] == 0:\n d[2] = b[9] * 4 + b[8] * 2 + b[7]\n d[1] = b[6] * 4 + b[5] * 2 + b[4]\n d[0] = b[2] * 4 + b[1] * 2 + b[0]\n elif (b[3], b[2], b[1]) == (1, 0, 0):\n d[2] = b[9] * 4 + b[8] * 2 + b[7]\n d[1] = b[6] * 4 + b[5] * 2 + b[4]\n d[0] = 8 + b[0]\n elif (b[3], b[2], b[1]) == (1, 0, 1):\n d[2] = b[9] * 4 + b[8] * 2 + b[7]\n d[1] = 8 + b[4]\n d[0] = b[6] * 4 + b[5] * 2 + b[0]\n elif (b[3], b[2], b[1]) == (1, 1, 0):\n d[2] = 8 + b[7]\n d[1] = b[6] * 4 + b[5] * 2 + b[4]\n d[0] = b[9] * 4 + b[8] * 2 + b[0]\n elif (b[6], b[5], b[3], b[2], b[1]) == (0, 0, 1, 1, 1):\n d[2] = 8 + b[7]\n d[1] = 8 + b[4]\n d[0] = b[9] * 4 + b[8] * 2 + b[0]\n elif (b[6], b[5], b[3], b[2], b[1]) == (0, 1, 1, 1, 1):\n d[2] = 8 + b[7]\n d[1] = b[9] * 4 + b[8] * 2 + b[4]\n d[0] = 8 + b[0]\n elif (b[6], b[5], b[3], b[2], b[1]) == (1, 0, 1, 1, 1):\n d[2] = b[9] * 4 + b[8] * 2 + b[7]\n d[1] = 8 + b[4]\n d[0] = 8 + b[0]\n elif (b[6], b[5], b[3], b[2], b[1]) == (1, 1, 1, 1, 1):\n d[2] = 8 + b[7]\n d[1] = 8 + b[4]\n d[0] = 8 + b[0]\n else:\n raise ValueError('Invalid DPD encoding')\n\n return d[2] * 100 + d[1] * 10 + d[0]\n"
] |
##############################################################################
# Copyright (c) 2018, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
import sys
from decimal import Decimal
PYTHON_MAJOR_VER = sys.version_info[0]
if PYTHON_MAJOR_VER == 3:
def ord(c):
return c
def bytes2long(b):
    """Interpret a byte/character string as a big-endian unsigned integer."""
    value = 0
    for ch in b:
        value = (value << 8) + ord(ch)
    return value
def dpd_to_int(dpd):
    """Convert a DPD-encoded value to an int in 0-999.

    dpd: densely packed decimal group, a 10-bit unsigned int.
    """
    bit = [(dpd >> i) & 1 for i in range(10)]

    def three(hi, mid, lo):
        # Small digit (0-7) built from three of the group's bits.
        return bit[hi] * 4 + bit[mid] * 2 + bit[lo]

    if bit[3] == 0:
        # Three small digits.
        hundreds = three(9, 8, 7)
        tens = three(6, 5, 4)
        ones = three(2, 1, 0)
    elif (bit[2], bit[1]) == (0, 0):
        # Ones digit is large (8/9).
        hundreds = three(9, 8, 7)
        tens = three(6, 5, 4)
        ones = 8 + bit[0]
    elif (bit[2], bit[1]) == (0, 1):
        # Tens digit is large.
        hundreds = three(9, 8, 7)
        tens = 8 + bit[4]
        ones = three(6, 5, 0)
    elif (bit[2], bit[1]) == (1, 0):
        # Hundreds digit is large.
        hundreds = 8 + bit[7]
        tens = three(6, 5, 4)
        ones = three(9, 8, 0)
    # Below here bits 3..1 are all set: at least two large digits,
    # discriminated by bits 6 and 5.
    elif (bit[6], bit[5]) == (0, 0):
        hundreds = 8 + bit[7]
        tens = 8 + bit[4]
        ones = three(9, 8, 0)
    elif (bit[6], bit[5]) == (0, 1):
        hundreds = 8 + bit[7]
        tens = three(9, 8, 4)
        ones = 8 + bit[0]
    elif (bit[6], bit[5]) == (1, 0):
        hundreds = three(9, 8, 7)
        tens = 8 + bit[4]
        ones = 8 + bit[0]
    else:
        # All three digits are large.
        hundreds = 8 + bit[7]
        tens = 8 + bit[4]
        ones = 8 + bit[0]
    return hundreds * 100 + tens * 10 + ones
def decimal128_to_sign_digits_exponent(b):
    """Decode 16 IEEE 754-2008 decimal128 bytes.

    Returns ``(sign, digits, exponent)`` for finite values (sign 0/1,
    integer significand, unbiased exponent), or a ``Decimal`` directly for
    NaN/Infinity.
    https://en.wikipedia.org/wiki/Decimal128_floating-point_format

    Bug fix: ``8 + (combination_field >> 12) & 0b1`` parses as
    ``(8 + (combination_field >> 12)) & 0b1`` because ``+`` binds tighter
    than ``&`` in Python, which dropped the leading 8 of the significand
    prefix for values whose most significant digit is 8 or 9.  Now
    parenthesized as ``8 + (... & 0b1)``.
    """
    sign = 1 if ord(b[0]) & 0x80 else 0
    # 17-bit combination field: 7 bits of b[0], 8 of b[1], top 2 of b[2].
    combination_field = ((ord(b[0]) & 0x7f) << 10) + (ord(b[1]) << 2) + (ord(b[2]) >> 6)
    if (combination_field & 0b11111000000000000) == 0b11111000000000000:
        # Five leading ones -> NaN.
        if sign:
            return Decimal('-NaN')
        else:
            return Decimal('NaN')
    elif (combination_field & 0b11111000000000000) == 0b11110000000000000:
        # 11110 -> Infinity.
        if sign:
            return Decimal('-Infinity')
        else:
            return Decimal('Infinity')
    elif (combination_field & 0b11000000000000000) == 0b00000000000000000:
        # Leading digit 0-7; exponent MSBs 00.
        exponent = 0b00000000000000 + (combination_field & 0b111111111111)
        significand_prefix = (combination_field >> 12) & 0b111
    elif (combination_field & 0b11000000000000000) == 0b01000000000000000:
        exponent = 0b01000000000000 + (combination_field & 0b111111111111)
        significand_prefix = (combination_field >> 12) & 0b111
    elif (combination_field & 0b11000000000000000) == 0b10000000000000000:
        exponent = 0b10000000000000 + (combination_field & 0b111111111111)
        significand_prefix = (combination_field >> 12) & 0b111
    elif (combination_field & 0b11110000000000000) == 0b11000000000000000:
        # Leading digit 8-9; exponent MSBs 00.
        exponent = 0b00000000000000 + (combination_field & 0b111111111111)
        significand_prefix = 8 + ((combination_field >> 12) & 0b1)
    elif (combination_field & 0b11110000000000000) == 0b11010000000000000:
        exponent = 0b01000000000000 + (combination_field & 0b111111111111)
        significand_prefix = 8 + ((combination_field >> 12) & 0b1)
    elif (combination_field & 0b11110000000000000) == 0b11100000000000000:
        exponent = 0b10000000000000 + (combination_field & 0b111111111111)
        significand_prefix = 8 + ((combination_field >> 12) & 0b1)
    else:
        raise ValueError('decimal128 value error')
    exponent -= 6176  # remove the decimal128 exponent bias
    dpd_bits = bytes2long(b) & ((1 << 110) - 1)  # low 110 bits: DPD digits
    digits = calc_significand(significand_prefix, dpd_bits, 110)
    return sign, digits, exponent
def decimal_fixed_to_decimal(b, scale):
    """Decode a fixed-point decimal128 value to a ``Decimal`` with ``scale``.

    The stored exponent is discarded and replaced by ``scale`` - presumably
    it comes from the column metadata (negative for fractional digits);
    TODO confirm against the caller.  NaN/Infinity decode directly.
    """
    v = decimal128_to_sign_digits_exponent(b)
    if isinstance(v, Decimal):
        return v
    sign, digits, _ = v
    return Decimal((sign, Decimal(digits).as_tuple()[1], scale))
def decimal64_to_decimal(b):
    """Decode an IEEE 754-2008 decimal64 value into a ``Decimal``.

    b: 8-byte big-endian decimal64 value (DPD significand encoding).

    https://en.wikipedia.org/wiki/Decimal64_floating-point_format
    """
    sign = 1 if ord(b[0]) & 0x80 else 0
    combination_field = (ord(b[0]) >> 2) & 0b11111
    # 8 exponent continuation bits straddle the first two bytes.
    exponent = ((ord(b[0]) & 0b11) << 6) + ((ord(b[1]) >> 2) & 0b111111)
    # Low 50 bits carry the DPD-encoded significand continuation.
    dpd_bits = bytes2long(b) & ((1 << 50) - 1)
    if combination_field == 0b11111:
        return Decimal('-NaN') if sign else Decimal('NaN')
    if combination_field == 0b11110:
        return Decimal('-Infinity') if sign else Decimal('Infinity')
    if (combination_field & 0b11000) != 0b11000:
        # Leading digit 0-7: the two exponent MSBs are the field's top bits.
        exponent += (combination_field >> 3) << 8
        significand_prefix = combination_field & 0b111
    else:
        # Leading digit 8-9: the field looks like ``11eed``.
        # BUG FIX: the original '8 + combination_field & 0b1' parsed as
        # '(8 + combination_field) & 1' because '+' binds tighter than '&',
        # so the leading digit came out 0/1 instead of 8/9.
        exponent += ((combination_field >> 1) & 0b11) << 8
        significand_prefix = 8 + (combination_field & 0b1)
    digits = calc_significand(significand_prefix, dpd_bits, 50)
    exponent -= 398  # remove the decimal64 exponent bias
    return Decimal((sign, Decimal(digits).as_tuple()[1], exponent))
def decimal128_to_decimal(b):
    "decimal128 bytes to Decimal"
    decoded = decimal128_to_sign_digits_exponent(b)
    if isinstance(decoded, Decimal):
        # NaN / Infinity were already materialized by the decoder.
        return decoded
    sign, digits, exponent = decoded
    digit_tuple = Decimal(digits).as_tuple()[1]
    return Decimal((sign, digit_tuple, exponent))
|
nakagami/pyfirebirdsql
|
firebirdsql/decfloat.py
|
decimal128_to_decimal
|
python
|
def decimal128_to_decimal(b):
"decimal128 bytes to Decimal"
v = decimal128_to_sign_digits_exponent(b)
if isinstance(v, Decimal):
return v
sign, digits, exponent = v
return Decimal((sign, Decimal(digits).as_tuple()[1], exponent))
|
decimal128 bytes to Decimal
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/decfloat.py#L216-L222
|
[
"def decimal128_to_sign_digits_exponent(b):\n # https://en.wikipedia.org/wiki/Decimal128_floating-point_format\n sign = 1 if ord(b[0]) & 0x80 else 0\n combination_field = ((ord(b[0]) & 0x7f) << 10) + (ord(b[1]) << 2) + (ord(b[2]) >> 6)\n if (combination_field & 0b11111000000000000) == 0b11111000000000000:\n if sign:\n return Decimal('-NaN')\n else:\n return Decimal('NaN')\n elif (combination_field & 0b11111000000000000) == 0b11110000000000000:\n if sign:\n return Decimal('-Infinity')\n else:\n return Decimal('Infinity')\n elif (combination_field & 0b11000000000000000) == 0b00000000000000000:\n exponent = 0b00000000000000 + (combination_field & 0b111111111111)\n significand_prefix = (combination_field >> 12) & 0b111\n elif (combination_field & 0b11000000000000000) == 0b01000000000000000:\n exponent = 0b01000000000000 + (combination_field & 0b111111111111)\n significand_prefix = (combination_field >> 12) & 0b111\n elif (combination_field & 0b11000000000000000) == 0b10000000000000000:\n exponent = 0b10000000000000 + (combination_field & 0b111111111111)\n significand_prefix = (combination_field >> 12) & 0b111\n elif (combination_field & 0b11110000000000000) == 0b11000000000000000:\n exponent = 0b00000000000000 + (combination_field & 0b111111111111)\n significand_prefix = 8 + (combination_field >> 12) & 0b1\n elif (combination_field & 0b11110000000000000) == 0b11010000000000000:\n exponent = 0b01000000000000 + (combination_field & 0b111111111111)\n significand_prefix = 8 + (combination_field >> 12) & 0b1\n elif (combination_field & 0b11110000000000000) == 0b11100000000000000:\n exponent = 0b10000000000000 + (combination_field & 0b111111111111)\n significand_prefix = 8 + (combination_field >> 12) & 0b1\n else:\n raise ValueError('decimal128 value error')\n exponent -= 6176\n\n dpd_bits = bytes2long(b) & 0b11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111\n digits = calc_significand(significand_prefix, dpd_bits, 
110)\n return sign, digits, exponent\n"
] |
##############################################################################
# Copyright (c) 2018, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
import sys
from decimal import Decimal
# Major Python version (2 or 3); used to share one code path between them.
PYTHON_MAJOR_VER = sys.version_info[0]

if PYTHON_MAJOR_VER == 3:
    def ord(c):
        # On Python 3 iterating bytes already yields ints, so shadow the
        # builtin ord() with an identity function.
        return c
def bytes2long(b):
    """Interpret ``b`` as a big-endian unsigned integer."""
    value = 0
    for position, c in enumerate(reversed(b)):
        value += ord(c) * (256 ** position)
    return value
def dpd_to_int(dpd):
    """Convert a DPD-encoded declet to an int (0-999).

    dpd: densely-packed-decimal encoded value, 10-bit unsigned int.
    Raises ValueError for a bit pattern outside the DPD table.
    """
    # Unpack the ten bits, LSB first: bit[i] is bit i of the input.
    bit = [(dpd >> i) & 1 for i in range(10)]

    def triple(hi, mid, lo):
        # Three plain binary bits -> one small decimal digit (0-7).
        return bit[hi] * 4 + bit[mid] * 2 + bit[lo]

    selector = (bit[3], bit[2], bit[1])
    wide_selector = (bit[6], bit[5]) + selector
    if bit[3] == 0:
        # All three digits are small.
        hundreds, tens, units = triple(9, 8, 7), triple(6, 5, 4), triple(2, 1, 0)
    elif selector == (1, 0, 0):
        hundreds, tens, units = triple(9, 8, 7), triple(6, 5, 4), 8 + bit[0]
    elif selector == (1, 0, 1):
        hundreds, tens, units = triple(9, 8, 7), 8 + bit[4], triple(6, 5, 0)
    elif selector == (1, 1, 0):
        hundreds, tens, units = 8 + bit[7], triple(6, 5, 4), triple(9, 8, 0)
    elif wide_selector == (0, 0, 1, 1, 1):
        hundreds, tens, units = 8 + bit[7], 8 + bit[4], triple(9, 8, 0)
    elif wide_selector == (0, 1, 1, 1, 1):
        hundreds, tens, units = 8 + bit[7], triple(9, 8, 4), 8 + bit[0]
    elif wide_selector == (1, 0, 1, 1, 1):
        hundreds, tens, units = triple(9, 8, 7), 8 + bit[4], 8 + bit[0]
    elif wide_selector == (1, 1, 1, 1, 1):
        hundreds, tens, units = 8 + bit[7], 8 + bit[4], 8 + bit[0]
    else:
        raise ValueError('Invalid DPD encoding')
    return hundreds * 100 + tens * 10 + units
def calc_significand(prefix, dpd_bits, num_bits):
    """Assemble the decimal coefficient from the DPD continuation field.

    prefix:   most significant digit (decoded from the combination field)
    dpd_bits: densely-packed-decimal encoded continuation bits
    num_bits: bit length of ``dpd_bits``

    https://en.wikipedia.org/wiki/Decimal128_floating-point_format#Densely_packed_decimal_significand_field
    """
    # Process the declets most-significant first; each 10-bit group
    # contributes three decimal digits.
    value = prefix
    for segment_index in reversed(range(num_bits // 10)):
        declet = (dpd_bits >> (segment_index * 10)) & 0b1111111111
        value = value * 1000 + dpd_to_int(declet)
    return value
def decimal128_to_sign_digits_exponent(b):
    """Decode an IEEE 754-2008 decimal128 value.

    b: 16-byte big-endian decimal128 value (DPD significand encoding).

    Returns ``(sign, digits, exponent)`` for finite values, where ``sign``
    is 0/1, ``digits`` the integer coefficient and ``exponent`` the
    unbiased decimal exponent; NaN and Infinity encodings are returned
    directly as ``Decimal`` instances.

    https://en.wikipedia.org/wiki/Decimal128_floating-point_format
    """
    sign = 1 if ord(b[0]) & 0x80 else 0
    # 17-bit combination field: leading-digit/special selector plus 12
    # exponent continuation bits.
    combination_field = ((ord(b[0]) & 0x7f) << 10) + (ord(b[1]) << 2) + (ord(b[2]) >> 6)
    top = combination_field >> 12  # 5 leading bits
    if top == 0b11111:
        return Decimal('-NaN') if sign else Decimal('NaN')
    if top == 0b11110:
        return Decimal('-Infinity') if sign else Decimal('Infinity')
    if (top & 0b11000) != 0b11000:
        # Leading digit 0-7: the two exponent MSBs are the top field bits.
        exponent_msb = top >> 3
        significand_prefix = top & 0b111
    else:
        # Leading digit 8-9: the field looks like ``11eed``.
        # BUG FIX: the original '8 + x & 0b1' parsed as '(8 + x) & 1'
        # because '+' binds tighter than '&', so the leading digit came
        # out 0/1 instead of 8/9.
        exponent_msb = (top >> 1) & 0b11
        significand_prefix = 8 + (top & 0b1)
    exponent = (exponent_msb << 12) + (combination_field & 0b111111111111)
    exponent -= 6176  # remove the decimal128 exponent bias

    # Low 110 bits hold the DPD-encoded significand continuation.
    dpd_bits = bytes2long(b) & ((1 << 110) - 1)
    digits = calc_significand(significand_prefix, dpd_bits, 110)
    return sign, digits, exponent
def decimal_fixed_to_decimal(b, scale):
    """Decode a fixed-scale DECFLOAT value: the decimal128 coefficient is
    combined with the externally supplied ``scale`` instead of the stored
    exponent."""
    decoded = decimal128_to_sign_digits_exponent(b)
    if isinstance(decoded, Decimal):
        return decoded  # NaN / Infinity pass through unchanged
    sign, digits, _unused_exponent = decoded
    digit_tuple = Decimal(digits).as_tuple()[1]
    return Decimal((sign, digit_tuple, scale))
def decimal64_to_decimal(b):
    """Decode an IEEE 754-2008 decimal64 value into a ``Decimal``.

    b: 8-byte big-endian decimal64 value (DPD significand encoding).

    https://en.wikipedia.org/wiki/Decimal64_floating-point_format
    """
    sign = 1 if ord(b[0]) & 0x80 else 0
    combination_field = (ord(b[0]) >> 2) & 0b11111
    # 8 exponent continuation bits straddle the first two bytes.
    exponent = ((ord(b[0]) & 0b11) << 6) + ((ord(b[1]) >> 2) & 0b111111)
    # Low 50 bits carry the DPD-encoded significand continuation.
    dpd_bits = bytes2long(b) & ((1 << 50) - 1)
    if combination_field == 0b11111:
        return Decimal('-NaN') if sign else Decimal('NaN')
    if combination_field == 0b11110:
        return Decimal('-Infinity') if sign else Decimal('Infinity')
    if (combination_field & 0b11000) != 0b11000:
        # Leading digit 0-7: the two exponent MSBs are the field's top bits.
        exponent += (combination_field >> 3) << 8
        significand_prefix = combination_field & 0b111
    else:
        # Leading digit 8-9: the field looks like ``11eed``.
        # BUG FIX: the original '8 + combination_field & 0b1' parsed as
        # '(8 + combination_field) & 1' because '+' binds tighter than '&',
        # so the leading digit came out 0/1 instead of 8/9.
        exponent += ((combination_field >> 1) & 0b11) << 8
        significand_prefix = 8 + (combination_field & 0b1)
    digits = calc_significand(significand_prefix, dpd_bits, 50)
    exponent -= 398  # remove the decimal64 exponent bias
    return Decimal((sign, Decimal(digits).as_tuple()[1], exponent))
|
nakagami/pyfirebirdsql
|
firebirdsql/wireprotocol.py
|
WireProtocol.str_to_bytes
|
python
|
def str_to_bytes(self, s):
"convert str to bytes"
if (PYTHON_MAJOR_VER == 3 or
(PYTHON_MAJOR_VER == 2 and type(s) == unicode)):
return s.encode(charset_map.get(self.charset, self.charset))
return s
|
convert str to bytes
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/wireprotocol.py#L227-L232
| null |
class WireProtocol(object):
    """Firebird wire-protocol implementation.

    Provides the protocol op-code constants, low-level socket helpers and
    one ``_op_*`` method per protocol message.  Relies on attributes set
    by the concrete connection class (``sock``, ``db_handle``, ``charset``,
    ``user``, ``password``, ``timeout``, ...).
    """
    buffer_length = 1024  # reply buffer size requested from the server

    # Wire-protocol operation codes.
    op_connect = 1
    op_exit = 2
    op_accept = 3
    op_reject = 4
    op_protocol = 5
    op_disconnect = 6
    op_response = 9
    op_attach = 19
    op_create = 20
    op_detach = 21
    op_transaction = 29
    op_commit = 30
    op_rollback = 31
    op_open_blob = 35
    op_get_segment = 36
    op_put_segment = 37
    op_close_blob = 39
    op_info_database = 40
    op_info_transaction = 42
    op_batch_segments = 44
    op_que_events = 48
    op_cancel_events = 49
    op_commit_retaining = 50
    op_event = 52
    op_connect_request = 53
    # NOTE(review): same value as op_connect_request -- confirm against the
    # Firebird protocol headers.
    op_aux_connect = 53
    op_create_blob2 = 57
    op_allocate_statement = 62
    op_execute = 63
    op_exec_immediate = 64
    op_fetch = 65
    op_fetch_response = 66
    op_free_statement = 67
    op_prepare_statement = 68
    op_info_sql = 70
    op_dummy = 71
    op_execute2 = 76
    op_sql_response = 78
    op_drop_database = 81
    op_service_attach = 82
    op_service_detach = 83
    op_service_info = 84
    op_service_start = 85
    op_rollback_retaining = 86
    # Operations added in Firebird 3 (protocol version 13).
    op_update_account_info = 87
    op_authenticate_user = 88
    op_partial = 89
    op_trusted_auth = 90
    op_cancel = 91
    op_cont_auth = 92
    op_ping = 93
    op_accept_data = 94
    op_abort_aux_connection = 95
    op_crypt = 96
    op_crypt_key_callback = 97
    op_cond_accept = 98
    def __init__(self):
        # Both attributes are overwritten during the connect handshake
        # (see _parse_connect_response()).
        self.accept_plugin_name = ''
        self.auth_data = b''
    def recv_channel(self, nbytes, word_alignment=False):
        """Receive exactly ``nbytes`` bytes from the server socket.

        When ``word_alignment`` is set, additionally consumes the padding
        that rounds the message up to a 4-byte XDR boundary; the padding
        is stripped from the returned value.

        Raises ``OperationalError`` on timeout or premature EOF.
        """
        n = nbytes
        if word_alignment and (n % 4):
            n += 4 - nbytes % 4  # 4 bytes word alignment
        r = bs([])
        while n:
            # select() implements the read timeout; an empty ready-list
            # means the wait expired.
            if (self.timeout is not None and select.select([self.sock._sock], [], [], self.timeout)[0] == []):
                break
            b = self.sock.recv(n)
            if not b:
                break  # connection closed by peer
            r += b
            n -= len(b)
        if len(r) < nbytes:
            raise OperationalError('Can not recv() packets')
        return r[:nbytes]
def bytes_to_str(self, b):
"convert bytes array to raw string"
if PYTHON_MAJOR_VER == 3:
return b.decode(charset_map.get(self.charset, self.charset))
return b
def bytes_to_ustr(self, b):
"convert bytes array to unicode string"
return b.decode(charset_map.get(self.charset, self.charset))
def _parse_status_vector(self):
sql_code = 0
gds_codes = set()
message = ''
n = bytes_to_bint(self.recv_channel(4))
while n != isc_arg_end:
if n == isc_arg_gds:
gds_code = bytes_to_bint(self.recv_channel(4))
if gds_code:
gds_codes.add(gds_code)
message += messages.get(gds_code, '@1')
num_arg = 0
elif n == isc_arg_number:
num = bytes_to_bint(self.recv_channel(4))
if gds_code == 335544436:
sql_code = num
num_arg += 1
message = message.replace('@' + str(num_arg), str(num))
elif n == isc_arg_string:
nbytes = bytes_to_bint(self.recv_channel(4))
s = str(self.recv_channel(nbytes, word_alignment=True))
num_arg += 1
message = message.replace('@' + str(num_arg), s)
elif n == isc_arg_interpreted:
nbytes = bytes_to_bint(self.recv_channel(4))
s = str(self.recv_channel(nbytes, word_alignment=True))
message += s
elif n == isc_arg_sql_state:
nbytes = bytes_to_bint(self.recv_channel(4))
s = str(self.recv_channel(nbytes, word_alignment=True))
n = bytes_to_bint(self.recv_channel(4))
return (gds_codes, sql_code, message)
    def _parse_op_response(self):
        """Read the body of an op_response packet.

        Returns ``(handle, object_id, buffer)``.  Raises IntegrityError or
        OperationalError depending on the status vector; code 335544321 is
        reported as a Python warning and 335544434 is deliberately ignored.
        """
        b = self.recv_channel(16)
        h = bytes_to_bint(b[0:4])  # Object handle
        oid = b[4:12]  # Object ID
        buf_len = bytes_to_bint(b[12:])  # buffer length
        buf = self.recv_channel(buf_len, word_alignment=True)

        (gds_codes, sql_code, message) = self._parse_status_vector()
        # Constraint-violation engine codes map to IntegrityError.
        if gds_codes.intersection([
            335544838, 335544879, 335544880, 335544466, 335544665, 335544347, 335544558
        ]):
            raise IntegrityError(message, gds_codes, sql_code)
        elif gds_codes.intersection([335544321]):
            warnings.warn(message)
        elif (sql_code or message) and not gds_codes.intersection([335544434]):
            raise OperationalError(message, gds_codes, sql_code)
        return (h, oid, buf)
    def _parse_op_event(self):
        """Read the body of an op_event packet.

        Returns ``(db_handle, event_id, {})``; the per-event-name counts
        are not parsed yet (see TODOs).
        """
        b = self.recv_channel(4096)  # too large TODO: read step by step
        # TODO: parse event name
        db_handle = bytes_to_bint(b[0:4])
        event_id = bytes_to_bint(b[-4:])
        return (db_handle, event_id, {})
def _create_blob(self, trans_handle, b):
self._op_create_blob2(trans_handle)
(blob_handle, blob_id, buf) = self._op_response()
i = 0
while i < len(b):
self._op_put_segment(blob_handle, b[i:i+BLOB_SEGMENT_SIZE])
(h, oid, buf) = self._op_response()
i += BLOB_SEGMENT_SIZE
self._op_close_blob(blob_handle)
(h, oid, buf) = self._op_response()
return blob_id
    def params_to_blr(self, trans_handle, params):
        """Convert a parameter sequence to (BLR, values) wire format.

        Builds the BLR type descriptions and the matching packed value
        buffer for op_execute/op_execute2.  Byte strings longer than
        MAX_CHAR_LENGTH are stored as blobs in ``trans_handle`` and passed
        by id.
        """
        ln = len(params) * 2
        # BLR header: version5, begin, message 0 with <ln> fields.
        blr = bs([5, 2, 4, 0, ln & 255, ln >> 8])
        if self.accept_version < PROTOCOL_VERSION13:
            values = bs([])
        else:
            # start with null indicator bitmap
            null_indicator = 0
            for i, p in enumerate(params):
                if p is None:
                    null_indicator |= (1 << i)
            n = len(params) // 8
            if len(params) % 8 != 0:
                n += 1
            if n % 4:  # padding
                n += 4 - n % 4
            null_indicator_bytes = []
            for i in range(n):
                null_indicator_bytes.append(null_indicator & 255)
                null_indicator >>= 8
            values = bs(null_indicator_bytes)
        for p in params:
            # Text parameters are encoded with the connection charset first.
            if (
                (PYTHON_MAJOR_VER == 2 and type(p) == unicode) or
                (PYTHON_MAJOR_VER == 3 and type(p) == str)
            ):
                p = self.str_to_bytes(p)
            t = type(p)
            if p is None:
                v = bs([])
                blr += bs([14, 0, 0])  # blr_text, length 0
            elif (
                (PYTHON_MAJOR_VER == 2 and t == str) or
                (PYTHON_MAJOR_VER == 3 and t == bytes)
            ):
                if len(p) > MAX_CHAR_LENGTH:
                    # Too long for an inline string: store as blob, send id.
                    v = self._create_blob(trans_handle, p)
                    blr += bs([9, 0])  # blr_quad (blob id)
                else:
                    v = p
                    nbytes = len(v)
                    pad_length = ((4-nbytes) & 3)
                    v += bs([0]) * pad_length  # pad to 4-byte boundary
                    blr += bs([14, nbytes & 255, nbytes >> 8])  # blr_text
            elif t == int:
                v = bint_to_bytes(p, 4)
                blr += bs([8, 0])  # blr_long
            elif t == float and p == float("inf"):
                v = b'\x7f\x80\x00\x00'  # IEEE 754 single-precision +inf
                blr += bs([10])  # blr_float
            elif t == decimal.Decimal or t == float:
                # Send as a scaled 64-bit integer (blr_int64).
                if t == float:
                    p = decimal.Decimal(str(p))
                (sign, digits, exponent) = p.as_tuple()
                v = 0
                ln = len(digits)
                for i in range(ln):
                    v += digits[i] * (10 ** (ln - i - 1))
                if sign:
                    v *= -1
                v = bint_to_bytes(v, 8)
                if exponent < 0:
                    exponent += 256  # scale travels as an unsigned byte
                blr += bs([16, exponent])  # blr_int64 with scale
            elif t == datetime.date:
                v = convert_date(p)
                blr += bs([12])  # blr_sql_date
            elif t == datetime.time:
                if p.tzinfo:
                    v = convert_time_tz(p)
                    blr += bs([28])  # blr_sql_time_tz
                else:
                    v = convert_time(p)
                    blr += bs([13])  # blr_sql_time
            elif t == datetime.datetime:
                if p.tzinfo:
                    v = convert_timestamp_tz(p)
                    blr += bs([29])  # blr_timestamp_tz
                else:
                    v = convert_timestamp(p)
                    blr += bs([35])  # blr_timestamp
            elif t == bool:
                v = bs([1, 0, 0, 0]) if p else bs([0, 0, 0, 0])
                blr += bs([23])  # blr_bool
            else:  # fallback, convert to string
                p = p.__repr__()
                if PYTHON_MAJOR_VER == 3 or (PYTHON_MAJOR_VER == 2 and type(p) == unicode):
                    p = self.str_to_bytes(p)
                v = p
                nbytes = len(v)
                pad_length = ((4-nbytes) & 3)
                v += bs([0]) * pad_length
                blr += bs([14, nbytes & 255, nbytes >> 8])
            # Companion null-indicator field (blr_short, scale 0) for each
            # parameter -- hence two BLR entries per parameter above.
            blr += bs([7, 0])
            values += v
            if self.accept_version < PROTOCOL_VERSION13:
                # Pre-v13: a 4-byte null flag follows each value.
                values += bs([0]) * 4 if not p is None else bs([0xff, 0xff, 0xff, 0xff])
        blr += bs([255, 76])  # [blr_end, blr_eoc]
        return blr, values
    def uid(self, auth_plugin_name, wire_crypt):
        """Build the CNCT user-identification blob sent with op_connect.

        Encodes login, auth plugin choice, plugin-specific data (SRP
        public key or DES-crypted password), wire-encryption preference
        and the client user/host names as tagged CNCT_* parameters.
        """
        def pack_cnct_param(k, v):
            # One CNCT parameter: tag byte + length byte + payload.
            if k != CNCT_specific_data:
                return bs([k] + [len(v)]) + v
            # specific_data split per 254 bytes (numbered chunks)
            b = b''
            i = 0
            while len(v) > 254:
                b += bs([k, 255, i]) + v[:254]
                v = v[254:]
                i += 1
            b += bs([k, len(v)+1, i]) + v
            return b

        auth_plugin_list = ('Srp256', 'Srp', 'Legacy_Auth')
        # get and calculate CNCT_xxxx values
        if sys.platform == 'win32':
            user = os.environ['USERNAME']
            hostname = os.environ['COMPUTERNAME']
        else:
            user = os.environ.get('USER', '')
            hostname = socket.gethostname()

        if auth_plugin_name in ('Srp256', 'Srp'):
            # SRP: send our ephemeral public key, hex encoded.
            self.client_public_key, self.client_private_key = srp.client_seed()
            specific_data = bytes_to_hex(srp.long2bytes(self.client_public_key))
        elif auth_plugin_name == 'Legacy_Auth':
            assert crypt, "Legacy_Auth needs crypt module"
            specific_data = self.str_to_bytes(get_crypt(self.password))
        else:
            raise OperationalError("Unknown auth plugin name '%s'" % (auth_plugin_name,))
        self.plugin_name = auth_plugin_name
        self.plugin_list = b','.join([s.encode('utf-8') for s in auth_plugin_list])
        client_crypt = b'\x01\x00\x00\x00' if wire_crypt else b'\x00\x00\x00\x00'
        # set CNCT_xxxx values
        r = b''
        r += pack_cnct_param(CNCT_login, self.str_to_bytes(self.user))
        r += pack_cnct_param(CNCT_plugin_name, self.str_to_bytes(self.plugin_name))
        r += pack_cnct_param(CNCT_plugin_list, self.plugin_list)
        r += pack_cnct_param(CNCT_specific_data, specific_data)
        r += pack_cnct_param(CNCT_client_crypt, client_crypt)

        r += pack_cnct_param(CNCT_user, self.str_to_bytes(user))
        r += pack_cnct_param(CNCT_host, self.str_to_bytes(hostname))
        r += pack_cnct_param(CNCT_user_verification, b'')
        return r
    @wire_operation
    def _op_connect(self, auth_plugin_name, wire_crypt):
        """Send op_connect, offering protocol versions 10 through 13."""
        protocols = [
            # PROTOCOL_VERSION, Arch type (Generic=1), min, max, weight
            '0000000a00000001000000000000000500000002',  # 10, 1, 0, 5, 2
            'ffff800b00000001000000000000000500000004',  # 11, 1, 0, 5, 4
            'ffff800c00000001000000000000000500000006',  # 12, 1, 0, 5, 6
            'ffff800d00000001000000000000000500000008',  # 13, 1, 0, 5, 8
        ]
        p = xdrlib.Packer()
        p.pack_int(self.op_connect)
        p.pack_int(self.op_attach)
        p.pack_int(3)  # CONNECT_VERSION
        p.pack_int(1)  # arch_generic
        p.pack_string(self.str_to_bytes(self.filename if self.filename else ''))
        p.pack_int(len(protocols))
        p.pack_bytes(self.uid(auth_plugin_name, wire_crypt))
        self.sock.send(p.get_buffer() + hex_to_bytes(''.join(protocols)))
    @wire_operation
    def _op_create(self, page_size=4096):
        """Send op_create: create database ``self.filename``.

        Builds the database parameter buffer (DPB) from the connection
        attributes and the requested ``page_size``.
        """
        dpb = bs([1])  # DPB version byte
        s = self.str_to_bytes(self.charset)
        dpb += bs([isc_dpb_set_db_charset, len(s)]) + s
        dpb += bs([isc_dpb_lc_ctype, len(s)]) + s
        s = self.str_to_bytes(self.user)
        dpb += bs([isc_dpb_user_name, len(s)]) + s
        if self.accept_version < PROTOCOL_VERSION13:
            # Pre-v13 protocols carry the password (plain or DES-crypted)
            # inside the DPB.
            enc_pass = get_crypt(self.password)
            if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
                s = self.str_to_bytes(self.password)
                dpb += bs([isc_dpb_password, len(s)]) + s
            else:
                enc_pass = self.str_to_bytes(enc_pass)
                dpb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
        if self.role:
            s = self.str_to_bytes(self.role)
            dpb += bs([isc_dpb_sql_role_name, len(s)]) + s
        if self.auth_data:
            # v13+: auth data negotiated during connect, hex encoded.
            s = bytes_to_hex(self.auth_data)
            dpb += bs([isc_dpb_specific_auth_data, len(s)]) + s
        if self.timezone:
            s = self.str_to_bytes(self.timezone)
            dpb += bs([isc_dpb_session_time_zone, len(s)]) + s
        dpb += bs([isc_dpb_sql_dialect, 4]) + int_to_bytes(3, 4)
        dpb += bs([isc_dpb_force_write, 4]) + int_to_bytes(1, 4)
        dpb += bs([isc_dpb_overwrite, 4]) + int_to_bytes(1, 4)
        dpb += bs([isc_dpb_page_size, 4]) + int_to_bytes(page_size, 4)
        p = xdrlib.Packer()
        p.pack_int(self.op_create)
        p.pack_int(0)  # Database Object ID
        p.pack_string(self.str_to_bytes(self.filename))
        p.pack_bytes(dpb)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_cont_auth(self, auth_data, auth_plugin_name, auth_plugin_list, keys):
        """Send op_cont_auth to continue the authentication handshake."""
        p = xdrlib.Packer()
        p.pack_int(self.op_cont_auth)
        p.pack_string(bytes_to_hex(auth_data))  # auth data travels hex encoded
        p.pack_bytes(auth_plugin_name)
        p.pack_bytes(auth_plugin_list)
        p.pack_bytes(keys)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _parse_connect_response(self):
        """Read the server reply to op_connect and finish authentication.

        Handles op_accept / op_cond_accept / op_accept_data, performs the
        SRP or Legacy_Auth proof when the server requires it, and enables
        Arc4 wire encryption when negotiated.
        """
        # want and treat op_accept or op_cond_accept or op_accept_data
        b = self.recv_channel(4)
        while bytes_to_bint(b) == self.op_dummy:
            b = self.recv_channel(4)
        if bytes_to_bint(b) == self.op_reject:
            raise OperationalError('Connection is rejected')

        op_code = bytes_to_bint(b)
        if op_code == self.op_response:
            return self._parse_op_response()  # error occured

        b = self.recv_channel(12)
        self.accept_version = byte_to_int(b[3])
        self.accept_architecture = bytes_to_bint(b[4:8])
        self.accept_type = bytes_to_bint(b[8:])
        self.lazy_response_count = 0

        if op_code == self.op_cond_accept or op_code == self.op_accept_data:
            ln = bytes_to_bint(self.recv_channel(4))
            data = self.recv_channel(ln, word_alignment=True)

            ln = bytes_to_bint(self.recv_channel(4))
            self.accept_plugin_name = self.recv_channel(ln, word_alignment=True)

            is_authenticated = bytes_to_bint(self.recv_channel(4))
            ln = bytes_to_bint(self.recv_channel(4))
            self.recv_channel(ln, word_alignment=True)  # keys

            if is_authenticated == 0:
                if self.accept_plugin_name in (b'Srp256', b'Srp'):
                    hash_algo = {
                        b'Srp256': hashlib.sha256,
                        b'Srp': hashlib.sha1,
                    }[self.accept_plugin_name]

                    # Quoted user names keep their case; others are upper-cased.
                    user = self.user
                    if len(user) > 2 and user[0] == user[-1] == '"':
                        user = user[1:-1]
                        user = user.replace('""', '"')
                    else:
                        user = user.upper()

                    if len(data) == 0:
                        # Server sent no SRP data yet: send op_cont_auth with
                        # our public key and read the server's reply.
                        self._op_cont_auth(
                            srp.long2bytes(self.client_public_key),
                            self.accept_plugin_name,
                            self.plugin_list,
                            b''
                        )
                        # parse op_cont_auth
                        b = self.recv_channel(4)
                        assert bytes_to_bint(b) == self.op_cont_auth
                        ln = bytes_to_bint(self.recv_channel(4))
                        data = self.recv_channel(ln, word_alignment=True)
                        ln = bytes_to_bint(self.recv_channel(4))
                        plugin_name = self.recv_channel(ln, word_alignment=True)
                        ln = bytes_to_bint(self.recv_channel(4))
                        plugin_list = self.recv_channel(ln, word_alignment=True)
                        ln = bytes_to_bint(self.recv_channel(4))
                        keys = self.recv_channel(ln, word_alignment=True)

                    # data = <2-byte salt length><salt><hex server public key>
                    ln = bytes_to_int(data[:2])
                    server_salt = data[2:ln+2]
                    server_public_key = srp.bytes2long(
                        hex_to_bytes(data[4+ln:]))

                    auth_data, session_key = srp.client_proof(
                        self.str_to_bytes(user),
                        self.str_to_bytes(self.password),
                        server_salt,
                        self.client_public_key,
                        server_public_key,
                        self.client_private_key,
                        hash_algo)
                elif self.accept_plugin_name == b'Legacy_Auth':
                    auth_data = self.str_to_bytes(get_crypt(self.password))
                    session_key = b''
                else:
                    raise OperationalError(
                        'Unknown auth plugin %s' % (self.accept_plugin_name)
                    )
            else:
                auth_data = b''
                session_key = b''

            if op_code == self.op_cond_accept:
                self._op_cont_auth(
                    auth_data,
                    self.accept_plugin_name,
                    self.plugin_list,
                    b''
                )
                (h, oid, buf) = self._op_response()
                if self.wire_crypt and session_key:
                    # op_crypt: plugin[Arc4] key[Symmetric]
                    p = xdrlib.Packer()
                    p.pack_int(self.op_crypt)
                    p.pack_string(b'Arc4')
                    p.pack_string(b'Symmetric')
                    self.sock.send(p.get_buffer())
                    self.sock.set_translator(
                        ARC4.new(session_key), ARC4.new(session_key))
                    (h, oid, buf) = self._op_response()
            else:  # use later _op_attach() and _op_create()
                self.auth_data = auth_data
        else:
            assert op_code == self.op_accept
    @wire_operation
    def _op_attach(self):
        """Send op_attach: attach to database ``self.filename``."""
        dpb = bs([isc_dpb_version1])
        s = self.str_to_bytes(self.charset)
        dpb += bs([isc_dpb_lc_ctype, len(s)]) + s
        s = self.str_to_bytes(self.user)
        dpb += bs([isc_dpb_user_name, len(s)]) + s
        if self.accept_version < PROTOCOL_VERSION13:
            # Pre-v13 protocols carry the password (plain or DES-crypted)
            # inside the DPB.
            enc_pass = get_crypt(self.password)
            if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
                s = self.str_to_bytes(self.password)
                dpb += bs([isc_dpb_password, len(s)]) + s
            else:
                enc_pass = self.str_to_bytes(enc_pass)
                dpb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
        if self.role:
            s = self.str_to_bytes(self.role)
            dpb += bs([isc_dpb_sql_role_name, len(s)]) + s
        dpb += bs([isc_dpb_process_id, 4]) + int_to_bytes(os.getpid(), 4)
        s = self.str_to_bytes(sys.argv[0])
        dpb += bs([isc_dpb_process_name, len(s)]) + s
        if self.auth_data:
            # v13+: auth data negotiated during connect, hex encoded.
            s = bytes_to_hex(self.auth_data)
            dpb += bs([isc_dpb_specific_auth_data, len(s)]) + s
        if self.timezone:
            s = self.str_to_bytes(self.timezone)
            dpb += bs([isc_dpb_session_time_zone, len(s)]) + s
        p = xdrlib.Packer()
        p.pack_int(self.op_attach)
        p.pack_int(0)  # Database Object ID
        p.pack_string(self.str_to_bytes(self.filename))
        p.pack_bytes(dpb)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_drop_database(self):
        """Send op_drop_database for the attached database."""
        if self.db_handle is None:
            raise OperationalError('_op_drop_database() Invalid db handle')
        p = xdrlib.Packer()
        p.pack_int(self.op_drop_database)
        p.pack_int(self.db_handle)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_service_attach(self):
        """Send op_service_attach: attach to the service manager."""
        spb = bs([2, 2])  # SPB version header
        s = self.str_to_bytes(self.user)
        spb += bs([isc_spb_user_name, len(s)]) + s
        if self.accept_version < PROTOCOL_VERSION13:
            # Pre-v13 protocols carry the password (plain or DES-crypted)
            # inside the SPB.
            enc_pass = get_crypt(self.password)
            if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
                s = self.str_to_bytes(self.password)
                spb += bs([isc_dpb_password, len(s)]) + s
            else:
                enc_pass = self.str_to_bytes(enc_pass)
                spb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
        if self.auth_data:
            s = self.str_to_bytes(bytes_to_hex(self.auth_data))
            spb += bs([isc_dpb_specific_auth_data, len(s)]) + s
        spb += bs([isc_spb_dummy_packet_interval, 0x04, 0x78, 0x0a, 0x00, 0x00])
        p = xdrlib.Packer()
        p.pack_int(self.op_service_attach)
        p.pack_int(0)
        p.pack_string(self.str_to_bytes('service_mgr'))
        p.pack_bytes(spb)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_service_info(self, param, item, buffer_length=512):
        """Send op_service_info: query service-manager information."""
        if self.db_handle is None:
            raise OperationalError('_op_service_info() Invalid db handle')
        p = xdrlib.Packer()
        p.pack_int(self.op_service_info)
        p.pack_int(self.db_handle)
        p.pack_int(0)  # always 0
        p.pack_bytes(param)
        p.pack_bytes(item)
        p.pack_int(buffer_length)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_service_start(self, param):
        """Send op_service_start: start a service-manager task."""
        if self.db_handle is None:
            raise OperationalError('_op_service_start() Invalid db handle')
        p = xdrlib.Packer()
        p.pack_int(self.op_service_start)
        p.pack_int(self.db_handle)
        p.pack_int(0)  # always 0
        p.pack_bytes(param)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_service_detach(self):
        """Send op_service_detach: detach from the service manager."""
        if self.db_handle is None:
            raise OperationalError('_op_service_detach() Invalid db handle')
        p = xdrlib.Packer()
        p.pack_int(self.op_service_detach)
        p.pack_int(self.db_handle)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_info_database(self, b):
        """Send op_info_database with the info items ``b``."""
        if self.db_handle is None:
            raise OperationalError('_op_info_database() Invalid db handle')
        p = xdrlib.Packer()
        p.pack_int(self.op_info_database)
        p.pack_int(self.db_handle)
        p.pack_int(0)  # always 0
        p.pack_bytes(b)
        p.pack_int(self.buffer_length)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_transaction(self, tpb):
        """Send op_transaction: start a transaction described by ``tpb``."""
        if self.db_handle is None:
            raise OperationalError('_op_transaction() Invalid db handle')
        p = xdrlib.Packer()
        p.pack_int(self.op_transaction)
        p.pack_int(self.db_handle)
        p.pack_bytes(tpb)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_commit(self, trans_handle):
        """Send op_commit for transaction ``trans_handle``."""
        p = xdrlib.Packer()
        p.pack_int(self.op_commit)
        p.pack_int(trans_handle)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_commit_retaining(self, trans_handle):
        """Send op_commit_retaining (commit but keep the transaction open)."""
        p = xdrlib.Packer()
        p.pack_int(self.op_commit_retaining)
        p.pack_int(trans_handle)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_rollback(self, trans_handle):
        """Send op_rollback for transaction ``trans_handle``."""
        p = xdrlib.Packer()
        p.pack_int(self.op_rollback)
        p.pack_int(trans_handle)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_rollback_retaining(self, trans_handle):
        """Send op_rollback_retaining (rollback, keep transaction open)."""
        p = xdrlib.Packer()
        p.pack_int(self.op_rollback_retaining)
        p.pack_int(trans_handle)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_allocate_statement(self):
        """Send op_allocate_statement: allocate a statement handle."""
        if self.db_handle is None:
            raise OperationalError('_op_allocate_statement() Invalid db handle')
        p = xdrlib.Packer()
        p.pack_int(self.op_allocate_statement)
        p.pack_int(self.db_handle)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_info_transaction(self, trans_handle, b):
        """Send op_info_transaction with the info items ``b``."""
        p = xdrlib.Packer()
        p.pack_int(self.op_info_transaction)
        p.pack_int(trans_handle)
        p.pack_int(0)  # always 0
        p.pack_bytes(b)
        p.pack_int(self.buffer_length)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_free_statement(self, stmt_handle, mode):
        """Send op_free_statement: close or drop a statement (``mode``)."""
        p = xdrlib.Packer()
        p.pack_int(self.op_free_statement)
        p.pack_int(stmt_handle)
        p.pack_int(mode)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_prepare_statement(self, stmt_handle, trans_handle, query, option_items=None):
        """Send op_prepare_statement, requesting the statement type and
        output column descriptions in the same round trip."""
        if option_items is None:
            option_items = bs([])
        desc_items = option_items + bs([isc_info_sql_stmt_type]) + INFO_SQL_SELECT_DESCRIBE_VARS
        p = xdrlib.Packer()
        p.pack_int(self.op_prepare_statement)
        p.pack_int(trans_handle)
        p.pack_int(stmt_handle)
        p.pack_int(3)  # dialect = 3
        p.pack_string(self.str_to_bytes(query))
        p.pack_bytes(desc_items)
        p.pack_int(self.buffer_length)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_info_sql(self, stmt_handle, vars):
        """Send op_info_sql with the info items ``vars``."""
        p = xdrlib.Packer()
        p.pack_int(self.op_info_sql)
        p.pack_int(stmt_handle)
        p.pack_int(0)  # always 0
        p.pack_bytes(vars)
        p.pack_int(self.buffer_length)
        self.sock.send(p.get_buffer())
    @wire_operation
    def _op_execute(self, stmt_handle, trans_handle, params):
        """Send op_execute: run a prepared statement with ``params``."""
        p = xdrlib.Packer()
        p.pack_int(self.op_execute)
        p.pack_int(stmt_handle)
        p.pack_int(trans_handle)

        if len(params) == 0:
            p.pack_bytes(bs([]))
            p.pack_int(0)  # message number
            p.pack_int(0)  # number of messages
            self.sock.send(p.get_buffer())
        else:
            (blr, values) = self.params_to_blr(trans_handle, params)
            p.pack_bytes(blr)
            p.pack_int(0)  # message number
            p.pack_int(1)  # number of messages
            self.sock.send(p.get_buffer() + values)
@wire_operation
def _op_execute2(self, stmt_handle, trans_handle, params, output_blr):
    """Execute a statement that returns a singleton row described by
    *output_blr* (op_execute2)."""
    pk = xdrlib.Packer()
    pk.pack_int(self.op_execute2)
    pk.pack_int(stmt_handle)
    pk.pack_int(trans_handle)
    if params:
        blr, values = self.params_to_blr(trans_handle, params)
        pk.pack_bytes(blr)
        pk.pack_int(0)  # message number
        pk.pack_int(1)  # one parameter message follows
    else:
        values = b''
        pk.pack_bytes(bs([]))
        pk.pack_int(0)
        pk.pack_int(0)
    out = xdrlib.Packer()
    out.pack_bytes(output_blr)
    out.pack_int(0)  # output message number
    self.sock.send(pk.get_buffer() + values + out.get_buffer())
@wire_operation
def _op_exec_immediate(self, trans_handle, query):
    """Prepare-and-execute *query* in one round trip (op_exec_immediate)."""
    if self.db_handle is None:
        raise OperationalError('_op_exec_immediate() Invalid db handle')
    pk = xdrlib.Packer()
    pk.pack_int(self.op_exec_immediate)
    pk.pack_int(trans_handle)
    pk.pack_int(self.db_handle)
    pk.pack_int(3)  # dialect = 3
    pk.pack_string(self.str_to_bytes(query))
    pk.pack_bytes(bs([]))  # no describe items requested
    pk.pack_int(self.buffer_length)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_fetch(self, stmt_handle, blr):
    """Request the next batch of rows (up to 400) for *stmt_handle*."""
    pk = xdrlib.Packer()
    pk.pack_int(self.op_fetch)
    pk.pack_int(stmt_handle)
    pk.pack_bytes(blr)
    pk.pack_int(0)    # message number
    pk.pack_int(400)  # fetch size
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_fetch_response(self, stmt_handle, xsqlda):
    """Read the reply to op_fetch and decode rows described by *xsqlda*.

    Returns (rows, more_rows); more_rows is False once the server reports
    fetch status 100 (end of cursor).
    """
    op_code = bytes_to_bint(self.recv_channel(4))
    while op_code == self.op_dummy:
        op_code = bytes_to_bint(self.recv_channel(4))
    # Drain op_response packets owed for earlier lazy operations.
    while op_code == self.op_response and self.lazy_response_count:
        self.lazy_response_count -= 1
        h, oid, buf = self._parse_op_response()
        op_code = bytes_to_bint(self.recv_channel(4))
    if op_code != self.op_fetch_response:
        if op_code == self.op_response:
            self._parse_op_response()  # raises on an error status vector
        raise InternalError("op_fetch_response:op_code = %d" % (op_code,))
    b = self.recv_channel(8)
    status = bytes_to_bint(b[:4])
    count = bytes_to_bint(b[4:8])
    rows = []
    while count:
        r = [None] * len(xsqlda)
        if self.accept_version < PROTOCOL_VERSION13:
            # Pre-v13: each column is a value followed by a 4-byte NULL flag.
            for i in range(len(xsqlda)):
                x = xsqlda[i]
                if x.io_length() < 0:
                    b = self.recv_channel(4)
                    ln = bytes_to_bint(b)
                else:
                    ln = x.io_length()
                raw_value = self.recv_channel(ln, word_alignment=True)
                if self.recv_channel(4) == bs([0]) * 4: # Not NULL
                    r[i] = x.value(raw_value)
        else: # PROTOCOL_VERSION13
            # v13+: a NULL bitmap (read here little-endian) precedes values;
            # only non-NULL columns appear in the stream.
            n = len(xsqlda) // 8
            if len(xsqlda) % 8 != 0:
                n += 1
            null_indicator = 0
            for c in reversed(self.recv_channel(n, word_alignment=True)):
                null_indicator <<= 8
                null_indicator += c if PYTHON_MAJOR_VER == 3 else ord(c)
            for i in range(len(xsqlda)):
                x = xsqlda[i]
                if null_indicator & (1 << i):
                    continue  # NULL column stays None
                if x.io_length() < 0:
                    b = self.recv_channel(4)
                    ln = bytes_to_bint(b)
                else:
                    ln = x.io_length()
                raw_value = self.recv_channel(ln, word_alignment=True)
                r[i] = x.value(raw_value)
        rows.append(r)
        # Each row is followed by (op_code, status, count) for the next one.
        b = self.recv_channel(12)
        op_code = bytes_to_bint(b[:4])
        status = bytes_to_bint(b[4:8])
        count = bytes_to_bint(b[8:])
    return rows, status != 100
@wire_operation
def _op_detach(self):
    """Detach from the database."""
    if self.db_handle is None:
        raise OperationalError('_op_detach() Invalid db handle')
    pk = xdrlib.Packer()
    for word in (self.op_detach, self.db_handle):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_open_blob(self, blob_id, trans_handle):
    """Open an existing blob; *blob_id* is appended raw after the header."""
    pk = xdrlib.Packer()
    for word in (self.op_open_blob, trans_handle):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer() + blob_id)
@wire_operation
def _op_create_blob2(self, trans_handle):
    """Create a new blob in *trans_handle* (empty blob parameter block)."""
    pk = xdrlib.Packer()
    for word in (self.op_create_blob2, 0, trans_handle, 0, 0):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_get_segment(self, blob_handle):
    """Request the next blob segment (up to buffer_length bytes)."""
    pk = xdrlib.Packer()
    for word in (self.op_get_segment, blob_handle, self.buffer_length, 0):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_put_segment(self, blob_handle, seg_data):
    """Write one blob segment; the payload is padded to a 4-byte boundary."""
    nbytes = len(seg_data)
    pk = xdrlib.Packer()
    for word in (self.op_put_segment, blob_handle, nbytes, nbytes):
        pk.pack_int(word)
    padding = bs([0]) * ((4 - nbytes) & 3)
    self.sock.send(pk.get_buffer() + seg_data + padding)
@wire_operation
def _op_batch_segments(self, blob_handle, seg_data):
    """Write blob data as a batched segment: a 2-byte length prefix comes
    before the payload, and the whole is padded to a 4-byte boundary."""
    nbytes = len(seg_data)
    inner = nbytes + 2  # payload plus its 2-byte length prefix
    pk = xdrlib.Packer()
    for word in (self.op_batch_segments, blob_handle, inner, inner):
        pk.pack_int(word)
    padding = bs([0]) * ((4 - inner) & 3)
    self.sock.send(pk.get_buffer() + int_to_bytes(nbytes, 2) + seg_data + padding)
@wire_operation
def _op_close_blob(self, blob_handle):
    """Close an open blob handle."""
    pk = xdrlib.Packer()
    for word in (self.op_close_blob, blob_handle):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_que_events(self, event_names, event_id):
    """Register interest in *event_names* ({name: count}) under *event_id*."""
    if self.db_handle is None:
        raise OperationalError('_op_que_events() Invalid db handle')
    epb = bs([1])  # event parameter block, version byte 1
    for name, count in event_names.items():
        epb += bs([len(name)]) + self.str_to_bytes(name) + int_to_bytes(count, 4)
    pk = xdrlib.Packer()
    pk.pack_int(self.op_que_events)
    pk.pack_int(self.db_handle)
    pk.pack_bytes(epb)
    pk.pack_int(0) # ast
    pk.pack_int(0) # args
    pk.pack_int(event_id)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_cancel_events(self, event_id):
    """Cancel a previously queued event registration."""
    if self.db_handle is None:
        raise OperationalError('_op_cancel_events() Invalid db handle')
    pk = xdrlib.Packer()
    for word in (self.op_cancel_events, self.db_handle, event_id):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_connect_request(self):
    """Ask the server for an auxiliary (async/event) connection endpoint."""
    if self.db_handle is None:
        raise OperationalError('_op_connect_request() Invalid db handle')
    pk = xdrlib.Packer()
    for word in (self.op_connect_request, 1, self.db_handle, 0):  # 1 = async
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_response(self):
    """Read the next op_response packet, skipping op_dummy keep-alives and
    consuming responses owed for earlier lazy operations, then parse it.

    Raises OperationalError on op_cont_auth and InternalError on any other
    unexpected opcode.
    """
    b = self.recv_channel(4)
    while bytes_to_bint(b) == self.op_dummy:
        b = self.recv_channel(4)
    op_code = bytes_to_bint(b)
    while op_code == self.op_response and self.lazy_response_count:
        self.lazy_response_count -= 1
        h, oid, buf = self._parse_op_response()
        b = self.recv_channel(4)
        # NOTE(review): op_code is not refreshed from the freshly read b
        # inside this loop, so after draining lazy responses the checks
        # below use the pre-loop opcode — confirm this is intended.
    if op_code == self.op_cont_auth:
        raise OperationalError('Unauthorized')
    elif op_code != self.op_response:
        raise InternalError("_op_response:op_code = %d" % (op_code,))
    return self._parse_op_response()
@wire_operation
def _op_event(self):
    """Read and parse an op_event packet from the auxiliary connection.

    Skips op_dummy packets and at most one pending lazy op_response;
    raises DisconnectByPeer when the server signals op_exit.
    """
    b = self.recv_channel(4)
    while bytes_to_bint(b) == self.op_dummy:
        b = self.recv_channel(4)
    op_code = bytes_to_bint(b)
    if op_code == self.op_response and self.lazy_response_count:
        self.lazy_response_count -= 1
        self._parse_op_response()
        b = self.recv_channel(4)  # re-read the real opcode
    if op_code == self.op_exit or bytes_to_bint(b) == self.op_exit:
        raise DisconnectByPeer
    if op_code != self.op_event:
        if op_code == self.op_response:
            self._parse_op_response()  # raises on an error status vector
        raise InternalError("_op_event:op_code = %d" % (op_code,))
    return self._parse_op_event()
@wire_operation
def _op_sql_response(self, xsqlda):
    """Read an op_sql_response packet and decode the single row it carries
    (used for statements executed with op_execute2).

    Returns a list of column values, or [] when the row count is 0.
    """
    b = self.recv_channel(4)
    while bytes_to_bint(b) == self.op_dummy:
        b = self.recv_channel(4)
    op_code = bytes_to_bint(b)
    if op_code != self.op_sql_response:
        if op_code == self.op_response:
            self._parse_op_response()  # raises on an error status vector
        raise InternalError("_op_sql_response:op_code = %d" % (op_code,))
    b = self.recv_channel(4)
    count = bytes_to_bint(b[:4])
    r = []
    if count == 0:
        return []
    if self.accept_version < PROTOCOL_VERSION13:
        # Pre-v13: each column is a value followed by a 4-byte NULL flag.
        for i in range(len(xsqlda)):
            x = xsqlda[i]
            if x.io_length() < 0:
                b = self.recv_channel(4)
                ln = bytes_to_bint(b)
            else:
                ln = x.io_length()
            raw_value = self.recv_channel(ln, word_alignment=True)
            if self.recv_channel(4) == bs([0]) * 4: # Not NULL
                r.append(x.value(raw_value))
            else:
                r.append(None)
    else:
        # v13+: NULL bitmap first; only non-NULL columns follow in the stream.
        n = len(xsqlda) // 8
        if len(xsqlda) % 8 != 0:
            n += 1
        null_indicator = 0
        for c in reversed(self.recv_channel(n, word_alignment=True)):
            null_indicator <<= 8
            null_indicator += c if PYTHON_MAJOR_VER == 3 else ord(c)
        for i in range(len(xsqlda)):
            x = xsqlda[i]
            if null_indicator & (1 << i):
                r.append(None)
            else:
                if x.io_length() < 0:
                    b = self.recv_channel(4)
                    ln = bytes_to_bint(b)
                else:
                    ln = x.io_length()
                raw_value = self.recv_channel(ln, word_alignment=True)
                r.append(x.value(raw_value))
    return r
def _wait_for_event(self, timeout):
    """Block until an op_event arrives on this (auxiliary) connection.

    Returns (event_id, {event_name: count}); op_exit/op_disconnect end the
    wait with the defaults (0, {}).  *timeout* is unused here — timing is
    governed by recv_channel()'s own timeout handling.
    """
    event_names = {}
    event_id = 0
    while True:
        b4 = self.recv_channel(4)
        if b4 is None:  # defensive: recv_channel normally raises instead
            return None
        op_code = bytes_to_bint(b4)
        if op_code == self.op_dummy:
            pass  # keep-alive; wait for the next packet
        elif op_code == self.op_exit or op_code == self.op_disconnect:
            break
        elif op_code == self.op_event:
            bytes_to_int(self.recv_channel(4)) # db_handle
            ln = bytes_to_bint(self.recv_channel(4))
            b = self.recv_channel(ln, word_alignment=True)
            assert byte_to_int(b[0]) == 1  # event buffer version byte
            i = 1
            while i < len(b):
                ln = byte_to_int(b[i])
                s = self.connection.bytes_to_str(b[i+1:i+1+ln])
                n = bytes_to_int(b[i+1+ln:i+1+ln+4])
                event_names[s] = n
                i += ln + 5  # 1 length byte + name + 4-byte count
            self.recv_channel(8) # ignore AST info
            event_id = bytes_to_bint(self.recv_channel(4))
            break
        else:
            raise InternalError("_wait_for_event:op_code = %d" % (op_code,))
    return (event_id, event_names)
|
nakagami/pyfirebirdsql
|
firebirdsql/wireprotocol.py
|
WireProtocol.bytes_to_str
|
python
|
def bytes_to_str(self, b):
    """Convert a byte string to the native str type.

    Under Python 3 the bytes are decoded with the connection charset
    (mapped through charset_map); under Python 2, bytes already are str
    and are returned unchanged.
    """
    if PYTHON_MAJOR_VER == 3:
        return b.decode(charset_map.get(self.charset, self.charset))
    return b
|
convert bytes array to raw string
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/wireprotocol.py#L234-L238
| null |
class WireProtocol(object):
buffer_length = 1024
op_connect = 1
op_exit = 2
op_accept = 3
op_reject = 4
op_protocol = 5
op_disconnect = 6
op_response = 9
op_attach = 19
op_create = 20
op_detach = 21
op_transaction = 29
op_commit = 30
op_rollback = 31
op_open_blob = 35
op_get_segment = 36
op_put_segment = 37
op_close_blob = 39
op_info_database = 40
op_info_transaction = 42
op_batch_segments = 44
op_que_events = 48
op_cancel_events = 49
op_commit_retaining = 50
op_event = 52
op_connect_request = 53
op_aux_connect = 53
op_create_blob2 = 57
op_allocate_statement = 62
op_execute = 63
op_exec_immediate = 64
op_fetch = 65
op_fetch_response = 66
op_free_statement = 67
op_prepare_statement = 68
op_info_sql = 70
op_dummy = 71
op_execute2 = 76
op_sql_response = 78
op_drop_database = 81
op_service_attach = 82
op_service_detach = 83
op_service_info = 84
op_service_start = 85
op_rollback_retaining = 86
# FB3
op_update_account_info = 87
op_authenticate_user = 88
op_partial = 89
op_trusted_auth = 90
op_cancel = 91
op_cont_auth = 92
op_ping = 93
op_accept_data = 94
op_abort_aux_connection = 95
op_crypt = 96
op_crypt_key_callback = 97
op_cond_accept = 98
def __init__(self):
    # Auth-negotiation state; both are filled in later during
    # _parse_connect_response() and consumed by _op_attach()/_op_create().
    self.accept_plugin_name = ''
    self.auth_data = b''
def recv_channel(self, nbytes, word_alignment=False):
    """Receive exactly *nbytes* from the server socket.

    With word_alignment=True the read is extended to the next 4-byte
    boundary (XDR padding) but only the first *nbytes* are returned.
    Raises OperationalError when the socket times out (select) or the
    peer closes before enough data arrives.
    """
    n = nbytes
    if word_alignment and (n % 4):
        n += 4 - nbytes % 4 # 4 bytes word alignment
    r = bs([])
    while n:
        # Honor the configured timeout: give up if nothing is readable.
        if (self.timeout is not None and select.select([self.sock._sock], [], [], self.timeout)[0] == []):
            break
        b = self.sock.recv(n)
        if not b:
            break  # connection closed by peer
        r += b
        n -= len(b)
    if len(r) < nbytes:
        raise OperationalError('Can not recv() packets')
    return r[:nbytes]
def str_to_bytes(self, s):
    """Encode a text string with the connection charset; byte strings
    (Python 2 str) pass through unchanged."""
    is_text = (PYTHON_MAJOR_VER == 3 or
               (PYTHON_MAJOR_VER == 2 and type(s) == unicode))
    if not is_text:
        return s
    return s.encode(charset_map.get(self.charset, self.charset))
def bytes_to_ustr(self, b):
    """Decode *b* with the connection charset and return a unicode string."""
    encoding = charset_map.get(self.charset, self.charset)
    return b.decode(encoding)
def _parse_status_vector(self):
    """Read a status vector from the wire.

    Returns (gds_codes, sql_code, message): the set of GDS error codes
    seen, the SQLCODE (taken from the number following gds code
    335544436), and the formatted message with @1/@2/... placeholders
    substituted.

    Fix: gds_code and num_arg are now initialized before the loop; the
    original assigned them only inside the isc_arg_gds branch, so a
    vector starting with isc_arg_number or isc_arg_string raised
    UnboundLocalError.
    """
    sql_code = 0
    gds_code = 0   # last seen GDS code; 0 until an isc_arg_gds clumplet
    num_arg = 0    # placeholder index for @n substitution
    gds_codes = set()
    message = ''
    n = bytes_to_bint(self.recv_channel(4))
    while n != isc_arg_end:
        if n == isc_arg_gds:
            gds_code = bytes_to_bint(self.recv_channel(4))
            if gds_code:
                gds_codes.add(gds_code)
                message += messages.get(gds_code, '@1')
                num_arg = 0  # placeholders restart for each message template
        elif n == isc_arg_number:
            num = bytes_to_bint(self.recv_channel(4))
            if gds_code == 335544436:  # the number after isc_sqlerr is the SQLCODE
                sql_code = num
            num_arg += 1
            message = message.replace('@' + str(num_arg), str(num))
        elif n == isc_arg_string:
            nbytes = bytes_to_bint(self.recv_channel(4))
            # NOTE(review): str() on a bytes object yields "b'...'" under
            # Python 3 — presumably this should decode instead; kept as-is.
            s = str(self.recv_channel(nbytes, word_alignment=True))
            num_arg += 1
            message = message.replace('@' + str(num_arg), s)
        elif n == isc_arg_interpreted:
            nbytes = bytes_to_bint(self.recv_channel(4))
            s = str(self.recv_channel(nbytes, word_alignment=True))
            message += s
        elif n == isc_arg_sql_state:
            nbytes = bytes_to_bint(self.recv_channel(4))
            s = str(self.recv_channel(nbytes, word_alignment=True))  # SQLSTATE, currently discarded
        n = bytes_to_bint(self.recv_channel(4))
    return (gds_codes, sql_code, message)
def _parse_op_response(self):
    """Parse an op_response body and return (handle, object_id, buffer).

    The trailing status vector is inspected: GDS codes in the first set
    below raise IntegrityError, code 335544321 only issues a warning, and
    any other nonzero status (except 335544434) raises OperationalError.
    """
    b = self.recv_channel(16)
    h = bytes_to_bint(b[0:4])         # Object handle
    oid = b[4:12]                     # Object ID
    buf_len = bytes_to_bint(b[12:])   # buffer length
    buf = self.recv_channel(buf_len, word_alignment=True)
    (gds_codes, sql_code, message) = self._parse_status_vector()
    if gds_codes.intersection([
        335544838, 335544879, 335544880, 335544466, 335544665, 335544347, 335544558
    ]):
        raise IntegrityError(message, gds_codes, sql_code)
    elif gds_codes.intersection([335544321]):
        warnings.warn(message)
    elif (sql_code or message) and not gds_codes.intersection([335544434]):
        raise OperationalError(message, gds_codes, sql_code)
    return (h, oid, buf)
def _parse_op_event(self):
    """Parse an op_event packet; returns (db_handle, event_id, {}).

    Only the leading DB handle and trailing event id are extracted; the
    event-name payload in between is not decoded yet (see TODOs).
    """
    b = self.recv_channel(4096) # too large TODO: read step by step
    # TODO: parse event name
    db_handle = bytes_to_bint(b[0:4])
    event_id = bytes_to_bint(b[-4:])
    return (db_handle, event_id, {})
def _create_blob(self, trans_handle, b):
    """Create a new blob in *trans_handle*, upload *b* segment by segment
    (BLOB_SEGMENT_SIZE each), close it, and return the new blob id."""
    self._op_create_blob2(trans_handle)
    (blob_handle, blob_id, buf) = self._op_response()
    for offset in range(0, len(b), BLOB_SEGMENT_SIZE):
        self._op_put_segment(blob_handle, b[offset:offset + BLOB_SEGMENT_SIZE])
        self._op_response()
    self._op_close_blob(blob_handle)
    self._op_response()
    return blob_id
def params_to_blr(self, trans_handle, params):
    """Convert parameter array to BLR and values format.

    Returns (blr, values): a BLR message description and the matching
    packed value buffer.  For protocol v13+ the value buffer starts with
    a NULL-indicator bitmap; before v13 each value is followed by a
    4-byte NULL flag instead.  Byte strings longer than MAX_CHAR_LENGTH
    are uploaded as blobs via _create_blob().
    """
    ln = len(params) * 2
    blr = bs([5, 2, 4, 0, ln & 255, ln >> 8])  # BLR message header
    if self.accept_version < PROTOCOL_VERSION13:
        values = bs([])
    else:
        # start with null indicator bitmap
        null_indicator = 0
        for i, p in enumerate(params):
            if p is None:
                null_indicator |= (1 << i)
        n = len(params) // 8
        if len(params) % 8 != 0:
            n += 1
        if n % 4:   # padding
            n += 4 - n % 4
        null_indicator_bytes = []
        for i in range(n):  # emit bitmap little-endian, byte by byte
            null_indicator_bytes.append(null_indicator & 255)
            null_indicator >>= 8
        values = bs(null_indicator_bytes)
    for p in params:
        # Text parameters are encoded with the connection charset first.
        if (
            (PYTHON_MAJOR_VER == 2 and type(p) == unicode) or
            (PYTHON_MAJOR_VER == 3 and type(p) == str)
        ):
            p = self.str_to_bytes(p)
        t = type(p)
        if p is None:
            v = bs([])
            blr += bs([14, 0, 0])  # zero-length text descriptor
        elif (
            (PYTHON_MAJOR_VER == 2 and t == str) or
            (PYTHON_MAJOR_VER == 3 and t == bytes)
        ):
            if len(p) > MAX_CHAR_LENGTH:
                # Too long for inline text: upload as a blob, send its id.
                v = self._create_blob(trans_handle, p)
                blr += bs([9, 0])
            else:
                v = p
                nbytes = len(v)
                pad_length = ((4-nbytes) & 3)
                v += bs([0]) * pad_length
                blr += bs([14, nbytes & 255, nbytes >> 8])
        elif t == int:
            v = bint_to_bytes(p, 4)
            blr += bs([8, 0]) # blr_long
        elif t == float and p == float("inf"):
            v = b'\x7f\x80\x00\x00'  # IEEE-754 single +inf
            blr += bs([10])
        elif t == decimal.Decimal or t == float:
            if t == float:
                p = decimal.Decimal(str(p))
            # Scaled 64-bit integer: digits as an int, exponent as scale.
            (sign, digits, exponent) = p.as_tuple()
            v = 0
            ln = len(digits)
            for i in range(ln):
                v += digits[i] * (10 ** (ln - i - 1))
            if sign:
                v *= -1
            v = bint_to_bytes(v, 8)
            if exponent < 0:
                exponent += 256  # scale byte is two's-complement
            blr += bs([16, exponent])
        elif t == datetime.date:
            v = convert_date(p)
            blr += bs([12])
        elif t == datetime.time:
            if p.tzinfo:
                v = convert_time_tz(p)
                blr += bs([28])
            else:
                v = convert_time(p)
                blr += bs([13])
        elif t == datetime.datetime:
            if p.tzinfo:
                v = convert_timestamp_tz(p)
                blr += bs([29])
            else:
                v = convert_timestamp(p)
                blr += bs([35])
        elif t == bool:
            v = bs([1, 0, 0, 0]) if p else bs([0, 0, 0, 0])
            blr += bs([23])
        else:   # fallback, convert to string
            p = p.__repr__()
            if PYTHON_MAJOR_VER == 3 or (PYTHON_MAJOR_VER == 2 and type(p) == unicode):
                p = self.str_to_bytes(p)
            v = p
            nbytes = len(v)
            pad_length = ((4-nbytes) & 3)
            v += bs([0]) * pad_length
            blr += bs([14, nbytes & 255, nbytes >> 8])
        blr += bs([7, 0])  # per-parameter NULL-indicator descriptor
        values += v
        if self.accept_version < PROTOCOL_VERSION13:
            # Pre-v13: trailing 4-byte NULL flag per value.
            values += bs([0]) * 4 if not p is None else bs([0xff, 0xff, 0xff, 0xff])
    blr += bs([255, 76]) # [blr_end, blr_eoc]
    return blr, values
def uid(self, auth_plugin_name, wire_crypt):
    """Build the CNCT user-identification blob carried by op_connect.

    Contains login name, the requested auth plugin and plugin list,
    plugin-specific data (hex SRP public key, or crypt()ed password for
    Legacy_Auth), the wire-encryption preference, and the client's OS
    user/host names.
    """
    def pack_cnct_param(k, v):
        # CNCT parameters are tag/length/value; CNCT_specific_data longer
        # than 254 bytes is split into numbered 254-byte chunks.
        if k != CNCT_specific_data:
            return bs([k] + [len(v)]) + v
        # specific_data split per 254 bytes
        b = b''
        i = 0
        while len(v) > 254:
            b += bs([k, 255, i]) + v[:254]
            v = v[254:]
            i += 1
        b += bs([k, len(v)+1, i]) + v
        return b
    auth_plugin_list = ('Srp256', 'Srp', 'Legacy_Auth')
    # get and calculate CNCT_xxxx values
    if sys.platform == 'win32':
        user = os.environ['USERNAME']
        hostname = os.environ['COMPUTERNAME']
    else:
        user = os.environ.get('USER', '')
        hostname = socket.gethostname()
    if auth_plugin_name in ('Srp256', 'Srp'):
        # Generate a fresh SRP keypair; the public key travels as hex.
        self.client_public_key, self.client_private_key = srp.client_seed()
        specific_data = bytes_to_hex(srp.long2bytes(self.client_public_key))
    elif auth_plugin_name == 'Legacy_Auth':
        assert crypt, "Legacy_Auth needs crypt module"
        specific_data = self.str_to_bytes(get_crypt(self.password))
    else:
        raise OperationalError("Unknown auth plugin name '%s'" % (auth_plugin_name,))
    self.plugin_name = auth_plugin_name
    self.plugin_list = b','.join([s.encode('utf-8') for s in auth_plugin_list])
    client_crypt = b'\x01\x00\x00\x00' if wire_crypt else b'\x00\x00\x00\x00'
    # set CNCT_xxxx values
    r = b''
    r += pack_cnct_param(CNCT_login, self.str_to_bytes(self.user))
    r += pack_cnct_param(CNCT_plugin_name, self.str_to_bytes(self.plugin_name))
    r += pack_cnct_param(CNCT_plugin_list, self.plugin_list)
    r += pack_cnct_param(CNCT_specific_data, specific_data)
    r += pack_cnct_param(CNCT_client_crypt, client_crypt)
    r += pack_cnct_param(CNCT_user, self.str_to_bytes(user))
    r += pack_cnct_param(CNCT_host, self.str_to_bytes(hostname))
    r += pack_cnct_param(CNCT_user_verification, b'')
    return r
@wire_operation
def _op_connect(self, auth_plugin_name, wire_crypt):
protocols = [
# PROTOCOL_VERSION, Arch type (Generic=1), min, max, weight
'0000000a00000001000000000000000500000002', # 10, 1, 0, 5, 2
'ffff800b00000001000000000000000500000004', # 11, 1, 0, 5, 4
'ffff800c00000001000000000000000500000006', # 12, 1, 0, 5, 6
'ffff800d00000001000000000000000500000008', # 13, 1, 0, 5, 8
]
p = xdrlib.Packer()
p.pack_int(self.op_connect)
p.pack_int(self.op_attach)
p.pack_int(3) # CONNECT_VERSION
p.pack_int(1) # arch_generic
p.pack_string(self.str_to_bytes(self.filename if self.filename else ''))
p.pack_int(len(protocols))
p.pack_bytes(self.uid(auth_plugin_name, wire_crypt))
self.sock.send(p.get_buffer() + hex_to_bytes(''.join(protocols)))
@wire_operation
def _op_create(self, page_size=4096):
    """Send op_create to create the database file with *page_size*.

    The DPB carries charset, credentials (plain, encrypted, or
    specific-auth-data depending on the negotiated protocol), optional
    role and timezone, and forces dialect 3, forced writes and overwrite.
    """
    dpb = bs([1])  # DPB version 1
    s = self.str_to_bytes(self.charset)
    dpb += bs([isc_dpb_set_db_charset, len(s)]) + s
    dpb += bs([isc_dpb_lc_ctype, len(s)]) + s
    s = self.str_to_bytes(self.user)
    dpb += bs([isc_dpb_user_name, len(s)]) + s
    if self.accept_version < PROTOCOL_VERSION13:
        # Pre-v13: password goes in the DPB (crypt()ed when possible).
        enc_pass = get_crypt(self.password)
        if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
            s = self.str_to_bytes(self.password)
            dpb += bs([isc_dpb_password, len(s)]) + s
        else:
            enc_pass = self.str_to_bytes(enc_pass)
            dpb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
    if self.role:
        s = self.str_to_bytes(self.role)
        dpb += bs([isc_dpb_sql_role_name, len(s)]) + s
    if self.auth_data:
        # v13+: proof from the SRP/Legacy handshake, hex-encoded.
        s = bytes_to_hex(self.auth_data)
        dpb += bs([isc_dpb_specific_auth_data, len(s)]) + s
    if self.timezone:
        s = self.str_to_bytes(self.timezone)
        dpb += bs([isc_dpb_session_time_zone, len(s)]) + s
    dpb += bs([isc_dpb_sql_dialect, 4]) + int_to_bytes(3, 4)
    dpb += bs([isc_dpb_force_write, 4]) + int_to_bytes(1, 4)
    dpb += bs([isc_dpb_overwrite, 4]) + int_to_bytes(1, 4)
    dpb += bs([isc_dpb_page_size, 4]) + int_to_bytes(page_size, 4)
    p = xdrlib.Packer()
    p.pack_int(self.op_create)
    p.pack_int(0) # Database Object ID
    p.pack_string(self.str_to_bytes(self.filename))
    p.pack_bytes(dpb)
    self.sock.send(p.get_buffer())
@wire_operation
def _op_cont_auth(self, auth_data, auth_plugin_name, auth_plugin_list, keys):
    """Send op_cont_auth to continue the authentication handshake;
    *auth_data* travels hex-encoded."""
    pk = xdrlib.Packer()
    pk.pack_int(self.op_cont_auth)
    pk.pack_string(bytes_to_hex(auth_data))
    for blob in (auth_plugin_name, auth_plugin_list, keys):
        pk.pack_bytes(blob)
    self.sock.send(pk.get_buffer())
@wire_operation
def _parse_connect_response(self):
    """Handle the server's reply to op_connect.

    Accepts op_accept, op_cond_accept or op_accept_data; stores the
    negotiated protocol version/architecture/type, runs the SRP or
    Legacy_Auth continuation when the server demands it, and (for
    op_cond_accept) optionally enables Arc4 wire encryption.
    Raises OperationalError when the connection is rejected.
    """
    # want and treat op_accept or op_cond_accept or op_accept_data
    b = self.recv_channel(4)
    while bytes_to_bint(b) == self.op_dummy:
        b = self.recv_channel(4)
    if bytes_to_bint(b) == self.op_reject:
        raise OperationalError('Connection is rejected')
    op_code = bytes_to_bint(b)
    if op_code == self.op_response:
        return self._parse_op_response()    # error occured
    b = self.recv_channel(12)
    self.accept_version = byte_to_int(b[3])
    self.accept_architecture = bytes_to_bint(b[4:8])
    self.accept_type = bytes_to_bint(b[8:])
    self.lazy_response_count = 0
    if op_code == self.op_cond_accept or op_code == self.op_accept_data:
        # Extra auth payload: server data, chosen plugin, auth flag, keys.
        ln = bytes_to_bint(self.recv_channel(4))
        data = self.recv_channel(ln, word_alignment=True)
        ln = bytes_to_bint(self.recv_channel(4))
        self.accept_plugin_name = self.recv_channel(ln, word_alignment=True)
        is_authenticated = bytes_to_bint(self.recv_channel(4))
        ln = bytes_to_bint(self.recv_channel(4))
        self.recv_channel(ln, word_alignment=True) # keys
        if is_authenticated == 0:
            if self.accept_plugin_name in (b'Srp256', b'Srp'):
                hash_algo = {
                    b'Srp256': hashlib.sha256,
                    b'Srp': hashlib.sha1,
                }[self.accept_plugin_name]
                # Quoted user names keep case (with "" unescaped);
                # unquoted names are upper-cased.
                user = self.user
                if len(user) > 2 and user[0] == user[-1] == '"':
                    user = user[1:-1]
                    user = user.replace('""','"')
                else:
                    user = user.upper()
                if len(data) == 0:
                    # Server sent no SRP data yet: send our public key and
                    # read the server's salt/public key from op_cont_auth.
                    # send op_cont_auth
                    self._op_cont_auth(
                        srp.long2bytes(self.client_public_key),
                        self.accept_plugin_name,
                        self.plugin_list,
                        b''
                    )
                    # parse op_cont_auth
                    b = self.recv_channel(4)
                    assert bytes_to_bint(b) == self.op_cont_auth
                    ln = bytes_to_bint(self.recv_channel(4))
                    data = self.recv_channel(ln, word_alignment=True)
                    ln = bytes_to_bint(self.recv_channel(4))
                    plugin_name = self.recv_channel(ln, word_alignment=True)
                    ln = bytes_to_bint(self.recv_channel(4))
                    plugin_list = self.recv_channel(ln, word_alignment=True)
                    ln = bytes_to_bint(self.recv_channel(4))
                    keys = self.recv_channel(ln, word_alignment=True)
                # data = 2-byte salt length, salt, then hex server key.
                ln = bytes_to_int(data[:2])
                server_salt = data[2:ln+2]
                server_public_key = srp.bytes2long(
                    hex_to_bytes(data[4+ln:]))
                auth_data, session_key = srp.client_proof(
                    self.str_to_bytes(user),
                    self.str_to_bytes(self.password),
                    server_salt,
                    self.client_public_key,
                    server_public_key,
                    self.client_private_key,
                    hash_algo)
            elif self.accept_plugin_name == b'Legacy_Auth':
                auth_data = self.str_to_bytes(get_crypt(self.password))
                session_key = b''
            else:
                raise OperationalError(
                    'Unknown auth plugin %s' % (self.accept_plugin_name)
                )
        else:
            auth_data = b''
            session_key = b''
        if op_code == self.op_cond_accept:
            # Finish authentication now, on this connection.
            self._op_cont_auth(
                auth_data,
                self.accept_plugin_name,
                self.plugin_list,
                b''
            )
            (h, oid, buf) = self._op_response()
            if self.wire_crypt and session_key:
                # op_crypt: plugin[Arc4] key[Symmetric]
                p = xdrlib.Packer()
                p.pack_int(self.op_crypt)
                p.pack_string(b'Arc4')
                p.pack_string(b'Symmetric')
                self.sock.send(p.get_buffer())
                self.sock.set_translator(
                    ARC4.new(session_key), ARC4.new(session_key))
                (h, oid, buf) = self._op_response()
            else:   # use later _op_attach() and _op_create()
                self.auth_data = auth_data
    else:
        assert op_code == self.op_accept
@wire_operation
def _op_attach(self):
    """Send op_attach to attach to an existing database.

    The DPB carries charset, credentials (form depends on the negotiated
    protocol), optional role/timezone, and the client process id/name.
    """
    dpb = bs([isc_dpb_version1])
    s = self.str_to_bytes(self.charset)
    dpb += bs([isc_dpb_lc_ctype, len(s)]) + s
    s = self.str_to_bytes(self.user)
    dpb += bs([isc_dpb_user_name, len(s)]) + s
    if self.accept_version < PROTOCOL_VERSION13:
        # Pre-v13: password goes in the DPB (crypt()ed when possible).
        enc_pass = get_crypt(self.password)
        if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
            s = self.str_to_bytes(self.password)
            dpb += bs([isc_dpb_password, len(s)]) + s
        else:
            enc_pass = self.str_to_bytes(enc_pass)
            dpb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
    if self.role:
        s = self.str_to_bytes(self.role)
        dpb += bs([isc_dpb_sql_role_name, len(s)]) + s
    dpb += bs([isc_dpb_process_id, 4]) + int_to_bytes(os.getpid(), 4)
    s = self.str_to_bytes(sys.argv[0])
    dpb += bs([isc_dpb_process_name, len(s)]) + s
    if self.auth_data:
        # v13+: proof from the SRP/Legacy handshake, hex-encoded.
        s = bytes_to_hex(self.auth_data)
        dpb += bs([isc_dpb_specific_auth_data, len(s)]) + s
    if self.timezone:
        s = self.str_to_bytes(self.timezone)
        dpb += bs([isc_dpb_session_time_zone, len(s)]) + s
    p = xdrlib.Packer()
    p.pack_int(self.op_attach)
    p.pack_int(0) # Database Object ID
    p.pack_string(self.str_to_bytes(self.filename))
    p.pack_bytes(dpb)
    self.sock.send(p.get_buffer())
@wire_operation
def _op_drop_database(self):
    """Drop the currently attached database."""
    if self.db_handle is None:
        raise OperationalError('_op_drop_database() Invalid db handle')
    pk = xdrlib.Packer()
    for word in (self.op_drop_database, self.db_handle):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_service_attach(self):
    """Attach to the 'service_mgr' service with credentials in the SPB."""
    spb = bs([2, 2])  # SPB version bytes — presumably isc_spb_version/
                      # isc_spb_current_version; verify against constants
    s = self.str_to_bytes(self.user)
    spb += bs([isc_spb_user_name, len(s)]) + s
    if self.accept_version < PROTOCOL_VERSION13:
        # Pre-v13: password goes in the SPB (crypt()ed when possible).
        enc_pass = get_crypt(self.password)
        if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
            s = self.str_to_bytes(self.password)
            spb += bs([isc_dpb_password, len(s)]) + s
        else:
            enc_pass = self.str_to_bytes(enc_pass)
            spb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
    if self.auth_data:
        # v13+: proof from the SRP/Legacy handshake, hex-encoded.
        s = self.str_to_bytes(bytes_to_hex(self.auth_data))
        spb += bs([isc_dpb_specific_auth_data, len(s)]) + s
    spb += bs([isc_spb_dummy_packet_interval, 0x04, 0x78, 0x0a, 0x00, 0x00])
    p = xdrlib.Packer()
    p.pack_int(self.op_service_attach)
    p.pack_int(0)  # object id
    p.pack_string(self.str_to_bytes('service_mgr'))
    p.pack_bytes(spb)
    self.sock.send(p.get_buffer())
@wire_operation
def _op_service_info(self, param, item, buffer_length=512):
    """Query service information *item* using service parameters *param*."""
    if self.db_handle is None:
        raise OperationalError('_op_service_info() Invalid db handle')
    pk = xdrlib.Packer()
    for word in (self.op_service_info, self.db_handle, 0):
        pk.pack_int(word)
    pk.pack_bytes(param)
    pk.pack_bytes(item)
    pk.pack_int(buffer_length)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_service_start(self, param):
    """Start a service task described by the parameter block *param*."""
    if self.db_handle is None:
        raise OperationalError('_op_service_start() Invalid db handle')
    pk = xdrlib.Packer()
    for word in (self.op_service_start, self.db_handle, 0):
        pk.pack_int(word)
    pk.pack_bytes(param)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_service_detach(self):
    """Detach from the service manager."""
    if self.db_handle is None:
        raise OperationalError('_op_service_detach() Invalid db handle')
    pk = xdrlib.Packer()
    for word in (self.op_service_detach, self.db_handle):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_info_database(self, b):
    """Request database-info items *b* from the server."""
    if self.db_handle is None:
        raise OperationalError('_op_info_database() Invalid db handle')
    pk = xdrlib.Packer()
    for word in (self.op_info_database, self.db_handle, 0):
        pk.pack_int(word)
    pk.pack_bytes(b)
    pk.pack_int(self.buffer_length)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_transaction(self, tpb):
    """Start a transaction described by the transaction parameter buffer."""
    if self.db_handle is None:
        raise OperationalError('_op_transaction() Invalid db handle')
    pk = xdrlib.Packer()
    for word in (self.op_transaction, self.db_handle):
        pk.pack_int(word)
    pk.pack_bytes(tpb)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_commit(self, trans_handle):
    """Commit and end the transaction *trans_handle*."""
    pk = xdrlib.Packer()
    for word in (self.op_commit, trans_handle):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_commit_retaining(self, trans_handle):
    """Commit *trans_handle* while keeping the transaction context alive."""
    pk = xdrlib.Packer()
    for word in (self.op_commit_retaining, trans_handle):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_rollback(self, trans_handle):
    """Roll back and end the transaction *trans_handle*."""
    pk = xdrlib.Packer()
    for word in (self.op_rollback, trans_handle):
        pk.pack_int(word)
    self.sock.send(pk.get_buffer())
@wire_operation
def _op_rollback_retaining(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_rollback_retaining)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_allocate_statement(self):
if self.db_handle is None:
raise OperationalError('_op_allocate_statement() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_allocate_statement)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_info_transaction(self, trans_handle, b):
p = xdrlib.Packer()
p.pack_int(self.op_info_transaction)
p.pack_int(trans_handle)
p.pack_int(0)
p.pack_bytes(b)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_free_statement(self, stmt_handle, mode):
p = xdrlib.Packer()
p.pack_int(self.op_free_statement)
p.pack_int(stmt_handle)
p.pack_int(mode)
self.sock.send(p.get_buffer())
@wire_operation
def _op_prepare_statement(self, stmt_handle, trans_handle, query, option_items=None):
if option_items is None:
option_items=bs([])
desc_items = option_items + bs([isc_info_sql_stmt_type])+INFO_SQL_SELECT_DESCRIBE_VARS
p = xdrlib.Packer()
p.pack_int(self.op_prepare_statement)
p.pack_int(trans_handle)
p.pack_int(stmt_handle)
p.pack_int(3) # dialect = 3
p.pack_string(self.str_to_bytes(query))
p.pack_bytes(desc_items)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_info_sql(self, stmt_handle, vars):
p = xdrlib.Packer()
p.pack_int(self.op_info_sql)
p.pack_int(stmt_handle)
p.pack_int(0)
p.pack_bytes(vars)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_execute(self, stmt_handle, trans_handle, params):
p = xdrlib.Packer()
p.pack_int(self.op_execute)
p.pack_int(stmt_handle)
p.pack_int(trans_handle)
if len(params) == 0:
p.pack_bytes(bs([]))
p.pack_int(0)
p.pack_int(0)
self.sock.send(p.get_buffer())
else:
(blr, values) = self.params_to_blr(trans_handle, params)
p.pack_bytes(blr)
p.pack_int(0)
p.pack_int(1)
self.sock.send(p.get_buffer() + values)
@wire_operation
def _op_execute2(self, stmt_handle, trans_handle, params, output_blr):
p = xdrlib.Packer()
p.pack_int(self.op_execute2)
p.pack_int(stmt_handle)
p.pack_int(trans_handle)
if len(params) == 0:
values = b''
p.pack_bytes(bs([]))
p.pack_int(0)
p.pack_int(0)
else:
(blr, values) = self.params_to_blr(trans_handle, params)
p.pack_bytes(blr)
p.pack_int(0)
p.pack_int(1)
q = xdrlib.Packer()
q.pack_bytes(output_blr)
q.pack_int(0)
self.sock.send(p.get_buffer() + values + q.get_buffer())
@wire_operation
def _op_exec_immediate(self, trans_handle, query):
if self.db_handle is None:
raise OperationalError('_op_exec_immediate() Invalid db handle')
desc_items = bs([])
p = xdrlib.Packer()
p.pack_int(self.op_exec_immediate)
p.pack_int(trans_handle)
p.pack_int(self.db_handle)
p.pack_int(3) # dialect = 3
p.pack_string(self.str_to_bytes(query))
p.pack_bytes(desc_items)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_fetch(self, stmt_handle, blr):
p = xdrlib.Packer()
p.pack_int(self.op_fetch)
p.pack_int(stmt_handle)
p.pack_bytes(blr)
p.pack_int(0)
p.pack_int(400)
self.sock.send(p.get_buffer())
@wire_operation
def _op_fetch_response(self, stmt_handle, xsqlda):
op_code = bytes_to_bint(self.recv_channel(4))
while op_code == self.op_dummy:
op_code = bytes_to_bint(self.recv_channel(4))
while op_code == self.op_response and self.lazy_response_count:
self.lazy_response_count -= 1
h, oid, buf = self._parse_op_response()
op_code = bytes_to_bint(self.recv_channel(4))
if op_code != self.op_fetch_response:
if op_code == self.op_response:
self._parse_op_response()
raise InternalError("op_fetch_response:op_code = %d" % (op_code,))
b = self.recv_channel(8)
status = bytes_to_bint(b[:4])
count = bytes_to_bint(b[4:8])
rows = []
while count:
r = [None] * len(xsqlda)
if self.accept_version < PROTOCOL_VERSION13:
for i in range(len(xsqlda)):
x = xsqlda[i]
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
if self.recv_channel(4) == bs([0]) * 4: # Not NULL
r[i] = x.value(raw_value)
else: # PROTOCOL_VERSION13
n = len(xsqlda) // 8
if len(xsqlda) % 8 != 0:
n += 1
null_indicator = 0
for c in reversed(self.recv_channel(n, word_alignment=True)):
null_indicator <<= 8
null_indicator += c if PYTHON_MAJOR_VER == 3 else ord(c)
for i in range(len(xsqlda)):
x = xsqlda[i]
if null_indicator & (1 << i):
continue
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
r[i] = x.value(raw_value)
rows.append(r)
b = self.recv_channel(12)
op_code = bytes_to_bint(b[:4])
status = bytes_to_bint(b[4:8])
count = bytes_to_bint(b[8:])
return rows, status != 100
@wire_operation
def _op_detach(self):
if self.db_handle is None:
raise OperationalError('_op_detach() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_detach)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_open_blob(self, blob_id, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_open_blob)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer() + blob_id)
@wire_operation
def _op_create_blob2(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_create_blob2)
p.pack_int(0)
p.pack_int(trans_handle)
p.pack_int(0)
p.pack_int(0)
self.sock.send(p.get_buffer())
@wire_operation
def _op_get_segment(self, blob_handle):
p = xdrlib.Packer()
p.pack_int(self.op_get_segment)
p.pack_int(blob_handle)
p.pack_int(self.buffer_length)
p.pack_int(0)
self.sock.send(p.get_buffer())
@wire_operation
def _op_put_segment(self, blob_handle, seg_data):
ln = len(seg_data)
p = xdrlib.Packer()
p.pack_int(self.op_put_segment)
p.pack_int(blob_handle)
p.pack_int(ln)
p.pack_int(ln)
pad_length = (4-ln) & 3
self.sock.send(p.get_buffer() + seg_data + bs([0])*pad_length)
@wire_operation
def _op_batch_segments(self, blob_handle, seg_data):
ln = len(seg_data)
p = xdrlib.Packer()
p.pack_int(self.op_batch_segments)
p.pack_int(blob_handle)
p.pack_int(ln + 2)
p.pack_int(ln + 2)
pad_length = ((4-(ln+2)) & 3)
self.sock.send(p.get_buffer() + int_to_bytes(ln, 2) + seg_data + bs([0])*pad_length)
@wire_operation
def _op_close_blob(self, blob_handle):
p = xdrlib.Packer()
p.pack_int(self.op_close_blob)
p.pack_int(blob_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_que_events(self, event_names, event_id):
if self.db_handle is None:
raise OperationalError('_op_que_events() Invalid db handle')
params = bs([1])
for name, n in event_names.items():
params += bs([len(name)])
params += self.str_to_bytes(name)
params += int_to_bytes(n, 4)
p = xdrlib.Packer()
p.pack_int(self.op_que_events)
p.pack_int(self.db_handle)
p.pack_bytes(params)
p.pack_int(0) # ast
p.pack_int(0) # args
p.pack_int(event_id)
self.sock.send(p.get_buffer())
@wire_operation
def _op_cancel_events(self, event_id):
if self.db_handle is None:
raise OperationalError('_op_cancel_events() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_cancel_events)
p.pack_int(self.db_handle)
p.pack_int(event_id)
self.sock.send(p.get_buffer())
@wire_operation
def _op_connect_request(self):
if self.db_handle is None:
raise OperationalError('_op_connect_request() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_connect_request)
p.pack_int(1) # async
p.pack_int(self.db_handle)
p.pack_int(0)
self.sock.send(p.get_buffer())
@wire_operation
def _op_response(self):
b = self.recv_channel(4)
while bytes_to_bint(b) == self.op_dummy:
b = self.recv_channel(4)
op_code = bytes_to_bint(b)
while op_code == self.op_response and self.lazy_response_count:
self.lazy_response_count -= 1
h, oid, buf = self._parse_op_response()
b = self.recv_channel(4)
if op_code == self.op_cont_auth:
raise OperationalError('Unauthorized')
elif op_code != self.op_response:
raise InternalError("_op_response:op_code = %d" % (op_code,))
return self._parse_op_response()
@wire_operation
def _op_event(self):
b = self.recv_channel(4)
while bytes_to_bint(b) == self.op_dummy:
b = self.recv_channel(4)
op_code = bytes_to_bint(b)
if op_code == self.op_response and self.lazy_response_count:
self.lazy_response_count -= 1
self._parse_op_response()
b = self.recv_channel(4)
if op_code == self.op_exit or bytes_to_bint(b) == self.op_exit:
raise DisconnectByPeer
if op_code != self.op_event:
if op_code == self.op_response:
self._parse_op_response()
raise InternalError("_op_event:op_code = %d" % (op_code,))
return self._parse_op_event()
@wire_operation
def _op_sql_response(self, xsqlda):
b = self.recv_channel(4)
while bytes_to_bint(b) == self.op_dummy:
b = self.recv_channel(4)
op_code = bytes_to_bint(b)
if op_code != self.op_sql_response:
if op_code == self.op_response:
self._parse_op_response()
raise InternalError("_op_sql_response:op_code = %d" % (op_code,))
b = self.recv_channel(4)
count = bytes_to_bint(b[:4])
r = []
if count == 0:
return []
if self.accept_version < PROTOCOL_VERSION13:
for i in range(len(xsqlda)):
x = xsqlda[i]
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
if self.recv_channel(4) == bs([0]) * 4: # Not NULL
r.append(x.value(raw_value))
else:
r.append(None)
else:
n = len(xsqlda) // 8
if len(xsqlda) % 8 != 0:
n += 1
null_indicator = 0
for c in reversed(self.recv_channel(n, word_alignment=True)):
null_indicator <<= 8
null_indicator += c if PYTHON_MAJOR_VER == 3 else ord(c)
for i in range(len(xsqlda)):
x = xsqlda[i]
if null_indicator & (1 << i):
r.append(None)
else:
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
r.append(x.value(raw_value))
return r
def _wait_for_event(self, timeout):
    """Block until the server posts an event notification.

    Returns ``(event_id, {event_name: count})`` when an op_event
    packet arrives, or ``(0, {})`` on op_exit/op_disconnect.
    """
    event_names = {}
    event_id = 0
    while True:
        b4 = self.recv_channel(4)
        if b4 is None:
            return None
        op_code = bytes_to_bint(b4)
        if op_code == self.op_dummy:
            pass  # keep-alive packet; keep waiting
        elif op_code == self.op_exit or op_code == self.op_disconnect:
            break
        elif op_code == self.op_event:
            bytes_to_int(self.recv_channel(4))  # db_handle (discarded)
            ln = bytes_to_bint(self.recv_channel(4))
            b = self.recv_channel(ln, word_alignment=True)
            assert byte_to_int(b[0]) == 1  # event-buffer version marker
            i = 1
            # Each entry: 1-byte name length, name bytes, 4-byte count.
            while i < len(b):
                ln = byte_to_int(b[i])
                s = self.connection.bytes_to_str(b[i+1:i+1+ln])
                n = bytes_to_int(b[i+1+ln:i+1+ln+4])
                event_names[s] = n
                i += ln + 5  # 1 (length byte) + ln (name) + 4 (count)
            self.recv_channel(8)  # ignore AST info
            event_id = bytes_to_bint(self.recv_channel(4))
            break
        else:
            raise InternalError("_wait_for_event:op_code = %d" % (op_code,))
    return (event_id, event_names)
|
nakagami/pyfirebirdsql
|
firebirdsql/wireprotocol.py
|
WireProtocol.bytes_to_ustr
|
python
|
def bytes_to_ustr(self, b):
"convert bytes array to unicode string"
return b.decode(charset_map.get(self.charset, self.charset))
|
convert bytes array to unicode string
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/wireprotocol.py#L240-L242
| null |
class WireProtocol(object):
buffer_length = 1024
op_connect = 1
op_exit = 2
op_accept = 3
op_reject = 4
op_protocol = 5
op_disconnect = 6
op_response = 9
op_attach = 19
op_create = 20
op_detach = 21
op_transaction = 29
op_commit = 30
op_rollback = 31
op_open_blob = 35
op_get_segment = 36
op_put_segment = 37
op_close_blob = 39
op_info_database = 40
op_info_transaction = 42
op_batch_segments = 44
op_que_events = 48
op_cancel_events = 49
op_commit_retaining = 50
op_event = 52
op_connect_request = 53
op_aux_connect = 53
op_create_blob2 = 57
op_allocate_statement = 62
op_execute = 63
op_exec_immediate = 64
op_fetch = 65
op_fetch_response = 66
op_free_statement = 67
op_prepare_statement = 68
op_info_sql = 70
op_dummy = 71
op_execute2 = 76
op_sql_response = 78
op_drop_database = 81
op_service_attach = 82
op_service_detach = 83
op_service_info = 84
op_service_start = 85
op_rollback_retaining = 86
# FB3
op_update_account_info = 87
op_authenticate_user = 88
op_partial = 89
op_trusted_auth = 90
op_cancel = 91
op_cont_auth = 92
op_ping = 93
op_accept_data = 94
op_abort_aux_connection = 95
op_crypt = 96
op_crypt_key_callback = 97
op_cond_accept = 98
def __init__(self):
self.accept_plugin_name = ''
self.auth_data = b''
def recv_channel(self, nbytes, word_alignment=False):
    """Read exactly ``nbytes`` from the connection.

    With ``word_alignment=True``, also consume (and discard) the
    padding bytes that round the value up to a 4-byte boundary on the
    wire.  Raises OperationalError if fewer than ``nbytes`` bytes can
    be read.
    """
    n = nbytes
    if word_alignment and (n % 4):
        n += 4 - nbytes % 4  # 4 bytes word alignment
    r = bs([])
    while n:
        # NOTE(review): this select() appears to act as a read-timeout
        # guard on the underlying socket; a timeout breaks out with a
        # short read, surfacing as the generic OperationalError below.
        if (self.timeout is not None and select.select([self.sock._sock], [], [], self.timeout)[0] == []):
            break
        b = self.sock.recv(n)
        if not b:
            break  # peer closed the connection
        r += b
        n -= len(b)
    if len(r) < nbytes:
        raise OperationalError('Can not recv() packets')
    return r[:nbytes]
def str_to_bytes(self, s):
    "convert str to bytes"
    # Text must be encoded with the connection charset; on Python 2 a
    # plain byte string is already encoded and passes through as-is.
    needs_encoding = (
        PYTHON_MAJOR_VER == 3
        or (PYTHON_MAJOR_VER == 2 and type(s) == unicode)
    )
    if needs_encoding:
        encoding = charset_map.get(self.charset, self.charset)
        return s.encode(encoding)
    return s
def bytes_to_str(self, b):
    "convert bytes array to raw string"
    # Python 2 byte strings are returned untouched; Python 3 decodes
    # with the connection charset (mapped through charset_map).
    if PYTHON_MAJOR_VER != 3:
        return b
    return b.decode(charset_map.get(self.charset, self.charset))
def _parse_status_vector(self):
    """Read a Firebird status vector from the wire.

    Returns ``(gds_codes, sql_code, message)``: the set of reported
    GDS error codes, the SQLCODE (when sent), and the human-readable
    message with its ``@n`` placeholders substituted.
    """
    sql_code = 0
    gds_codes = set()
    message = ''
    n = bytes_to_bint(self.recv_channel(4))
    while n != isc_arg_end:
        if n == isc_arg_gds:
            gds_code = bytes_to_bint(self.recv_channel(4))
            if gds_code:
                gds_codes.add(gds_code)
                # Unknown codes fall back to a bare '@1' placeholder.
                message += messages.get(gds_code, '@1')
                num_arg = 0
        elif n == isc_arg_number:
            num = bytes_to_bint(self.recv_channel(4))
            if gds_code == 335544436:  # this GDS code carries the SQLCODE
                sql_code = num
            num_arg += 1
            message = message.replace('@' + str(num_arg), str(num))
        elif n == isc_arg_string:
            nbytes = bytes_to_bint(self.recv_channel(4))
            # NOTE(review): str() of a bytes value on Python 3 yields
            # its repr (b'...'); presumably a decode was intended --
            # confirm against message output.
            s = str(self.recv_channel(nbytes, word_alignment=True))
            num_arg += 1
            message = message.replace('@' + str(num_arg), s)
        elif n == isc_arg_interpreted:
            nbytes = bytes_to_bint(self.recv_channel(4))
            s = str(self.recv_channel(nbytes, word_alignment=True))
            message += s
        elif n == isc_arg_sql_state:
            # SQLSTATE is read off the wire but currently discarded.
            nbytes = bytes_to_bint(self.recv_channel(4))
            s = str(self.recv_channel(nbytes, word_alignment=True))
        n = bytes_to_bint(self.recv_channel(4))
    return (gds_codes, sql_code, message)
def _parse_op_response(self):
    """Parse an op_response packet body.

    Returns ``(handle, object_id, buffer)``.  Raises IntegrityError
    for the listed GDS codes, issues a warning for GDS 335544321, and
    raises OperationalError for any other reported error.
    """
    b = self.recv_channel(16)
    h = bytes_to_bint(b[0:4])  # Object handle
    oid = b[4:12]  # Object ID
    buf_len = bytes_to_bint(b[12:])  # buffer length
    buf = self.recv_channel(buf_len, word_alignment=True)
    (gds_codes, sql_code, message) = self._parse_status_vector()
    if gds_codes.intersection([
        335544838, 335544879, 335544880, 335544466, 335544665, 335544347, 335544558
    ]):
        # These GDS codes are surfaced as integrity (constraint) errors.
        raise IntegrityError(message, gds_codes, sql_code)
    elif gds_codes.intersection([335544321]):
        # This code is reported as a warning rather than raised.
        warnings.warn(message)
    elif (sql_code or message) and not gds_codes.intersection([335544434]):
        raise OperationalError(message, gds_codes, sql_code)
    return (h, oid, buf)
def _parse_op_event(self):
    """Read an op_event payload; returns (db_handle, event_id, {})."""
    payload = self.recv_channel(4096)  # too large TODO: read step by step
    # TODO: parse event name
    db_handle = bytes_to_bint(payload[0:4])
    event_id = bytes_to_bint(payload[-4:])
    return (db_handle, event_id, {})
def _create_blob(self, trans_handle, b):
self._op_create_blob2(trans_handle)
(blob_handle, blob_id, buf) = self._op_response()
i = 0
while i < len(b):
self._op_put_segment(blob_handle, b[i:i+BLOB_SEGMENT_SIZE])
(h, oid, buf) = self._op_response()
i += BLOB_SEGMENT_SIZE
self._op_close_blob(blob_handle)
(h, oid, buf) = self._op_response()
return blob_id
def params_to_blr(self, trans_handle, params):
    "Convert parameter array to BLR and values format."
    # Each parameter contributes two BLR message items (the value plus
    # a NULL-flag short), hence the doubled message length.
    ln = len(params) * 2
    blr = bs([5, 2, 4, 0, ln & 255, ln >> 8])
    if self.accept_version < PROTOCOL_VERSION13:
        values = bs([])
    else:
        # start with null indicator bitmap
        null_indicator = 0
        for i, p in enumerate(params):
            if p is None:
                null_indicator |= (1 << i)
        n = len(params) // 8
        if len(params) % 8 != 0:
            n += 1
        if n % 4:  # padding
            n += 4 - n % 4
        # Serialize the bitmap little-endian, least significant byte first.
        null_indicator_bytes = []
        for i in range(n):
            null_indicator_bytes.append(null_indicator & 255)
            null_indicator >>= 8
        values = bs(null_indicator_bytes)
    for p in params:
        # Text parameters are encoded to bytes with the connection charset.
        if (
            (PYTHON_MAJOR_VER == 2 and type(p) == unicode) or
            (PYTHON_MAJOR_VER == 3 and type(p) == str)
        ):
            p = self.str_to_bytes(p)
        t = type(p)
        if p is None:
            v = bs([])
            blr += bs([14, 0, 0])  # zero-length text slot
        elif (
            (PYTHON_MAJOR_VER == 2 and t == str) or
            (PYTHON_MAJOR_VER == 3 and t == bytes)
        ):
            if len(p) > MAX_CHAR_LENGTH:
                # Too long for an inline string: spill into a blob and
                # send the blob id instead.
                v = self._create_blob(trans_handle, p)
                blr += bs([9, 0])
            else:
                v = p
                nbytes = len(v)
                pad_length = ((4-nbytes) & 3)
                v += bs([0]) * pad_length  # pad to 4-byte boundary
                blr += bs([14, nbytes & 255, nbytes >> 8])
        elif t == int:
            v = bint_to_bytes(p, 4)
            blr += bs([8, 0])  # blr_long
        elif t == float and p == float("inf"):
            v = b'\x7f\x80\x00\x00'  # IEEE-754 single-precision +infinity
            blr += bs([10])
        elif t == decimal.Decimal or t == float:
            # Sent as a scaled 64-bit integer with the decimal exponent
            # as the scale byte.
            if t == float:
                p = decimal.Decimal(str(p))
            (sign, digits, exponent) = p.as_tuple()
            v = 0
            ln = len(digits)
            for i in range(ln):
                v += digits[i] * (10 ** (ln - i - 1))
            if sign:
                v *= -1
            v = bint_to_bytes(v, 8)
            if exponent < 0:
                exponent += 256  # wrap to unsigned byte
            blr += bs([16, exponent])
        elif t == datetime.date:
            v = convert_date(p)
            blr += bs([12])
        elif t == datetime.time:
            if p.tzinfo:
                v = convert_time_tz(p)
                blr += bs([28])
            else:
                v = convert_time(p)
                blr += bs([13])
        elif t == datetime.datetime:
            if p.tzinfo:
                v = convert_timestamp_tz(p)
                blr += bs([29])
            else:
                v = convert_timestamp(p)
                blr += bs([35])
        elif t == bool:
            v = bs([1, 0, 0, 0]) if p else bs([0, 0, 0, 0])
            blr += bs([23])
        else:  # fallback, convert to string
            p = p.__repr__()
            if PYTHON_MAJOR_VER == 3 or (PYTHON_MAJOR_VER == 2 and type(p) == unicode):
                p = self.str_to_bytes(p)
            v = p
            nbytes = len(v)
            pad_length = ((4-nbytes) & 3)
            v += bs([0]) * pad_length
            blr += bs([14, nbytes & 255, nbytes >> 8])
        blr += bs([7, 0])  # NULL-flag slot paired with the value above
        values += v
        if self.accept_version < PROTOCOL_VERSION13:
            # Pre-v13 wire format: a per-value 4-byte NULL indicator
            # trails each value (0xffffffff == NULL).
            values += bs([0]) * 4 if not p is None else bs([0xff, 0xff, 0xff, 0xff])
    blr += bs([255, 76])  # [blr_end, blr_eoc]
    return blr, values
def uid(self, auth_plugin_name, wire_crypt):
    """Build the CNCT user-identification block sent with op_connect.

    Packs login, auth plugin name/list, plugin-specific data (SRP
    public key or legacy crypt hash), the wire-encryption preference,
    and the client user/host names into tagged CNCT parameters.
    """
    def pack_cnct_param(k, v):
        # Tag/length/value encoding; CNCT_specific_data is chunked in
        # 254-byte pieces, each carrying a running chunk index.
        if k != CNCT_specific_data:
            return bs([k] + [len(v)]) + v
        # specific_data split per 254 bytes
        b = b''
        i = 0
        while len(v) > 254:
            b += bs([k, 255, i]) + v[:254]
            v = v[254:]
            i += 1
        b += bs([k, len(v)+1, i]) + v
        return b
    auth_plugin_list = ('Srp256', 'Srp', 'Legacy_Auth')
    # get and calculate CNCT_xxxx values
    if sys.platform == 'win32':
        user = os.environ['USERNAME']
        hostname = os.environ['COMPUTERNAME']
    else:
        user = os.environ.get('USER', '')
        hostname = socket.gethostname()
    if auth_plugin_name in ('Srp256', 'Srp'):
        # Generate a fresh SRP key pair; the public key is sent hex-encoded.
        self.client_public_key, self.client_private_key = srp.client_seed()
        specific_data = bytes_to_hex(srp.long2bytes(self.client_public_key))
    elif auth_plugin_name == 'Legacy_Auth':
        assert crypt, "Legacy_Auth needs crypt module"
        specific_data = self.str_to_bytes(get_crypt(self.password))
    else:
        raise OperationalError("Unknown auth plugin name '%s'" % (auth_plugin_name,))
    self.plugin_name = auth_plugin_name
    self.plugin_list = b','.join([s.encode('utf-8') for s in auth_plugin_list])
    client_crypt = b'\x01\x00\x00\x00' if wire_crypt else b'\x00\x00\x00\x00'
    # set CNCT_xxxx values
    r = b''
    r += pack_cnct_param(CNCT_login, self.str_to_bytes(self.user))
    r += pack_cnct_param(CNCT_plugin_name, self.str_to_bytes(self.plugin_name))
    r += pack_cnct_param(CNCT_plugin_list, self.plugin_list)
    r += pack_cnct_param(CNCT_specific_data, specific_data)
    r += pack_cnct_param(CNCT_client_crypt, client_crypt)
    r += pack_cnct_param(CNCT_user, self.str_to_bytes(user))
    r += pack_cnct_param(CNCT_host, self.str_to_bytes(hostname))
    r += pack_cnct_param(CNCT_user_verification, b'')
    return r
@wire_operation
def _op_connect(self, auth_plugin_name, wire_crypt):
protocols = [
# PROTOCOL_VERSION, Arch type (Generic=1), min, max, weight
'0000000a00000001000000000000000500000002', # 10, 1, 0, 5, 2
'ffff800b00000001000000000000000500000004', # 11, 1, 0, 5, 4
'ffff800c00000001000000000000000500000006', # 12, 1, 0, 5, 6
'ffff800d00000001000000000000000500000008', # 13, 1, 0, 5, 8
]
p = xdrlib.Packer()
p.pack_int(self.op_connect)
p.pack_int(self.op_attach)
p.pack_int(3) # CONNECT_VERSION
p.pack_int(1) # arch_generic
p.pack_string(self.str_to_bytes(self.filename if self.filename else ''))
p.pack_int(len(protocols))
p.pack_bytes(self.uid(auth_plugin_name, wire_crypt))
self.sock.send(p.get_buffer() + hex_to_bytes(''.join(protocols)))
@wire_operation
def _op_create(self, page_size=4096):
dpb = bs([1])
s = self.str_to_bytes(self.charset)
dpb += bs([isc_dpb_set_db_charset, len(s)]) + s
dpb += bs([isc_dpb_lc_ctype, len(s)]) + s
s = self.str_to_bytes(self.user)
dpb += bs([isc_dpb_user_name, len(s)]) + s
if self.accept_version < PROTOCOL_VERSION13:
enc_pass = get_crypt(self.password)
if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
s = self.str_to_bytes(self.password)
dpb += bs([isc_dpb_password, len(s)]) + s
else:
enc_pass = self.str_to_bytes(enc_pass)
dpb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
if self.role:
s = self.str_to_bytes(self.role)
dpb += bs([isc_dpb_sql_role_name, len(s)]) + s
if self.auth_data:
s = bytes_to_hex(self.auth_data)
dpb += bs([isc_dpb_specific_auth_data, len(s)]) + s
if self.timezone:
s = self.str_to_bytes(self.timezone)
dpb += bs([isc_dpb_session_time_zone, len(s)]) + s
dpb += bs([isc_dpb_sql_dialect, 4]) + int_to_bytes(3, 4)
dpb += bs([isc_dpb_force_write, 4]) + int_to_bytes(1, 4)
dpb += bs([isc_dpb_overwrite, 4]) + int_to_bytes(1, 4)
dpb += bs([isc_dpb_page_size, 4]) + int_to_bytes(page_size, 4)
p = xdrlib.Packer()
p.pack_int(self.op_create)
p.pack_int(0) # Database Object ID
p.pack_string(self.str_to_bytes(self.filename))
p.pack_bytes(dpb)
self.sock.send(p.get_buffer())
@wire_operation
def _op_cont_auth(self, auth_data, auth_plugin_name, auth_plugin_list, keys):
p = xdrlib.Packer()
p.pack_int(self.op_cont_auth)
p.pack_string(bytes_to_hex(auth_data))
p.pack_bytes(auth_plugin_name)
p.pack_bytes(auth_plugin_list)
p.pack_bytes(keys)
self.sock.send(p.get_buffer())
@wire_operation
def _parse_connect_response(self):
# want and treat op_accept or op_cond_accept or op_accept_data
b = self.recv_channel(4)
while bytes_to_bint(b) == self.op_dummy:
b = self.recv_channel(4)
if bytes_to_bint(b) == self.op_reject:
raise OperationalError('Connection is rejected')
op_code = bytes_to_bint(b)
if op_code == self.op_response:
return self._parse_op_response() # error occured
b = self.recv_channel(12)
self.accept_version = byte_to_int(b[3])
self.accept_architecture = bytes_to_bint(b[4:8])
self.accept_type = bytes_to_bint(b[8:])
self.lazy_response_count = 0
if op_code == self.op_cond_accept or op_code == self.op_accept_data:
ln = bytes_to_bint(self.recv_channel(4))
data = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_bint(self.recv_channel(4))
self.accept_plugin_name = self.recv_channel(ln, word_alignment=True)
is_authenticated = bytes_to_bint(self.recv_channel(4))
ln = bytes_to_bint(self.recv_channel(4))
self.recv_channel(ln, word_alignment=True) # keys
if is_authenticated == 0:
if self.accept_plugin_name in (b'Srp256', b'Srp'):
hash_algo = {
b'Srp256': hashlib.sha256,
b'Srp': hashlib.sha1,
}[self.accept_plugin_name]
user = self.user
if len(user) > 2 and user[0] == user[-1] == '"':
user = user[1:-1]
user = user.replace('""','"')
else:
user = user.upper()
if len(data) == 0:
# send op_cont_auth
self._op_cont_auth(
srp.long2bytes(self.client_public_key),
self.accept_plugin_name,
self.plugin_list,
b''
)
# parse op_cont_auth
b = self.recv_channel(4)
assert bytes_to_bint(b) == self.op_cont_auth
ln = bytes_to_bint(self.recv_channel(4))
data = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_bint(self.recv_channel(4))
plugin_name = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_bint(self.recv_channel(4))
plugin_list = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_bint(self.recv_channel(4))
keys = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_int(data[:2])
server_salt = data[2:ln+2]
server_public_key = srp.bytes2long(
hex_to_bytes(data[4+ln:]))
auth_data, session_key = srp.client_proof(
self.str_to_bytes(user),
self.str_to_bytes(self.password),
server_salt,
self.client_public_key,
server_public_key,
self.client_private_key,
hash_algo)
elif self.accept_plugin_name == b'Legacy_Auth':
auth_data = self.str_to_bytes(get_crypt(self.password))
session_key = b''
else:
raise OperationalError(
'Unknown auth plugin %s' % (self.accept_plugin_name)
)
else:
auth_data = b''
session_key = b''
if op_code == self.op_cond_accept:
self._op_cont_auth(
auth_data,
self.accept_plugin_name,
self.plugin_list,
b''
)
(h, oid, buf) = self._op_response()
if self.wire_crypt and session_key:
# op_crypt: plugin[Arc4] key[Symmetric]
p = xdrlib.Packer()
p.pack_int(self.op_crypt)
p.pack_string(b'Arc4')
p.pack_string(b'Symmetric')
self.sock.send(p.get_buffer())
self.sock.set_translator(
ARC4.new(session_key), ARC4.new(session_key))
(h, oid, buf) = self._op_response()
else: # use later _op_attach() and _op_create()
self.auth_data = auth_data
else:
assert op_code == self.op_accept
@wire_operation
def _op_attach(self):
dpb = bs([isc_dpb_version1])
s = self.str_to_bytes(self.charset)
dpb += bs([isc_dpb_lc_ctype, len(s)]) + s
s = self.str_to_bytes(self.user)
dpb += bs([isc_dpb_user_name, len(s)]) + s
if self.accept_version < PROTOCOL_VERSION13:
enc_pass = get_crypt(self.password)
if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
s = self.str_to_bytes(self.password)
dpb += bs([isc_dpb_password, len(s)]) + s
else:
enc_pass = self.str_to_bytes(enc_pass)
dpb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
if self.role:
s = self.str_to_bytes(self.role)
dpb += bs([isc_dpb_sql_role_name, len(s)]) + s
dpb += bs([isc_dpb_process_id, 4]) + int_to_bytes(os.getpid(), 4)
s = self.str_to_bytes(sys.argv[0])
dpb += bs([isc_dpb_process_name, len(s)]) + s
if self.auth_data:
s = bytes_to_hex(self.auth_data)
dpb += bs([isc_dpb_specific_auth_data, len(s)]) + s
if self.timezone:
s = self.str_to_bytes(self.timezone)
dpb += bs([isc_dpb_session_time_zone, len(s)]) + s
p = xdrlib.Packer()
p.pack_int(self.op_attach)
p.pack_int(0) # Database Object ID
p.pack_string(self.str_to_bytes(self.filename))
p.pack_bytes(dpb)
self.sock.send(p.get_buffer())
@wire_operation
def _op_drop_database(self):
if self.db_handle is None:
raise OperationalError('_op_drop_database() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_drop_database)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_service_attach(self):
spb = bs([2, 2])
s = self.str_to_bytes(self.user)
spb += bs([isc_spb_user_name, len(s)]) + s
if self.accept_version < PROTOCOL_VERSION13:
enc_pass = get_crypt(self.password)
if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
s = self.str_to_bytes(self.password)
spb += bs([isc_dpb_password, len(s)]) + s
else:
enc_pass = self.str_to_bytes(enc_pass)
spb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
if self.auth_data:
s = self.str_to_bytes(bytes_to_hex(self.auth_data))
spb += bs([isc_dpb_specific_auth_data, len(s)]) + s
spb += bs([isc_spb_dummy_packet_interval, 0x04, 0x78, 0x0a, 0x00, 0x00])
p = xdrlib.Packer()
p.pack_int(self.op_service_attach)
p.pack_int(0)
p.pack_string(self.str_to_bytes('service_mgr'))
p.pack_bytes(spb)
self.sock.send(p.get_buffer())
@wire_operation
def _op_service_info(self, param, item, buffer_length=512):
if self.db_handle is None:
raise OperationalError('_op_service_info() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_service_info)
p.pack_int(self.db_handle)
p.pack_int(0)
p.pack_bytes(param)
p.pack_bytes(item)
p.pack_int(buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_service_start(self, param):
if self.db_handle is None:
raise OperationalError('_op_service_start() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_service_start)
p.pack_int(self.db_handle)
p.pack_int(0)
p.pack_bytes(param)
self.sock.send(p.get_buffer())
@wire_operation
def _op_service_detach(self):
if self.db_handle is None:
raise OperationalError('_op_service_detach() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_service_detach)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_info_database(self, b):
if self.db_handle is None:
raise OperationalError('_op_info_database() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_info_database)
p.pack_int(self.db_handle)
p.pack_int(0)
p.pack_bytes(b)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_transaction(self, tpb):
if self.db_handle is None:
raise OperationalError('_op_transaction() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_transaction)
p.pack_int(self.db_handle)
p.pack_bytes(tpb)
self.sock.send(p.get_buffer())
@wire_operation
def _op_commit(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_commit)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_commit_retaining(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_commit_retaining)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_rollback(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_rollback)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_rollback_retaining(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_rollback_retaining)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_allocate_statement(self):
if self.db_handle is None:
raise OperationalError('_op_allocate_statement() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_allocate_statement)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_info_transaction(self, trans_handle, b):
p = xdrlib.Packer()
p.pack_int(self.op_info_transaction)
p.pack_int(trans_handle)
p.pack_int(0)
p.pack_bytes(b)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_free_statement(self, stmt_handle, mode):
    """Send an op_free_statement packet for the given statement handle."""
    packer = xdrlib.Packer()
    for field in (self.op_free_statement, stmt_handle, mode):
        packer.pack_int(field)
    self.sock.send(packer.get_buffer())
@wire_operation
def _op_prepare_statement(self, stmt_handle, trans_handle, query, option_items=None):
if option_items is None:
option_items=bs([])
desc_items = option_items + bs([isc_info_sql_stmt_type])+INFO_SQL_SELECT_DESCRIBE_VARS
p = xdrlib.Packer()
p.pack_int(self.op_prepare_statement)
p.pack_int(trans_handle)
p.pack_int(stmt_handle)
p.pack_int(3) # dialect = 3
p.pack_string(self.str_to_bytes(query))
p.pack_bytes(desc_items)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_info_sql(self, stmt_handle, vars):
p = xdrlib.Packer()
p.pack_int(self.op_info_sql)
p.pack_int(stmt_handle)
p.pack_int(0)
p.pack_bytes(vars)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_execute(self, stmt_handle, trans_handle, params):
p = xdrlib.Packer()
p.pack_int(self.op_execute)
p.pack_int(stmt_handle)
p.pack_int(trans_handle)
if len(params) == 0:
p.pack_bytes(bs([]))
p.pack_int(0)
p.pack_int(0)
self.sock.send(p.get_buffer())
else:
(blr, values) = self.params_to_blr(trans_handle, params)
p.pack_bytes(blr)
p.pack_int(0)
p.pack_int(1)
self.sock.send(p.get_buffer() + values)
@wire_operation
def _op_execute2(self, stmt_handle, trans_handle, params, output_blr):
p = xdrlib.Packer()
p.pack_int(self.op_execute2)
p.pack_int(stmt_handle)
p.pack_int(trans_handle)
if len(params) == 0:
values = b''
p.pack_bytes(bs([]))
p.pack_int(0)
p.pack_int(0)
else:
(blr, values) = self.params_to_blr(trans_handle, params)
p.pack_bytes(blr)
p.pack_int(0)
p.pack_int(1)
q = xdrlib.Packer()
q.pack_bytes(output_blr)
q.pack_int(0)
self.sock.send(p.get_buffer() + values + q.get_buffer())
@wire_operation
def _op_exec_immediate(self, trans_handle, query):
if self.db_handle is None:
raise OperationalError('_op_exec_immediate() Invalid db handle')
desc_items = bs([])
p = xdrlib.Packer()
p.pack_int(self.op_exec_immediate)
p.pack_int(trans_handle)
p.pack_int(self.db_handle)
p.pack_int(3) # dialect = 3
p.pack_string(self.str_to_bytes(query))
p.pack_bytes(desc_items)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_fetch(self, stmt_handle, blr):
p = xdrlib.Packer()
p.pack_int(self.op_fetch)
p.pack_int(stmt_handle)
p.pack_bytes(blr)
p.pack_int(0)
p.pack_int(400)
self.sock.send(p.get_buffer())
@wire_operation
def _op_fetch_response(self, stmt_handle, xsqlda):
op_code = bytes_to_bint(self.recv_channel(4))
while op_code == self.op_dummy:
op_code = bytes_to_bint(self.recv_channel(4))
while op_code == self.op_response and self.lazy_response_count:
self.lazy_response_count -= 1
h, oid, buf = self._parse_op_response()
op_code = bytes_to_bint(self.recv_channel(4))
if op_code != self.op_fetch_response:
if op_code == self.op_response:
self._parse_op_response()
raise InternalError("op_fetch_response:op_code = %d" % (op_code,))
b = self.recv_channel(8)
status = bytes_to_bint(b[:4])
count = bytes_to_bint(b[4:8])
rows = []
while count:
r = [None] * len(xsqlda)
if self.accept_version < PROTOCOL_VERSION13:
for i in range(len(xsqlda)):
x = xsqlda[i]
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
if self.recv_channel(4) == bs([0]) * 4: # Not NULL
r[i] = x.value(raw_value)
else: # PROTOCOL_VERSION13
n = len(xsqlda) // 8
if len(xsqlda) % 8 != 0:
n += 1
null_indicator = 0
for c in reversed(self.recv_channel(n, word_alignment=True)):
null_indicator <<= 8
null_indicator += c if PYTHON_MAJOR_VER == 3 else ord(c)
for i in range(len(xsqlda)):
x = xsqlda[i]
if null_indicator & (1 << i):
continue
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
r[i] = x.value(raw_value)
rows.append(r)
b = self.recv_channel(12)
op_code = bytes_to_bint(b[:4])
status = bytes_to_bint(b[4:8])
count = bytes_to_bint(b[8:])
return rows, status != 100
@wire_operation
def _op_detach(self):
if self.db_handle is None:
raise OperationalError('_op_detach() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_detach)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_open_blob(self, blob_id, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_open_blob)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer() + blob_id)
@wire_operation
def _op_create_blob2(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_create_blob2)
p.pack_int(0)
p.pack_int(trans_handle)
p.pack_int(0)
p.pack_int(0)
self.sock.send(p.get_buffer())
@wire_operation
def _op_get_segment(self, blob_handle):
p = xdrlib.Packer()
p.pack_int(self.op_get_segment)
p.pack_int(blob_handle)
p.pack_int(self.buffer_length)
p.pack_int(0)
self.sock.send(p.get_buffer())
@wire_operation
def _op_put_segment(self, blob_handle, seg_data):
    """Send one blob segment, padded to a 4-byte boundary."""
    nbytes = len(seg_data)
    packer = xdrlib.Packer()
    packer.pack_int(self.op_put_segment)
    packer.pack_int(blob_handle)
    packer.pack_int(nbytes)
    packer.pack_int(nbytes)
    padding = bs([0]) * ((4 - nbytes) & 3)
    self.sock.send(packer.get_buffer() + seg_data + padding)
@wire_operation
def _op_batch_segments(self, blob_handle, seg_data):
ln = len(seg_data)
p = xdrlib.Packer()
p.pack_int(self.op_batch_segments)
p.pack_int(blob_handle)
p.pack_int(ln + 2)
p.pack_int(ln + 2)
pad_length = ((4-(ln+2)) & 3)
self.sock.send(p.get_buffer() + int_to_bytes(ln, 2) + seg_data + bs([0])*pad_length)
@wire_operation
def _op_close_blob(self, blob_handle):
p = xdrlib.Packer()
p.pack_int(self.op_close_blob)
p.pack_int(blob_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_que_events(self, event_names, event_id):
if self.db_handle is None:
raise OperationalError('_op_que_events() Invalid db handle')
params = bs([1])
for name, n in event_names.items():
params += bs([len(name)])
params += self.str_to_bytes(name)
params += int_to_bytes(n, 4)
p = xdrlib.Packer()
p.pack_int(self.op_que_events)
p.pack_int(self.db_handle)
p.pack_bytes(params)
p.pack_int(0) # ast
p.pack_int(0) # args
p.pack_int(event_id)
self.sock.send(p.get_buffer())
@wire_operation
def _op_cancel_events(self, event_id):
if self.db_handle is None:
raise OperationalError('_op_cancel_events() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_cancel_events)
p.pack_int(self.db_handle)
p.pack_int(event_id)
self.sock.send(p.get_buffer())
@wire_operation
def _op_connect_request(self):
if self.db_handle is None:
raise OperationalError('_op_connect_request() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_connect_request)
p.pack_int(1) # async
p.pack_int(self.db_handle)
p.pack_int(0)
self.sock.send(p.get_buffer())
@wire_operation
def _op_response(self):
b = self.recv_channel(4)
while bytes_to_bint(b) == self.op_dummy:
b = self.recv_channel(4)
op_code = bytes_to_bint(b)
while op_code == self.op_response and self.lazy_response_count:
self.lazy_response_count -= 1
h, oid, buf = self._parse_op_response()
b = self.recv_channel(4)
if op_code == self.op_cont_auth:
raise OperationalError('Unauthorized')
elif op_code != self.op_response:
raise InternalError("_op_response:op_code = %d" % (op_code,))
return self._parse_op_response()
@wire_operation
def _op_event(self):
    """Receive one op_event packet and return the parsed event data.

    Skips op_dummy keep-alives and at most one pending lazy op_response.
    Raises DisconnectByPeer on op_exit, InternalError on anything else.
    """
    b = self.recv_channel(4)
    while bytes_to_bint(b) == self.op_dummy:  # skip keep-alive packets
        b = self.recv_channel(4)
    op_code = bytes_to_bint(b)
    if op_code == self.op_response and self.lazy_response_count:
        self.lazy_response_count -= 1
        self._parse_op_response()
        b = self.recv_channel(4)
        # NOTE(review): op_code is not refreshed from the newly read b here,
        # so the checks below still see op_response -- confirm intended.
    if op_code == self.op_exit or bytes_to_bint(b) == self.op_exit:
        raise DisconnectByPeer
    if op_code != self.op_event:
        if op_code == self.op_response:
            self._parse_op_response()  # surfaces any server-side error
        raise InternalError("_op_event:op_code = %d" % (op_code,))
    return self._parse_op_event()
@wire_operation
def _op_sql_response(self, xsqlda):
    """Read an op_sql_response packet and decode one row described by xsqlda.

    Returns a list of decoded column values, or [] when the row-count field
    is zero.  Protocol >= 13 sends a leading NULL-indicator bitmap; older
    protocols send a 4-byte NULL flag after each value.
    """
    b = self.recv_channel(4)
    while bytes_to_bint(b) == self.op_dummy:  # skip keep-alive packets
        b = self.recv_channel(4)
    op_code = bytes_to_bint(b)
    if op_code != self.op_sql_response:
        if op_code == self.op_response:
            self._parse_op_response()  # surfaces any server-side error
        raise InternalError("_op_sql_response:op_code = %d" % (op_code,))
    b = self.recv_channel(4)
    count = bytes_to_bint(b[:4])  # row-count field; 0 means no data follows
    r = []
    if count == 0:
        return []
    if self.accept_version < PROTOCOL_VERSION13:
        for i in range(len(xsqlda)):
            x = xsqlda[i]
            if x.io_length() < 0:  # variable-length type: length prefix on the wire
                b = self.recv_channel(4)
                ln = bytes_to_bint(b)
            else:
                ln = x.io_length()
            raw_value = self.recv_channel(ln, word_alignment=True)
            if self.recv_channel(4) == bs([0]) * 4:  # Not NULL
                r.append(x.value(raw_value))
            else:
                r.append(None)
    else:
        # Protocol 13+: leading little-endian bitmap, one bit per column,
        # bit set -> column is NULL.
        n = len(xsqlda) // 8
        if len(xsqlda) % 8 != 0:
            n += 1
        null_indicator = 0
        for c in reversed(self.recv_channel(n, word_alignment=True)):
            null_indicator <<= 8
            null_indicator += c if PYTHON_MAJOR_VER == 3 else ord(c)
        for i in range(len(xsqlda)):
            x = xsqlda[i]
            if null_indicator & (1 << i):
                r.append(None)
            else:
                if x.io_length() < 0:
                    b = self.recv_channel(4)
                    ln = bytes_to_bint(b)
                else:
                    ln = x.io_length()
                raw_value = self.recv_channel(ln, word_alignment=True)
                r.append(x.value(raw_value))
    return r
def _wait_for_event(self, timeout):
    """Block until the server pushes an op_event packet.

    Returns (event_id, {event_name: count}) for a received event,
    (0, {}) if the peer sends op_exit/op_disconnect, or None if
    recv_channel() yields no packet (defensive check).
    """
    event_names = {}
    event_id = 0
    while True:
        b4 = self.recv_channel(4)
        if b4 is None:
            return None
        op_code = bytes_to_bint(b4)
        if op_code == self.op_dummy:
            pass  # keep-alive; wait for the next packet
        elif op_code == self.op_exit or op_code == self.op_disconnect:
            break
        elif op_code == self.op_event:
            bytes_to_int(self.recv_channel(4))  # db_handle
            ln = bytes_to_bint(self.recv_channel(4))
            b = self.recv_channel(ln, word_alignment=True)
            assert byte_to_int(b[0]) == 1  # event block version marker
            # Parse repeated (1-byte length, name, 4-byte count) triples.
            i = 1
            while i < len(b):
                ln = byte_to_int(b[i])
                s = self.connection.bytes_to_str(b[i+1:i+1+ln])
                n = bytes_to_int(b[i+1+ln:i+1+ln+4])
                event_names[s] = n
                i += ln + 5  # advance past length byte + name + count
            self.recv_channel(8)  # ignore AST info
            event_id = bytes_to_bint(self.recv_channel(4))
            break
        else:
            raise InternalError("_wait_for_event:op_code = %d" % (op_code,))
    return (event_id, event_names)
|
nakagami/pyfirebirdsql
|
firebirdsql/wireprotocol.py
|
WireProtocol.params_to_blr
|
python
|
def params_to_blr(self, trans_handle, params):
    """Convert a parameter sequence to wire format.

    Returns (blr, values): the BLR type descriptions and the matching
    packed parameter bytes.  On protocol >= 13 a NULL-indicator bitmap
    (one bit per parameter, padded to 4 bytes) precedes the values;
    older protocols append a 4-byte NULL flag after each value instead.
    Strings longer than MAX_CHAR_LENGTH are sent as blobs.
    """
    ln = len(params) * 2
    # blr_version5, blr_begin, blr_message 0, item count (lo, hi).
    blr = bs([5, 2, 4, 0, ln & 255, ln >> 8])
    if self.accept_version < PROTOCOL_VERSION13:
        values = bs([])
    else:
        # Protocol 13+: build the leading NULL-indicator bitmap.
        null_indicator = 0
        for i, p in enumerate(params):
            if p is None:
                null_indicator |= (1 << i)
        n = len(params) // 8
        if len(params) % 8 != 0:
            n += 1
        if n % 4:  # pad bitmap to a 4-byte boundary
            n += 4 - n % 4
        null_indicator_bytes = []
        for i in range(n):
            null_indicator_bytes.append(null_indicator & 255)
            null_indicator >>= 8
        values = bs(null_indicator_bytes)
    for p in params:
        if (
            (PYTHON_MAJOR_VER == 2 and type(p) == unicode) or
            (PYTHON_MAJOR_VER == 3 and type(p) == str)
        ):
            p = self.str_to_bytes(p)
        t = type(p)
        if p is None:
            v = bs([])
            blr += bs([14, 0, 0])  # zero-length blr_text
        elif (
            (PYTHON_MAJOR_VER == 2 and t == str) or
            (PYTHON_MAJOR_VER == 3 and t == bytes)
        ):
            if len(p) > MAX_CHAR_LENGTH:
                # Too long for an inline string: send as a blob id.
                v = self._create_blob(trans_handle, p)
                blr += bs([9, 0])
            else:
                v = p
                nbytes = len(v)
                pad_length = ((4-nbytes) & 3)
                v += bs([0]) * pad_length  # pad value to 4-byte boundary
                blr += bs([14, nbytes & 255, nbytes >> 8])  # blr_text
        elif t == int:
            v = bint_to_bytes(p, 4)
            blr += bs([8, 0])  # blr_long
        elif t == float and (p == float("inf") or p == float("-inf")):
            # Bug fix: -inf previously fell through to the Decimal branch,
            # where Decimal('-Infinity').as_tuple() yields a non-numeric
            # exponent and the 'exponent < 0' test raises TypeError on
            # Python 3.  Encode both infinities as IEEE 754 binary32.
            v = b'\x7f\x80\x00\x00' if p > 0 else b'\xff\x80\x00\x00'
            blr += bs([10])  # blr_float
        elif t == decimal.Decimal or t == float:
            # NOTE: float('nan') still reaches this branch and fails in the
            # exponent handling below -- assumed never passed by callers.
            if t == float:
                p = decimal.Decimal(str(p))
            (sign, digits, exponent) = p.as_tuple()
            v = 0
            ln = len(digits)
            for i in range(ln):
                v += digits[i] * (10 ** (ln - i - 1))
            if sign:
                v *= -1
            v = bint_to_bytes(v, 8)
            if exponent < 0:
                exponent += 256  # scale byte is an unsigned two's complement
            blr += bs([16, exponent])  # blr_int64 with scale
        elif t == datetime.date:
            v = convert_date(p)
            blr += bs([12])
        elif t == datetime.time:
            if p.tzinfo:
                v = convert_time_tz(p)
                blr += bs([28])
            else:
                v = convert_time(p)
                blr += bs([13])
        elif t == datetime.datetime:
            if p.tzinfo:
                v = convert_timestamp_tz(p)
                blr += bs([29])
            else:
                v = convert_timestamp(p)
                blr += bs([35])
        elif t == bool:
            v = bs([1, 0, 0, 0]) if p else bs([0, 0, 0, 0])
            blr += bs([23])
        else:  # fallback, convert to string
            p = p.__repr__()
            if PYTHON_MAJOR_VER == 3 or (PYTHON_MAJOR_VER == 2 and type(p) == unicode):
                p = self.str_to_bytes(p)
            v = p
            nbytes = len(v)
            pad_length = ((4-nbytes) & 3)
            v += bs([0]) * pad_length
            blr += bs([14, nbytes & 255, nbytes >> 8])
        blr += bs([7, 0])  # trailing blr_short: slot for the NULL flag
        values += v
        if self.accept_version < PROTOCOL_VERSION13:
            # Per-value NULL flag: 0 = not null, -1 = null.
            values += bs([0]) * 4 if p is not None else bs([0xff, 0xff, 0xff, 0xff])
    blr += bs([255, 76])  # [blr_end, blr_eoc]
    return blr, values
|
Convert parameter array to BLR and values format.
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/wireprotocol.py#L318-L417
|
[
"def bs(byte_array):\n if PYTHON_MAJOR_VER == 2:\n return ''.join([chr(c) for c in byte_array])\n else:\n return bytes(byte_array)\n",
"def bint_to_bytes(val, nbytes): # Convert int value to big endian bytes.\n v = abs(val)\n b = []\n for n in range(nbytes):\n b.append((v >> (8 * (nbytes - n - 1)) & 0xff))\n if val < 0:\n for i in range(nbytes):\n b[i] = ~b[i] + 256\n b[-1] += 1\n for i in range(nbytes):\n if b[nbytes - i - 1] == 256:\n b[nbytes - i - 1] = 0\n b[nbytes - i - 2] += 1\n return bs(b)\n",
"def str_to_bytes(self, s):\n \"convert str to bytes\"\n if (PYTHON_MAJOR_VER == 3 or\n (PYTHON_MAJOR_VER == 2 and type(s) == unicode)):\n return s.encode(charset_map.get(self.charset, self.charset))\n return s\n",
"def _create_blob(self, trans_handle, b):\n self._op_create_blob2(trans_handle)\n (blob_handle, blob_id, buf) = self._op_response()\n\n i = 0\n while i < len(b):\n self._op_put_segment(blob_handle, b[i:i+BLOB_SEGMENT_SIZE])\n (h, oid, buf) = self._op_response()\n i += BLOB_SEGMENT_SIZE\n\n self._op_close_blob(blob_handle)\n (h, oid, buf) = self._op_response()\n return blob_id\n"
] |
class WireProtocol(object):
    """Low-level Firebird wire-protocol operations shared by connection,
    cursor and event handling code."""
    buffer_length = 1024  # default receive-buffer size for info requests
    # Wire-protocol operation codes.
    op_connect = 1
    op_exit = 2
    op_accept = 3
    op_reject = 4
    op_protocol = 5
    op_disconnect = 6
    op_response = 9
    op_attach = 19
    op_create = 20
    op_detach = 21
    op_transaction = 29
    op_commit = 30
    op_rollback = 31
    op_open_blob = 35
    op_get_segment = 36
    op_put_segment = 37
    op_close_blob = 39
    op_info_database = 40
    op_info_transaction = 42
    op_batch_segments = 44
    op_que_events = 48
    op_cancel_events = 49
    op_commit_retaining = 50
    op_event = 52
    op_connect_request = 53
    # NOTE(review): same value as op_connect_request -- confirm intended.
    op_aux_connect = 53
    op_create_blob2 = 57
    op_allocate_statement = 62
    op_execute = 63
    op_exec_immediate = 64
    op_fetch = 65
    op_fetch_response = 66
    op_free_statement = 67
    op_prepare_statement = 68
    op_info_sql = 70
    op_dummy = 71
    op_execute2 = 76
    op_sql_response = 78
    op_drop_database = 81
    op_service_attach = 82
    op_service_detach = 83
    op_service_info = 84
    op_service_start = 85
    op_rollback_retaining = 86
    # FB3
    op_update_account_info = 87
    op_authenticate_user = 88
    op_partial = 89
    op_trusted_auth = 90
    op_cancel = 91
    op_cont_auth = 92
    op_ping = 93
    op_accept_data = 94
    op_abort_aux_connection = 95
    op_crypt = 96
    op_crypt_key_callback = 97
    op_cond_accept = 98
def __init__(self):
    """Initialize handshake state: no auth data, no accepted plugin yet."""
    self.auth_data = b''
    self.accept_plugin_name = ''
def recv_channel(self, nbytes, word_alignment=False):
    """Read exactly nbytes from the socket and return them.

    With word_alignment=True, padding up to the next 4-byte boundary is
    also consumed (but not returned).  Raises OperationalError when the
    socket yields fewer bytes (peer closed or select() timed out).
    """
    n = nbytes
    if word_alignment and (n % 4):
        n += 4 - nbytes % 4  # 4 bytes word alignment
    r = bs([])
    while n:
        if (self.timeout is not None and select.select([self.sock._sock], [], [], self.timeout)[0] == []):
            break  # nothing readable within the timeout; give up
        b = self.sock.recv(n)
        if not b:
            break  # peer closed the connection
        r += b
        n -= len(b)
    if len(r) < nbytes:
        raise OperationalError('Can not recv() packets')
    return r[:nbytes]
def str_to_bytes(self, s):
    """Encode a text string to bytes using the connection charset.

    On Python 2, plain byte strings are passed through unchanged.
    """
    is_text = PYTHON_MAJOR_VER == 3 or (
        PYTHON_MAJOR_VER == 2 and type(s) == unicode
    )
    if not is_text:
        return s
    return s.encode(charset_map.get(self.charset, self.charset))
def bytes_to_str(self, b):
    """Convert a byte string to the native str type.

    On Python 3 the bytes are decoded with the connection charset; on
    Python 2 the raw byte string is returned unchanged.
    """
    if PYTHON_MAJOR_VER != 3:
        return b
    codec = charset_map.get(self.charset, self.charset)
    return b.decode(codec)
def bytes_to_ustr(self, b):
    """Decode a byte string to unicode text using the connection charset."""
    codec = charset_map.get(self.charset, self.charset)
    return b.decode(codec)
def _parse_status_vector(self):
    """Read an ISC status vector from the wire.

    Returns (gds_codes, sql_code, message): the set of GDS error codes,
    the SQLCODE (when reported), and a human-readable message with its
    @1/@2/... placeholders substituted.
    """
    sql_code = 0
    gds_codes = set()
    message = ''
    n = bytes_to_bint(self.recv_channel(4))
    while n != isc_arg_end:
        if n == isc_arg_gds:
            gds_code = bytes_to_bint(self.recv_channel(4))
            if gds_code:
                gds_codes.add(gds_code)
                message += messages.get(gds_code, '@1')
                num_arg = 0  # restart placeholder numbering for this message
        elif n == isc_arg_number:
            num = bytes_to_bint(self.recv_channel(4))
            if gds_code == 335544436:  # this gds code carries the SQLCODE
                sql_code = num
            # NOTE(review): num_arg/gds_code are unbound if isc_arg_number
            # arrives before any isc_arg_gds -- assumed not to happen.
            num_arg += 1
            message = message.replace('@' + str(num_arg), str(num))
        elif n == isc_arg_string:
            nbytes = bytes_to_bint(self.recv_channel(4))
            s = str(self.recv_channel(nbytes, word_alignment=True))
            num_arg += 1
            message = message.replace('@' + str(num_arg), s)
        elif n == isc_arg_interpreted:
            nbytes = bytes_to_bint(self.recv_channel(4))
            s = str(self.recv_channel(nbytes, word_alignment=True))
            message += s
        elif n == isc_arg_sql_state:
            nbytes = bytes_to_bint(self.recv_channel(4))
            s = str(self.recv_channel(nbytes, word_alignment=True))  # SQLSTATE, currently discarded
        n = bytes_to_bint(self.recv_channel(4))
    return (gds_codes, sql_code, message)
def _parse_op_response(self):
    """Parse the body of an op_response packet.

    Returns (object_handle, object_id, buffer).  The trailing status
    vector may raise IntegrityError or OperationalError, or emit a
    warning, depending on the gds codes it carries.
    """
    b = self.recv_channel(16)
    h = bytes_to_bint(b[0:4])  # Object handle
    oid = b[4:12]  # Object ID
    buf_len = bytes_to_bint(b[12:])  # buffer length
    buf = self.recv_channel(buf_len, word_alignment=True)
    (gds_codes, sql_code, message) = self._parse_status_vector()
    if gds_codes.intersection([
        335544838, 335544879, 335544880, 335544466, 335544665, 335544347, 335544558
    ]):
        # These gds codes are surfaced as IntegrityError.
        raise IntegrityError(message, gds_codes, sql_code)
    elif gds_codes.intersection([335544321]):
        warnings.warn(message)  # reported as a warning only
    elif (sql_code or message) and not gds_codes.intersection([335544434]):
        # 335544434 is deliberately not treated as an error.
        raise OperationalError(message, gds_codes, sql_code)
    return (h, oid, buf)
def _parse_op_event(self):
b = self.recv_channel(4096) # too large TODO: read step by step
# TODO: parse event name
db_handle = bytes_to_bint(b[0:4])
event_id = bytes_to_bint(b[-4:])
return (db_handle, event_id, {})
def _create_blob(self, trans_handle, b):
self._op_create_blob2(trans_handle)
(blob_handle, blob_id, buf) = self._op_response()
i = 0
while i < len(b):
self._op_put_segment(blob_handle, b[i:i+BLOB_SEGMENT_SIZE])
(h, oid, buf) = self._op_response()
i += BLOB_SEGMENT_SIZE
self._op_close_blob(blob_handle)
(h, oid, buf) = self._op_response()
return blob_id
def uid(self, auth_plugin_name, wire_crypt):
def pack_cnct_param(k, v):
if k != CNCT_specific_data:
return bs([k] + [len(v)]) + v
# specific_data split per 254 bytes
b = b''
i = 0
while len(v) > 254:
b += bs([k, 255, i]) + v[:254]
v = v[254:]
i += 1
b += bs([k, len(v)+1, i]) + v
return b
auth_plugin_list = ('Srp256', 'Srp', 'Legacy_Auth')
# get and calculate CNCT_xxxx values
if sys.platform == 'win32':
user = os.environ['USERNAME']
hostname = os.environ['COMPUTERNAME']
else:
user = os.environ.get('USER', '')
hostname = socket.gethostname()
if auth_plugin_name in ('Srp256', 'Srp'):
self.client_public_key, self.client_private_key = srp.client_seed()
specific_data = bytes_to_hex(srp.long2bytes(self.client_public_key))
elif auth_plugin_name == 'Legacy_Auth':
assert crypt, "Legacy_Auth needs crypt module"
specific_data = self.str_to_bytes(get_crypt(self.password))
else:
raise OperationalError("Unknown auth plugin name '%s'" % (auth_plugin_name,))
self.plugin_name = auth_plugin_name
self.plugin_list = b','.join([s.encode('utf-8') for s in auth_plugin_list])
client_crypt = b'\x01\x00\x00\x00' if wire_crypt else b'\x00\x00\x00\x00'
# set CNCT_xxxx values
r = b''
r += pack_cnct_param(CNCT_login, self.str_to_bytes(self.user))
r += pack_cnct_param(CNCT_plugin_name, self.str_to_bytes(self.plugin_name))
r += pack_cnct_param(CNCT_plugin_list, self.plugin_list)
r += pack_cnct_param(CNCT_specific_data, specific_data)
r += pack_cnct_param(CNCT_client_crypt, client_crypt)
r += pack_cnct_param(CNCT_user, self.str_to_bytes(user))
r += pack_cnct_param(CNCT_host, self.str_to_bytes(hostname))
r += pack_cnct_param(CNCT_user_verification, b'')
return r
@wire_operation
def _op_connect(self, auth_plugin_name, wire_crypt):
protocols = [
# PROTOCOL_VERSION, Arch type (Generic=1), min, max, weight
'0000000a00000001000000000000000500000002', # 10, 1, 0, 5, 2
'ffff800b00000001000000000000000500000004', # 11, 1, 0, 5, 4
'ffff800c00000001000000000000000500000006', # 12, 1, 0, 5, 6
'ffff800d00000001000000000000000500000008', # 13, 1, 0, 5, 8
]
p = xdrlib.Packer()
p.pack_int(self.op_connect)
p.pack_int(self.op_attach)
p.pack_int(3) # CONNECT_VERSION
p.pack_int(1) # arch_generic
p.pack_string(self.str_to_bytes(self.filename if self.filename else ''))
p.pack_int(len(protocols))
p.pack_bytes(self.uid(auth_plugin_name, wire_crypt))
self.sock.send(p.get_buffer() + hex_to_bytes(''.join(protocols)))
@wire_operation
def _op_create(self, page_size=4096):
dpb = bs([1])
s = self.str_to_bytes(self.charset)
dpb += bs([isc_dpb_set_db_charset, len(s)]) + s
dpb += bs([isc_dpb_lc_ctype, len(s)]) + s
s = self.str_to_bytes(self.user)
dpb += bs([isc_dpb_user_name, len(s)]) + s
if self.accept_version < PROTOCOL_VERSION13:
enc_pass = get_crypt(self.password)
if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
s = self.str_to_bytes(self.password)
dpb += bs([isc_dpb_password, len(s)]) + s
else:
enc_pass = self.str_to_bytes(enc_pass)
dpb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
if self.role:
s = self.str_to_bytes(self.role)
dpb += bs([isc_dpb_sql_role_name, len(s)]) + s
if self.auth_data:
s = bytes_to_hex(self.auth_data)
dpb += bs([isc_dpb_specific_auth_data, len(s)]) + s
if self.timezone:
s = self.str_to_bytes(self.timezone)
dpb += bs([isc_dpb_session_time_zone, len(s)]) + s
dpb += bs([isc_dpb_sql_dialect, 4]) + int_to_bytes(3, 4)
dpb += bs([isc_dpb_force_write, 4]) + int_to_bytes(1, 4)
dpb += bs([isc_dpb_overwrite, 4]) + int_to_bytes(1, 4)
dpb += bs([isc_dpb_page_size, 4]) + int_to_bytes(page_size, 4)
p = xdrlib.Packer()
p.pack_int(self.op_create)
p.pack_int(0) # Database Object ID
p.pack_string(self.str_to_bytes(self.filename))
p.pack_bytes(dpb)
self.sock.send(p.get_buffer())
@wire_operation
def _op_cont_auth(self, auth_data, auth_plugin_name, auth_plugin_list, keys):
p = xdrlib.Packer()
p.pack_int(self.op_cont_auth)
p.pack_string(bytes_to_hex(auth_data))
p.pack_bytes(auth_plugin_name)
p.pack_bytes(auth_plugin_list)
p.pack_bytes(keys)
self.sock.send(p.get_buffer())
@wire_operation
def _parse_connect_response(self):
# want and treat op_accept or op_cond_accept or op_accept_data
b = self.recv_channel(4)
while bytes_to_bint(b) == self.op_dummy:
b = self.recv_channel(4)
if bytes_to_bint(b) == self.op_reject:
raise OperationalError('Connection is rejected')
op_code = bytes_to_bint(b)
if op_code == self.op_response:
return self._parse_op_response() # error occured
b = self.recv_channel(12)
self.accept_version = byte_to_int(b[3])
self.accept_architecture = bytes_to_bint(b[4:8])
self.accept_type = bytes_to_bint(b[8:])
self.lazy_response_count = 0
if op_code == self.op_cond_accept or op_code == self.op_accept_data:
ln = bytes_to_bint(self.recv_channel(4))
data = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_bint(self.recv_channel(4))
self.accept_plugin_name = self.recv_channel(ln, word_alignment=True)
is_authenticated = bytes_to_bint(self.recv_channel(4))
ln = bytes_to_bint(self.recv_channel(4))
self.recv_channel(ln, word_alignment=True) # keys
if is_authenticated == 0:
if self.accept_plugin_name in (b'Srp256', b'Srp'):
hash_algo = {
b'Srp256': hashlib.sha256,
b'Srp': hashlib.sha1,
}[self.accept_plugin_name]
user = self.user
if len(user) > 2 and user[0] == user[-1] == '"':
user = user[1:-1]
user = user.replace('""','"')
else:
user = user.upper()
if len(data) == 0:
# send op_cont_auth
self._op_cont_auth(
srp.long2bytes(self.client_public_key),
self.accept_plugin_name,
self.plugin_list,
b''
)
# parse op_cont_auth
b = self.recv_channel(4)
assert bytes_to_bint(b) == self.op_cont_auth
ln = bytes_to_bint(self.recv_channel(4))
data = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_bint(self.recv_channel(4))
plugin_name = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_bint(self.recv_channel(4))
plugin_list = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_bint(self.recv_channel(4))
keys = self.recv_channel(ln, word_alignment=True)
ln = bytes_to_int(data[:2])
server_salt = data[2:ln+2]
server_public_key = srp.bytes2long(
hex_to_bytes(data[4+ln:]))
auth_data, session_key = srp.client_proof(
self.str_to_bytes(user),
self.str_to_bytes(self.password),
server_salt,
self.client_public_key,
server_public_key,
self.client_private_key,
hash_algo)
elif self.accept_plugin_name == b'Legacy_Auth':
auth_data = self.str_to_bytes(get_crypt(self.password))
session_key = b''
else:
raise OperationalError(
'Unknown auth plugin %s' % (self.accept_plugin_name)
)
else:
auth_data = b''
session_key = b''
if op_code == self.op_cond_accept:
self._op_cont_auth(
auth_data,
self.accept_plugin_name,
self.plugin_list,
b''
)
(h, oid, buf) = self._op_response()
if self.wire_crypt and session_key:
# op_crypt: plugin[Arc4] key[Symmetric]
p = xdrlib.Packer()
p.pack_int(self.op_crypt)
p.pack_string(b'Arc4')
p.pack_string(b'Symmetric')
self.sock.send(p.get_buffer())
self.sock.set_translator(
ARC4.new(session_key), ARC4.new(session_key))
(h, oid, buf) = self._op_response()
else: # use later _op_attach() and _op_create()
self.auth_data = auth_data
else:
assert op_code == self.op_accept
@wire_operation
def _op_attach(self):
dpb = bs([isc_dpb_version1])
s = self.str_to_bytes(self.charset)
dpb += bs([isc_dpb_lc_ctype, len(s)]) + s
s = self.str_to_bytes(self.user)
dpb += bs([isc_dpb_user_name, len(s)]) + s
if self.accept_version < PROTOCOL_VERSION13:
enc_pass = get_crypt(self.password)
if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
s = self.str_to_bytes(self.password)
dpb += bs([isc_dpb_password, len(s)]) + s
else:
enc_pass = self.str_to_bytes(enc_pass)
dpb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
if self.role:
s = self.str_to_bytes(self.role)
dpb += bs([isc_dpb_sql_role_name, len(s)]) + s
dpb += bs([isc_dpb_process_id, 4]) + int_to_bytes(os.getpid(), 4)
s = self.str_to_bytes(sys.argv[0])
dpb += bs([isc_dpb_process_name, len(s)]) + s
if self.auth_data:
s = bytes_to_hex(self.auth_data)
dpb += bs([isc_dpb_specific_auth_data, len(s)]) + s
if self.timezone:
s = self.str_to_bytes(self.timezone)
dpb += bs([isc_dpb_session_time_zone, len(s)]) + s
p = xdrlib.Packer()
p.pack_int(self.op_attach)
p.pack_int(0) # Database Object ID
p.pack_string(self.str_to_bytes(self.filename))
p.pack_bytes(dpb)
self.sock.send(p.get_buffer())
@wire_operation
def _op_drop_database(self):
if self.db_handle is None:
raise OperationalError('_op_drop_database() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_drop_database)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_service_attach(self):
spb = bs([2, 2])
s = self.str_to_bytes(self.user)
spb += bs([isc_spb_user_name, len(s)]) + s
if self.accept_version < PROTOCOL_VERSION13:
enc_pass = get_crypt(self.password)
if self.accept_version == PROTOCOL_VERSION10 or not enc_pass:
s = self.str_to_bytes(self.password)
spb += bs([isc_dpb_password, len(s)]) + s
else:
enc_pass = self.str_to_bytes(enc_pass)
spb += bs([isc_dpb_password_enc, len(enc_pass)]) + enc_pass
if self.auth_data:
s = self.str_to_bytes(bytes_to_hex(self.auth_data))
spb += bs([isc_dpb_specific_auth_data, len(s)]) + s
spb += bs([isc_spb_dummy_packet_interval, 0x04, 0x78, 0x0a, 0x00, 0x00])
p = xdrlib.Packer()
p.pack_int(self.op_service_attach)
p.pack_int(0)
p.pack_string(self.str_to_bytes('service_mgr'))
p.pack_bytes(spb)
self.sock.send(p.get_buffer())
@wire_operation
def _op_service_info(self, param, item, buffer_length=512):
if self.db_handle is None:
raise OperationalError('_op_service_info() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_service_info)
p.pack_int(self.db_handle)
p.pack_int(0)
p.pack_bytes(param)
p.pack_bytes(item)
p.pack_int(buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_service_start(self, param):
if self.db_handle is None:
raise OperationalError('_op_service_start() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_service_start)
p.pack_int(self.db_handle)
p.pack_int(0)
p.pack_bytes(param)
self.sock.send(p.get_buffer())
@wire_operation
def _op_service_detach(self):
if self.db_handle is None:
raise OperationalError('_op_service_detach() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_service_detach)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_info_database(self, b):
if self.db_handle is None:
raise OperationalError('_op_info_database() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_info_database)
p.pack_int(self.db_handle)
p.pack_int(0)
p.pack_bytes(b)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_transaction(self, tpb):
if self.db_handle is None:
raise OperationalError('_op_transaction() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_transaction)
p.pack_int(self.db_handle)
p.pack_bytes(tpb)
self.sock.send(p.get_buffer())
@wire_operation
def _op_commit(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_commit)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_commit_retaining(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_commit_retaining)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_rollback(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_rollback)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_rollback_retaining(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_rollback_retaining)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_allocate_statement(self):
if self.db_handle is None:
raise OperationalError('_op_allocate_statement() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_allocate_statement)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_info_transaction(self, trans_handle, b):
p = xdrlib.Packer()
p.pack_int(self.op_info_transaction)
p.pack_int(trans_handle)
p.pack_int(0)
p.pack_bytes(b)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_free_statement(self, stmt_handle, mode):
p = xdrlib.Packer()
p.pack_int(self.op_free_statement)
p.pack_int(stmt_handle)
p.pack_int(mode)
self.sock.send(p.get_buffer())
@wire_operation
def _op_prepare_statement(self, stmt_handle, trans_handle, query, option_items=None):
if option_items is None:
option_items=bs([])
desc_items = option_items + bs([isc_info_sql_stmt_type])+INFO_SQL_SELECT_DESCRIBE_VARS
p = xdrlib.Packer()
p.pack_int(self.op_prepare_statement)
p.pack_int(trans_handle)
p.pack_int(stmt_handle)
p.pack_int(3) # dialect = 3
p.pack_string(self.str_to_bytes(query))
p.pack_bytes(desc_items)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_info_sql(self, stmt_handle, vars):
p = xdrlib.Packer()
p.pack_int(self.op_info_sql)
p.pack_int(stmt_handle)
p.pack_int(0)
p.pack_bytes(vars)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_execute(self, stmt_handle, trans_handle, params):
p = xdrlib.Packer()
p.pack_int(self.op_execute)
p.pack_int(stmt_handle)
p.pack_int(trans_handle)
if len(params) == 0:
p.pack_bytes(bs([]))
p.pack_int(0)
p.pack_int(0)
self.sock.send(p.get_buffer())
else:
(blr, values) = self.params_to_blr(trans_handle, params)
p.pack_bytes(blr)
p.pack_int(0)
p.pack_int(1)
self.sock.send(p.get_buffer() + values)
@wire_operation
def _op_execute2(self, stmt_handle, trans_handle, params, output_blr):
p = xdrlib.Packer()
p.pack_int(self.op_execute2)
p.pack_int(stmt_handle)
p.pack_int(trans_handle)
if len(params) == 0:
values = b''
p.pack_bytes(bs([]))
p.pack_int(0)
p.pack_int(0)
else:
(blr, values) = self.params_to_blr(trans_handle, params)
p.pack_bytes(blr)
p.pack_int(0)
p.pack_int(1)
q = xdrlib.Packer()
q.pack_bytes(output_blr)
q.pack_int(0)
self.sock.send(p.get_buffer() + values + q.get_buffer())
@wire_operation
def _op_exec_immediate(self, trans_handle, query):
if self.db_handle is None:
raise OperationalError('_op_exec_immediate() Invalid db handle')
desc_items = bs([])
p = xdrlib.Packer()
p.pack_int(self.op_exec_immediate)
p.pack_int(trans_handle)
p.pack_int(self.db_handle)
p.pack_int(3) # dialect = 3
p.pack_string(self.str_to_bytes(query))
p.pack_bytes(desc_items)
p.pack_int(self.buffer_length)
self.sock.send(p.get_buffer())
@wire_operation
def _op_fetch(self, stmt_handle, blr):
p = xdrlib.Packer()
p.pack_int(self.op_fetch)
p.pack_int(stmt_handle)
p.pack_bytes(blr)
p.pack_int(0)
p.pack_int(400)
self.sock.send(p.get_buffer())
@wire_operation
def _op_fetch_response(self, stmt_handle, xsqlda):
op_code = bytes_to_bint(self.recv_channel(4))
while op_code == self.op_dummy:
op_code = bytes_to_bint(self.recv_channel(4))
while op_code == self.op_response and self.lazy_response_count:
self.lazy_response_count -= 1
h, oid, buf = self._parse_op_response()
op_code = bytes_to_bint(self.recv_channel(4))
if op_code != self.op_fetch_response:
if op_code == self.op_response:
self._parse_op_response()
raise InternalError("op_fetch_response:op_code = %d" % (op_code,))
b = self.recv_channel(8)
status = bytes_to_bint(b[:4])
count = bytes_to_bint(b[4:8])
rows = []
while count:
r = [None] * len(xsqlda)
if self.accept_version < PROTOCOL_VERSION13:
for i in range(len(xsqlda)):
x = xsqlda[i]
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
if self.recv_channel(4) == bs([0]) * 4: # Not NULL
r[i] = x.value(raw_value)
else: # PROTOCOL_VERSION13
n = len(xsqlda) // 8
if len(xsqlda) % 8 != 0:
n += 1
null_indicator = 0
for c in reversed(self.recv_channel(n, word_alignment=True)):
null_indicator <<= 8
null_indicator += c if PYTHON_MAJOR_VER == 3 else ord(c)
for i in range(len(xsqlda)):
x = xsqlda[i]
if null_indicator & (1 << i):
continue
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
r[i] = x.value(raw_value)
rows.append(r)
b = self.recv_channel(12)
op_code = bytes_to_bint(b[:4])
status = bytes_to_bint(b[4:8])
count = bytes_to_bint(b[8:])
return rows, status != 100
@wire_operation
def _op_detach(self):
if self.db_handle is None:
raise OperationalError('_op_detach() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_detach)
p.pack_int(self.db_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_open_blob(self, blob_id, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_open_blob)
p.pack_int(trans_handle)
self.sock.send(p.get_buffer() + blob_id)
@wire_operation
def _op_create_blob2(self, trans_handle):
p = xdrlib.Packer()
p.pack_int(self.op_create_blob2)
p.pack_int(0)
p.pack_int(trans_handle)
p.pack_int(0)
p.pack_int(0)
self.sock.send(p.get_buffer())
@wire_operation
def _op_get_segment(self, blob_handle):
p = xdrlib.Packer()
p.pack_int(self.op_get_segment)
p.pack_int(blob_handle)
p.pack_int(self.buffer_length)
p.pack_int(0)
self.sock.send(p.get_buffer())
@wire_operation
def _op_put_segment(self, blob_handle, seg_data):
ln = len(seg_data)
p = xdrlib.Packer()
p.pack_int(self.op_put_segment)
p.pack_int(blob_handle)
p.pack_int(ln)
p.pack_int(ln)
pad_length = (4-ln) & 3
self.sock.send(p.get_buffer() + seg_data + bs([0])*pad_length)
@wire_operation
def _op_batch_segments(self, blob_handle, seg_data):
ln = len(seg_data)
p = xdrlib.Packer()
p.pack_int(self.op_batch_segments)
p.pack_int(blob_handle)
p.pack_int(ln + 2)
p.pack_int(ln + 2)
pad_length = ((4-(ln+2)) & 3)
self.sock.send(p.get_buffer() + int_to_bytes(ln, 2) + seg_data + bs([0])*pad_length)
@wire_operation
def _op_close_blob(self, blob_handle):
p = xdrlib.Packer()
p.pack_int(self.op_close_blob)
p.pack_int(blob_handle)
self.sock.send(p.get_buffer())
@wire_operation
def _op_que_events(self, event_names, event_id):
if self.db_handle is None:
raise OperationalError('_op_que_events() Invalid db handle')
params = bs([1])
for name, n in event_names.items():
params += bs([len(name)])
params += self.str_to_bytes(name)
params += int_to_bytes(n, 4)
p = xdrlib.Packer()
p.pack_int(self.op_que_events)
p.pack_int(self.db_handle)
p.pack_bytes(params)
p.pack_int(0) # ast
p.pack_int(0) # args
p.pack_int(event_id)
self.sock.send(p.get_buffer())
@wire_operation
def _op_cancel_events(self, event_id):
if self.db_handle is None:
raise OperationalError('_op_cancel_events() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_cancel_events)
p.pack_int(self.db_handle)
p.pack_int(event_id)
self.sock.send(p.get_buffer())
@wire_operation
def _op_connect_request(self):
if self.db_handle is None:
raise OperationalError('_op_connect_request() Invalid db handle')
p = xdrlib.Packer()
p.pack_int(self.op_connect_request)
p.pack_int(1) # async
p.pack_int(self.db_handle)
p.pack_int(0)
self.sock.send(p.get_buffer())
@wire_operation
def _op_response(self):
b = self.recv_channel(4)
while bytes_to_bint(b) == self.op_dummy:
b = self.recv_channel(4)
op_code = bytes_to_bint(b)
while op_code == self.op_response and self.lazy_response_count:
self.lazy_response_count -= 1
h, oid, buf = self._parse_op_response()
b = self.recv_channel(4)
if op_code == self.op_cont_auth:
raise OperationalError('Unauthorized')
elif op_code != self.op_response:
raise InternalError("_op_response:op_code = %d" % (op_code,))
return self._parse_op_response()
@wire_operation
def _op_event(self):
b = self.recv_channel(4)
while bytes_to_bint(b) == self.op_dummy:
b = self.recv_channel(4)
op_code = bytes_to_bint(b)
if op_code == self.op_response and self.lazy_response_count:
self.lazy_response_count -= 1
self._parse_op_response()
b = self.recv_channel(4)
if op_code == self.op_exit or bytes_to_bint(b) == self.op_exit:
raise DisconnectByPeer
if op_code != self.op_event:
if op_code == self.op_response:
self._parse_op_response()
raise InternalError("_op_event:op_code = %d" % (op_code,))
return self._parse_op_event()
@wire_operation
def _op_sql_response(self, xsqlda):
b = self.recv_channel(4)
while bytes_to_bint(b) == self.op_dummy:
b = self.recv_channel(4)
op_code = bytes_to_bint(b)
if op_code != self.op_sql_response:
if op_code == self.op_response:
self._parse_op_response()
raise InternalError("_op_sql_response:op_code = %d" % (op_code,))
b = self.recv_channel(4)
count = bytes_to_bint(b[:4])
r = []
if count == 0:
return []
if self.accept_version < PROTOCOL_VERSION13:
for i in range(len(xsqlda)):
x = xsqlda[i]
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
if self.recv_channel(4) == bs([0]) * 4: # Not NULL
r.append(x.value(raw_value))
else:
r.append(None)
else:
n = len(xsqlda) // 8
if len(xsqlda) % 8 != 0:
n += 1
null_indicator = 0
for c in reversed(self.recv_channel(n, word_alignment=True)):
null_indicator <<= 8
null_indicator += c if PYTHON_MAJOR_VER == 3 else ord(c)
for i in range(len(xsqlda)):
x = xsqlda[i]
if null_indicator & (1 << i):
r.append(None)
else:
if x.io_length() < 0:
b = self.recv_channel(4)
ln = bytes_to_bint(b)
else:
ln = x.io_length()
raw_value = self.recv_channel(ln, word_alignment=True)
r.append(x.value(raw_value))
return r
def _wait_for_event(self, timeout):
"""Block until an op_event packet arrives and return its payload.

Returns ``(event_id, {event_name: count})``, or ``None`` when the
channel read yields nothing.  On op_exit/op_disconnect the loop ends
and ``(0, {})`` is returned.

NOTE(review): ``timeout`` is not referenced anywhere in this body;
presumably the underlying channel read is expected to honor it.
"""
event_names = {}
event_id = 0
while True:
b4 = self.recv_channel(4)
if b4 is None:
return None
op_code = bytes_to_bint(b4)
if op_code == self.op_dummy:
pass
elif op_code == self.op_exit or op_code == self.op_disconnect:
break
elif op_code == self.op_event:
bytes_to_int(self.recv_channel(4)) # db_handle
ln = bytes_to_bint(self.recv_channel(4))
b = self.recv_channel(ln, word_alignment=True)
assert byte_to_int(b[0]) == 1
# The buffer is a sequence of entries, each:
# 1-byte name length, name bytes, 4-byte count -> stride ln + 5.
i = 1
while i < len(b):
ln = byte_to_int(b[i])
s = self.connection.bytes_to_str(b[i+1:i+1+ln])
n = bytes_to_int(b[i+1+ln:i+1+ln+4])
event_names[s] = n
i += ln + 5
self.recv_channel(8) # ignore AST info
event_id = bytes_to_bint(self.recv_channel(4))
break
else:
raise InternalError("_wait_for_event:op_code = %d" % (op_code,))
return (event_id, event_names)
|
nakagami/pyfirebirdsql
|
firebirdsql/xsqlvar.py
|
calc_blr
|
python
|
def calc_blr(xsqlda):
"Calculate BLR from XSQLVAR array."
# Two BLR items are emitted per column (the type descriptor plus a
# trailing short), hence the doubled count in the message header.
ln = len(xsqlda) * 2
# Header bytes: presumably blr_version5, blr_begin, blr_message,
# message number 0, then the 16-bit item count (little-endian) —
# matches the Firebird BLR message layout; TODO confirm.
blr = [5, 2, 4, 0, ln & 255, ln >> 8]
for x in xsqlda:
sqltype = x.sqltype
# Length-carrying types encode sqllen; scaled numerics encode sqlscale;
# everything else is looked up in the sqltype2blr table.
if sqltype == SQL_TYPE_VARYING:
blr += [37, x.sqllen & 255, x.sqllen >> 8]
elif sqltype == SQL_TYPE_TEXT:
blr += [14, x.sqllen & 255, x.sqllen >> 8]
elif sqltype == SQL_TYPE_LONG:
blr += [8, x.sqlscale]
elif sqltype == SQL_TYPE_SHORT:
blr += [7, x.sqlscale]
elif sqltype == SQL_TYPE_INT64:
blr += [16, x.sqlscale]
elif sqltype == SQL_TYPE_QUAD:
blr += [9, x.sqlscale]
elif sqltype == SQL_TYPE_DEC_FIXED:
blr += [26, x.sqlscale]
else:
blr += sqltype2blr[sqltype]
blr += [7, 0] # [blr_short, 0]
blr += [255, 76] # [blr_end, blr_eoc]
# x.sqlscale may be negative, so map each value into range(0, 256)
# before packing into bytes.
return bs(256 + b if b < 0 else b for b in blr)
|
Calculate BLR from XSQLVAR array.
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/xsqlvar.py#L216-L242
|
[
"def bs(byte_array):\n if PYTHON_MAJOR_VER == 2:\n return ''.join([chr(c) for c in byte_array])\n else:\n return bytes(byte_array)\n"
] |
##############################################################################
# Copyright (c) 2009-2018, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
import datetime
import decimal
from firebirdsql.consts import *
from firebirdsql.utils import *
from firebirdsql.wireprotocol import INFO_SQL_SELECT_DESCRIBE_VARS
from firebirdsql.tz_utils import get_tzinfo
from firebirdsql import decfloat
class XSQLVAR:
"""Describes one column of a Firebird result/parameter set and converts
its raw wire bytes into Python values."""
# On-wire (I/O) byte length per SQL type; -1 means variable length
# (a length prefix is sent on the wire instead).
type_length = {
SQL_TYPE_VARYING: -1,
SQL_TYPE_SHORT: 4,
SQL_TYPE_LONG: 4,
SQL_TYPE_FLOAT: 4,
SQL_TYPE_TIME: 4,
SQL_TYPE_DATE: 4,
SQL_TYPE_DOUBLE: 8,
SQL_TYPE_TIMESTAMP: 8,
SQL_TYPE_BLOB: 8,
SQL_TYPE_ARRAY: 8,
SQL_TYPE_QUAD: 8,
SQL_TYPE_INT64: 8,
SQL_TYPE_TIMESTAMP_TZ: 10,
SQL_TYPE_TIME_TZ: 6,
SQL_TYPE_DEC64 : 8,
SQL_TYPE_DEC128 : 16,
SQL_TYPE_DEC_FIXED: 16,
SQL_TYPE_BOOLEAN: 1,
}
# Suggested display width (in characters) per SQL type; -1 means
# variable (taken from sqllen at runtime).
type_display_length = {
SQL_TYPE_VARYING: -1,
SQL_TYPE_SHORT: 6,
SQL_TYPE_LONG: 11,
SQL_TYPE_FLOAT: 17,
SQL_TYPE_TIME: 11,
SQL_TYPE_DATE: 10,
SQL_TYPE_DOUBLE: 17,
SQL_TYPE_TIMESTAMP: 22,
SQL_TYPE_BLOB: 0,
SQL_TYPE_ARRAY: -1,
SQL_TYPE_QUAD: 20,
SQL_TYPE_INT64: 20,
SQL_TYPE_TIMESTAMP_TZ: 28,
SQL_TYPE_TIME_TZ: 17,
SQL_TYPE_DEC64: 16,
SQL_TYPE_DEC128: 34,
SQL_TYPE_DEC_FIXED: 34,
SQL_TYPE_BOOLEAN: 5,
}
def __init__(self, bytes_to_str):
# ``bytes_to_str`` decodes raw bytes into text (supplied by the
# connection so the right charset is used).
self.bytes_to_str = bytes_to_str
# All descriptor fields are filled in later by the info parser.
self.sqltype = None
self.sqlscale = None
self.sqlsubtype = None
self.sqllen = None
self.null_ok = None
self.fieldname = ''
self.relname = ''
self.ownname = ''
self.aliasname = ''
def io_length(self):
"""Return the on-wire byte length of this column, or -1 if variable."""
sqltype = self.sqltype
if sqltype == SQL_TYPE_TEXT:
return self.sqllen
else:
return self.type_length[sqltype]
def display_length(self):
"""Return the suggested display width of this column."""
sqltype = self.sqltype
if sqltype == SQL_TYPE_TEXT:
return self.sqllen
else:
return self.type_display_length[sqltype]
def precision(self):
# Precision is reported simply as the display length.
return self.display_length()
def __str__(self):
# Compact bracketed dump of all descriptor fields, for debugging.
s = ','.join([
str(self.sqltype), str(self.sqlscale), str(self.sqlsubtype),
str(self.sqllen), str(self.null_ok), self.fieldname,
self.relname, self.ownname, self.aliasname,
])
return '[' + s + ']'
def _parse_date(self, raw_value):
"Convert raw data to datetime.date"
# The wire value is a signed big-endian day count; the 678882 offset
# presumably shifts it onto the Gregorian-calendar day-number scheme
# used by the conversion below — TODO confirm against the Firebird
# date epoch.
nday = bytes_to_bint(raw_value) + 678882
century = (4 * nday - 1) // 146097
nday = 4 * nday - 1 - 146097 * century
day = nday // 4
nday = (4 * day + 3) // 1461
day = 4 * day + 3 - 1461 * nday
day = (day + 4) // 4
month = (5 * day - 3) // 153
day = 5 * day - 3 - 153 * month
day = (day + 5) // 5
year = 100 * century + nday
# The intermediate calendar counts months from March; rotate back to
# a January-based (year, month).
if month < 10:
month += 3
else:
month -= 9
year += 1
return year, month, day
def _parse_time(self, raw_value):
"Convert raw data to datetime.time"
# The wire value counts units of 1/10000 second (100 microseconds).
n = bytes_to_bint(raw_value)
s = n // 10000
m = s // 60
h = m // 60
m = m % 60
s = s % 60
# Remainder is converted from 100-microsecond units to microseconds.
return (h, m, s, (n % 10000) * 100)
def _parse_time_zone(self, raw_value):
# Map the numeric time-zone id from the wire to a tzinfo object.
return get_tzinfo(bytes_to_uint(raw_value))
def value(self, raw_value):
"""Convert this column's raw wire bytes into a Python value,
dispatching on ``self.sqltype``.  Unknown types are returned as the
raw bytes unchanged."""
if self.sqltype == SQL_TYPE_TEXT:
# CHAR is space-padded on the wire; strip the trailing padding.
return self.bytes_to_str(raw_value).rstrip()
elif self.sqltype == SQL_TYPE_VARYING:
return self.bytes_to_str(raw_value)
elif self.sqltype in (SQL_TYPE_SHORT, SQL_TYPE_LONG, SQL_TYPE_INT64):
n = bytes_to_bint(raw_value)
# A non-zero scale marks a fixed-point NUMERIC/DECIMAL value.
if self.sqlscale:
return decimal.Decimal(str(n) + 'e' + str(self.sqlscale))
else:
return n
elif self.sqltype == SQL_TYPE_DATE:
yyyy, mm, dd = self._parse_date(raw_value)
return datetime.date(yyyy, mm, dd)
elif self.sqltype == SQL_TYPE_TIME:
h, m, s, ms = self._parse_time(raw_value)
return datetime.time(h, m, s, ms)
elif self.sqltype == SQL_TYPE_TIMESTAMP:
# First 4 bytes are the date part, the rest the time part.
yyyy, mm, dd = self._parse_date(raw_value[:4])
h, m, s, ms = self._parse_time(raw_value[4:])
return datetime.datetime(yyyy, mm, dd, h, m, s, ms)
elif self.sqltype == SQL_TYPE_FLOAT:
return struct.unpack('!f', raw_value)[0]
elif self.sqltype == SQL_TYPE_DOUBLE:
return struct.unpack('!d', raw_value)[0]
elif self.sqltype == SQL_TYPE_BOOLEAN:
return True if byte_to_int(raw_value[0]) != 0 else False
elif self.sqltype == SQL_TYPE_TIMESTAMP_TZ:
# Layout: 4 bytes date, 4 bytes time, remaining bytes zone id.
yyyy, mm, dd = self._parse_date(raw_value[:4])
h, m, s, ms = self._parse_time(raw_value[4:8])
tz = self._parse_time_zone(raw_value[8:])
return datetime.datetime(yyyy, mm, dd, h, m, s, ms, tzinfo=tz)
elif self.sqltype == SQL_TYPE_TIME_TZ:
h, m, s, ms = self._parse_time(raw_value[:4])
tz = self._parse_time_zone(raw_value[4:])
return datetime.time(h, m, s, ms, tzinfo=tz)
elif self.sqltype == SQL_TYPE_DEC_FIXED:
return decfloat.decimal_fixed_to_decimal(raw_value, self.sqlscale)
elif self.sqltype == SQL_TYPE_DEC64:
return decfloat.decimal64_to_decimal(raw_value)
elif self.sqltype == SQL_TYPE_DEC128:
return decfloat.decimal128_to_decimal(raw_value)
else:
return raw_value
# BLR item bytes for SQL types that carry neither a length nor a scale;
# used as the fallback table by calc_blr.
sqltype2blr = {
SQL_TYPE_DOUBLE: [27],
SQL_TYPE_FLOAT: [10],
SQL_TYPE_D_FLOAT: [11],
SQL_TYPE_DATE: [12],
SQL_TYPE_TIME: [13],
SQL_TYPE_TIMESTAMP: [35],
SQL_TYPE_BLOB: [9, 0],
SQL_TYPE_ARRAY: [9, 0],
SQL_TYPE_BOOLEAN: [23],
SQL_TYPE_DEC64: [24],
SQL_TYPE_DEC128: [25],
SQL_TYPE_TIME_TZ: [28],
SQL_TYPE_TIMESTAMP_TZ: [29],
}
def parse_select_items(buf, xsqlda, connection):
"""Fill ``xsqlda`` in place from an isc_info_sql item buffer.

Each item is a 1-byte tag followed by a 2-byte length and payload.
Returns the 1-based column index to resume from when the buffer was
truncated (isc_info_truncated), or -1 when all items were consumed.
"""
index = 0
i = 0
item = byte_to_int(buf[i])
while item != isc_info_end:
if item == isc_info_sql_sqlda_seq:
# Start of a new column description; remember its 1-based index.
l = bytes_to_int(buf[i+1:i+3])
index = bytes_to_int(buf[i+3:i+3+l])
xsqlda[index-1] = XSQLVAR(connection.bytes_to_ustr if connection.use_unicode else connection.bytes_to_str)
i = i + 3 + l
elif item == isc_info_sql_type:
l = bytes_to_int(buf[i+1:i+3])
# The low bit flags nullability elsewhere; mask it off the type.
xsqlda[index-1].sqltype = bytes_to_int(buf[i+3:i+3+l]) & ~ 1
i = i + 3 + l
elif item == isc_info_sql_sub_type:
l = bytes_to_int(buf[i+1:i+3])
xsqlda[index-1].sqlsubtype = bytes_to_int(buf[i+3:i+3+l])
i = i + 3 + l
elif item == isc_info_sql_scale:
l = bytes_to_int(buf[i+1:i+3])
xsqlda[index-1].sqlscale = bytes_to_int(buf[i+3:i+3+l])
i = i + 3 + l
elif item == isc_info_sql_length:
l = bytes_to_int(buf[i+1:i+3])
xsqlda[index-1].sqllen = bytes_to_int(buf[i+3:i+3+l])
i = i + 3 + l
elif item == isc_info_sql_null_ind:
l = bytes_to_int(buf[i+1:i+3])
xsqlda[index-1].null_ok = bytes_to_int(buf[i+3:i+3+l])
i = i + 3 + l
elif item == isc_info_sql_field:
l = bytes_to_int(buf[i+1:i+3])
xsqlda[index-1].fieldname = connection.bytes_to_str(buf[i+3:i+3+l])
i = i + 3 + l
elif item == isc_info_sql_relation:
l = bytes_to_int(buf[i+1:i+3])
xsqlda[index-1].relname = connection.bytes_to_str(buf[i+3:i+3+l])
i = i + 3 + l
elif item == isc_info_sql_owner:
l = bytes_to_int(buf[i+1:i+3])
xsqlda[index-1].ownname = connection.bytes_to_str(buf[i+3:i+3+l])
i = i + 3 + l
elif item == isc_info_sql_alias:
l = bytes_to_int(buf[i+1:i+3])
xsqlda[index-1].aliasname = connection.bytes_to_str(buf[i+3:i+3+l])
i = i + 3 + l
elif item == isc_info_truncated:
return index # return next index
elif item == isc_info_sql_describe_end:
i = i + 1
else:
print('\t', item, 'Invalid item [%02x] ! i=%d' % (buf[i], i))
i = i + 1
item = byte_to_int(buf[i])
return -1 # no more info
def parse_xsqlda(buf, connection, stmt_handle):
"""Parse a statement-info buffer into (stmt_type, xsqlda).

Extracts the statement type and, for selects, builds the list of
XSQLVAR column descriptions, issuing follow-up info requests when the
server truncated the describe-vars payload.
"""
xsqlda = []
stmt_type = None
i = 0
while i < len(buf):
if buf[i:i+3] == bs([isc_info_sql_stmt_type, 0x04, 0x00]):
stmt_type = bytes_to_int(buf[i+3:i+7])
i += 7
elif buf[i:i+2] == bs([isc_info_sql_select, isc_info_sql_describe_vars]):
i += 2
l = bytes_to_int(buf[i:i+2])
i += 2
col_len = bytes_to_int(buf[i:i+l])
xsqlda = [None] * col_len
next_index = parse_select_items(buf[i+l:], xsqlda, connection)
while next_index > 0: # more describe vars
# Re-request the remaining column descriptions starting at
# next_index, because the previous reply was truncated.
connection._op_info_sql(
stmt_handle,
bs([isc_info_sql_sqlda_start, 2]) + int_to_bytes(next_index, 2) + INFO_SQL_SELECT_DESCRIBE_VARS
)
(h, oid, buf) = connection._op_response()
assert buf[:2] == bs([0x04, 0x07])
l = bytes_to_int(buf[2:4])
assert bytes_to_int(buf[4:4+l]) == col_len
next_index = parse_select_items(buf[4+l:], xsqlda, connection)
else:
break
return stmt_type, xsqlda
|
nakagami/pyfirebirdsql
|
firebirdsql/xsqlvar.py
|
XSQLVAR._parse_date
|
python
|
def _parse_date(self, raw_value):
"Convert raw data to datetime.date"
nday = bytes_to_bint(raw_value) + 678882
century = (4 * nday - 1) // 146097
nday = 4 * nday - 1 - 146097 * century
day = nday // 4
nday = (4 * day + 3) // 1461
day = 4 * day + 3 - 1461 * nday
day = (day + 4) // 4
month = (5 * day - 3) // 153
day = 5 * day - 3 - 153 * month
day = (day + 5) // 5
year = 100 * century + nday
if month < 10:
month += 3
else:
month -= 9
year += 1
return year, month, day
|
Convert raw data to datetime.date
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/xsqlvar.py#L118-L138
|
[
"def bytes_to_bint(b, u=False): # Read as big endian\n if u:\n fmtmap = {1: 'B', 2: '>H', 4: '>L', 8: '>Q'}\n else:\n fmtmap = {1: 'b', 2: '>h', 4: '>l', 8: '>q'}\n fmt = fmtmap.get(len(b))\n if fmt is None:\n raise InternalError(\"Invalid bytes length:%d\" % (len(b), ))\n return struct.unpack(fmt, b)[0]\n"
] |
class XSQLVAR:
type_length = {
SQL_TYPE_VARYING: -1,
SQL_TYPE_SHORT: 4,
SQL_TYPE_LONG: 4,
SQL_TYPE_FLOAT: 4,
SQL_TYPE_TIME: 4,
SQL_TYPE_DATE: 4,
SQL_TYPE_DOUBLE: 8,
SQL_TYPE_TIMESTAMP: 8,
SQL_TYPE_BLOB: 8,
SQL_TYPE_ARRAY: 8,
SQL_TYPE_QUAD: 8,
SQL_TYPE_INT64: 8,
SQL_TYPE_TIMESTAMP_TZ: 10,
SQL_TYPE_TIME_TZ: 6,
SQL_TYPE_DEC64 : 8,
SQL_TYPE_DEC128 : 16,
SQL_TYPE_DEC_FIXED: 16,
SQL_TYPE_BOOLEAN: 1,
}
type_display_length = {
SQL_TYPE_VARYING: -1,
SQL_TYPE_SHORT: 6,
SQL_TYPE_LONG: 11,
SQL_TYPE_FLOAT: 17,
SQL_TYPE_TIME: 11,
SQL_TYPE_DATE: 10,
SQL_TYPE_DOUBLE: 17,
SQL_TYPE_TIMESTAMP: 22,
SQL_TYPE_BLOB: 0,
SQL_TYPE_ARRAY: -1,
SQL_TYPE_QUAD: 20,
SQL_TYPE_INT64: 20,
SQL_TYPE_TIMESTAMP_TZ: 28,
SQL_TYPE_TIME_TZ: 17,
SQL_TYPE_DEC64: 16,
SQL_TYPE_DEC128: 34,
SQL_TYPE_DEC_FIXED: 34,
SQL_TYPE_BOOLEAN: 5,
}
def __init__(self, bytes_to_str):
self.bytes_to_str = bytes_to_str
self.sqltype = None
self.sqlscale = None
self.sqlsubtype = None
self.sqllen = None
self.null_ok = None
self.fieldname = ''
self.relname = ''
self.ownname = ''
self.aliasname = ''
def io_length(self):
sqltype = self.sqltype
if sqltype == SQL_TYPE_TEXT:
return self.sqllen
else:
return self.type_length[sqltype]
def display_length(self):
sqltype = self.sqltype
if sqltype == SQL_TYPE_TEXT:
return self.sqllen
else:
return self.type_display_length[sqltype]
def precision(self):
return self.display_length()
def __str__(self):
s = ','.join([
str(self.sqltype), str(self.sqlscale), str(self.sqlsubtype),
str(self.sqllen), str(self.null_ok), self.fieldname,
self.relname, self.ownname, self.aliasname,
])
return '[' + s + ']'
def _parse_time(self, raw_value):
"Convert raw data to datetime.time"
n = bytes_to_bint(raw_value)
s = n // 10000
m = s // 60
h = m // 60
m = m % 60
s = s % 60
return (h, m, s, (n % 10000) * 100)
def _parse_time_zone(self, raw_value):
return get_tzinfo(bytes_to_uint(raw_value))
def value(self, raw_value):
if self.sqltype == SQL_TYPE_TEXT:
return self.bytes_to_str(raw_value).rstrip()
elif self.sqltype == SQL_TYPE_VARYING:
return self.bytes_to_str(raw_value)
elif self.sqltype in (SQL_TYPE_SHORT, SQL_TYPE_LONG, SQL_TYPE_INT64):
n = bytes_to_bint(raw_value)
if self.sqlscale:
return decimal.Decimal(str(n) + 'e' + str(self.sqlscale))
else:
return n
elif self.sqltype == SQL_TYPE_DATE:
yyyy, mm, dd = self._parse_date(raw_value)
return datetime.date(yyyy, mm, dd)
elif self.sqltype == SQL_TYPE_TIME:
h, m, s, ms = self._parse_time(raw_value)
return datetime.time(h, m, s, ms)
elif self.sqltype == SQL_TYPE_TIMESTAMP:
yyyy, mm, dd = self._parse_date(raw_value[:4])
h, m, s, ms = self._parse_time(raw_value[4:])
return datetime.datetime(yyyy, mm, dd, h, m, s, ms)
elif self.sqltype == SQL_TYPE_FLOAT:
return struct.unpack('!f', raw_value)[0]
elif self.sqltype == SQL_TYPE_DOUBLE:
return struct.unpack('!d', raw_value)[0]
elif self.sqltype == SQL_TYPE_BOOLEAN:
return True if byte_to_int(raw_value[0]) != 0 else False
elif self.sqltype == SQL_TYPE_TIMESTAMP_TZ:
yyyy, mm, dd = self._parse_date(raw_value[:4])
h, m, s, ms = self._parse_time(raw_value[4:8])
tz = self._parse_time_zone(raw_value[8:])
return datetime.datetime(yyyy, mm, dd, h, m, s, ms, tzinfo=tz)
elif self.sqltype == SQL_TYPE_TIME_TZ:
h, m, s, ms = self._parse_time(raw_value[:4])
tz = self._parse_time_zone(raw_value[4:])
return datetime.time(h, m, s, ms, tzinfo=tz)
elif self.sqltype == SQL_TYPE_DEC_FIXED:
return decfloat.decimal_fixed_to_decimal(raw_value, self.sqlscale)
elif self.sqltype == SQL_TYPE_DEC64:
return decfloat.decimal64_to_decimal(raw_value)
elif self.sqltype == SQL_TYPE_DEC128:
return decfloat.decimal128_to_decimal(raw_value)
else:
return raw_value
|
nakagami/pyfirebirdsql
|
firebirdsql/xsqlvar.py
|
XSQLVAR._parse_time
|
python
|
def _parse_time(self, raw_value):
"Convert raw data to datetime.time"
n = bytes_to_bint(raw_value)
s = n // 10000
m = s // 60
h = m // 60
m = m % 60
s = s % 60
return (h, m, s, (n % 10000) * 100)
|
Convert raw data to datetime.time
|
train
|
https://github.com/nakagami/pyfirebirdsql/blob/5ce366c2fc8318510444f4a89801442f3e9e52ca/firebirdsql/xsqlvar.py#L140-L148
|
[
"def bytes_to_bint(b, u=False): # Read as big endian\n if u:\n fmtmap = {1: 'B', 2: '>H', 4: '>L', 8: '>Q'}\n else:\n fmtmap = {1: 'b', 2: '>h', 4: '>l', 8: '>q'}\n fmt = fmtmap.get(len(b))\n if fmt is None:\n raise InternalError(\"Invalid bytes length:%d\" % (len(b), ))\n return struct.unpack(fmt, b)[0]\n"
] |
class XSQLVAR:
type_length = {
SQL_TYPE_VARYING: -1,
SQL_TYPE_SHORT: 4,
SQL_TYPE_LONG: 4,
SQL_TYPE_FLOAT: 4,
SQL_TYPE_TIME: 4,
SQL_TYPE_DATE: 4,
SQL_TYPE_DOUBLE: 8,
SQL_TYPE_TIMESTAMP: 8,
SQL_TYPE_BLOB: 8,
SQL_TYPE_ARRAY: 8,
SQL_TYPE_QUAD: 8,
SQL_TYPE_INT64: 8,
SQL_TYPE_TIMESTAMP_TZ: 10,
SQL_TYPE_TIME_TZ: 6,
SQL_TYPE_DEC64 : 8,
SQL_TYPE_DEC128 : 16,
SQL_TYPE_DEC_FIXED: 16,
SQL_TYPE_BOOLEAN: 1,
}
type_display_length = {
SQL_TYPE_VARYING: -1,
SQL_TYPE_SHORT: 6,
SQL_TYPE_LONG: 11,
SQL_TYPE_FLOAT: 17,
SQL_TYPE_TIME: 11,
SQL_TYPE_DATE: 10,
SQL_TYPE_DOUBLE: 17,
SQL_TYPE_TIMESTAMP: 22,
SQL_TYPE_BLOB: 0,
SQL_TYPE_ARRAY: -1,
SQL_TYPE_QUAD: 20,
SQL_TYPE_INT64: 20,
SQL_TYPE_TIMESTAMP_TZ: 28,
SQL_TYPE_TIME_TZ: 17,
SQL_TYPE_DEC64: 16,
SQL_TYPE_DEC128: 34,
SQL_TYPE_DEC_FIXED: 34,
SQL_TYPE_BOOLEAN: 5,
}
def __init__(self, bytes_to_str):
self.bytes_to_str = bytes_to_str
self.sqltype = None
self.sqlscale = None
self.sqlsubtype = None
self.sqllen = None
self.null_ok = None
self.fieldname = ''
self.relname = ''
self.ownname = ''
self.aliasname = ''
def io_length(self):
sqltype = self.sqltype
if sqltype == SQL_TYPE_TEXT:
return self.sqllen
else:
return self.type_length[sqltype]
def display_length(self):
sqltype = self.sqltype
if sqltype == SQL_TYPE_TEXT:
return self.sqllen
else:
return self.type_display_length[sqltype]
def precision(self):
return self.display_length()
def __str__(self):
s = ','.join([
str(self.sqltype), str(self.sqlscale), str(self.sqlsubtype),
str(self.sqllen), str(self.null_ok), self.fieldname,
self.relname, self.ownname, self.aliasname,
])
return '[' + s + ']'
def _parse_date(self, raw_value):
"Convert raw data to datetime.date"
nday = bytes_to_bint(raw_value) + 678882
century = (4 * nday - 1) // 146097
nday = 4 * nday - 1 - 146097 * century
day = nday // 4
nday = (4 * day + 3) // 1461
day = 4 * day + 3 - 1461 * nday
day = (day + 4) // 4
month = (5 * day - 3) // 153
day = 5 * day - 3 - 153 * month
day = (day + 5) // 5
year = 100 * century + nday
if month < 10:
month += 3
else:
month -= 9
year += 1
return year, month, day
def _parse_time_zone(self, raw_value):
return get_tzinfo(bytes_to_uint(raw_value))
def value(self, raw_value):
if self.sqltype == SQL_TYPE_TEXT:
return self.bytes_to_str(raw_value).rstrip()
elif self.sqltype == SQL_TYPE_VARYING:
return self.bytes_to_str(raw_value)
elif self.sqltype in (SQL_TYPE_SHORT, SQL_TYPE_LONG, SQL_TYPE_INT64):
n = bytes_to_bint(raw_value)
if self.sqlscale:
return decimal.Decimal(str(n) + 'e' + str(self.sqlscale))
else:
return n
elif self.sqltype == SQL_TYPE_DATE:
yyyy, mm, dd = self._parse_date(raw_value)
return datetime.date(yyyy, mm, dd)
elif self.sqltype == SQL_TYPE_TIME:
h, m, s, ms = self._parse_time(raw_value)
return datetime.time(h, m, s, ms)
elif self.sqltype == SQL_TYPE_TIMESTAMP:
yyyy, mm, dd = self._parse_date(raw_value[:4])
h, m, s, ms = self._parse_time(raw_value[4:])
return datetime.datetime(yyyy, mm, dd, h, m, s, ms)
elif self.sqltype == SQL_TYPE_FLOAT:
return struct.unpack('!f', raw_value)[0]
elif self.sqltype == SQL_TYPE_DOUBLE:
return struct.unpack('!d', raw_value)[0]
elif self.sqltype == SQL_TYPE_BOOLEAN:
return True if byte_to_int(raw_value[0]) != 0 else False
elif self.sqltype == SQL_TYPE_TIMESTAMP_TZ:
yyyy, mm, dd = self._parse_date(raw_value[:4])
h, m, s, ms = self._parse_time(raw_value[4:8])
tz = self._parse_time_zone(raw_value[8:])
return datetime.datetime(yyyy, mm, dd, h, m, s, ms, tzinfo=tz)
elif self.sqltype == SQL_TYPE_TIME_TZ:
h, m, s, ms = self._parse_time(raw_value[:4])
tz = self._parse_time_zone(raw_value[4:])
return datetime.time(h, m, s, ms, tzinfo=tz)
elif self.sqltype == SQL_TYPE_DEC_FIXED:
return decfloat.decimal_fixed_to_decimal(raw_value, self.sqlscale)
elif self.sqltype == SQL_TYPE_DEC64:
return decfloat.decimal64_to_decimal(raw_value)
elif self.sqltype == SQL_TYPE_DEC128:
return decfloat.decimal128_to_decimal(raw_value)
else:
return raw_value
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/__init__.py
|
ensure_dir
|
python
|
def ensure_dir(dirname):
try:
# Tries to create the directory
os.makedirs(dirname)
except OSError:
# Check that the directory exists
if os.path.isdir(dirname): pass
else: raise
|
Creates the directory dirname if it does not already exist,
taking into account concurrent 'creation' on the grid.
An exception is thrown if a file (rather than a directory) already
exists.
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/__init__.py#L26-L37
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
# Roy Wallace <roy.wallace@idiap.ch>
# Elie Khoury <Elie.Khoury@idiap.ch>
#
# Copyright (C) 2012-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .extraction import *
import os
import numpy
def convertScoreToList(scores, probes):
    """Attach scores to probe metadata.

    Iterates the ``probes`` dictionary in sorted-key order and pairs the
    i-th score with the i-th probe, producing 5-tuples of the probe
    fields 1..4 followed by the score.
    """
    entries = (probes[key] for key in sorted(probes))
    return [
        (entry[1], entry[2], entry[3], entry[4], scores[position])
        for position, entry in enumerate(entries)
    ]
def convertScoreDictToList(scores, probes):
    """Attach scores to probe metadata taken from a dictionary.

    Probes are visited in sorted-key order; the i-th score is appended
    to fields 1..4 of the i-th probe entry.
    """
    paired = []
    for position, key in enumerate(sorted(probes)):
        record = probes[key]
        paired.append((record[1], record[2], record[3], record[4], scores[position]))
    return paired
def convertScoreListToList(scores, probes):
    """Attach scores to probe metadata taken from a sequence.

    The i-th score is combined with fields 1..4 of the i-th probe tuple.
    """
    return [
        (probe[1], probe[2], probe[3], probe[4], scores[position])
        for position, probe in enumerate(probes)
    ]
def probes_used_generate_vector(probe_files_full, probe_files_model):
    """Generates boolean matrices indicating which are the probes for each model.

    Parameters
    ----------
    probe_files_full : dict
        Mapping of every probe key known to the system.
    probe_files_model : dict
        Mapping containing only the probes used by one particular model.

    Returns
    -------
    numpy.ndarray
        Boolean vector ordered by the sorted keys of ``probe_files_full``,
        with ``True`` at each position whose key also appears in
        ``probe_files_model``.
    """
    import numpy as np
    # ``dict.has_key`` was removed in Python 3; the ``in`` operator is the
    # portable membership test and preserves the original semantics.
    return np.array(
        [key in probe_files_model for key in sorted(probe_files_full.keys())],
        dtype='bool')
def probes_used_extract_scores(full_scores, same_probes):
    """Extracts a matrix of scores for a model, given a probes_used row vector of boolean.

    Parameters
    ----------
    full_scores : numpy.ndarray
        2D score matrix (one row per model, one column per probe).
    same_probes : numpy.ndarray
        1D boolean vector with one entry per column of ``full_scores``.

    Returns
    -------
    numpy.ndarray
        ``float64`` matrix keeping only the columns of ``full_scores``
        whose ``same_probes`` entry is ``True``.

    Raises
    ------
    ValueError
        If ``same_probes`` does not match the number of columns.
    """
    # The original code did ``raise "Size mismatch"``, which raises a bare
    # string -- itself a TypeError on Python 3.  Raise a real exception.
    if full_scores.shape[1] != same_probes.shape[0]:
        raise ValueError("Size mismatch")
    # Boolean-mask column selection replaces the hand-written copy loops
    # (same result, O(rows*cols) in C instead of Python).
    return full_scores[:, same_probes].astype('float64')
def read(filename):
    """Read an audio file and return its sampling rate and float samples.

    Deprecated: use the load() function from
    bob.bio.spear.database.AudioBioFile instead.

    Parameters
    ----------
    filename : str
        Path to a WAV file; only single-channel data is expected here.

    Returns
    -------
    (int, numpy.ndarray)
        The sampling rate and the samples converted to ``float64``.
    """
    # TODO: update xbob.sox first. This will enable the use of formats like
    # NIST sphere and others.
    import scipy.io.wavfile
    rate, audio = scipy.io.wavfile.read(filename)
    # ``numpy.cast`` was removed in NumPy 2.0; ``astype`` is the supported
    # way to convert the integer samples to floating point (same values).
    data = audio.astype('float64')
    return rate, data
def normalize_std_array(vector):
    """Applies a unit mean and variance normalization to an arrayset."""
    n_samples = len(vector)
    # Accumulate the first and second moments over all samples.
    mean = numpy.zeros((1,), dtype='float64')
    second_moment = numpy.zeros((1,), dtype='float64')
    for sample in vector:
        as_float = sample.astype('float64')
        mean += as_float
        second_moment += as_float ** 2
    mean /= n_samples
    second_moment /= n_samples
    # Var(X) = E[X^2] - E[X]^2; the standard deviation is its square root.
    std = (second_moment - mean ** 2) ** 0.5
    # Normalize every sample to zero mean and unit variance.
    normalized = numpy.empty((n_samples, mean.shape[0]), dtype=numpy.float64)
    for row, sample in enumerate(vector):
        normalized[row, :] = (sample - mean) / std
    return normalized
def smoothing(labels, smoothing_window):
""" Applies a smoothing on VAD labels (0/1 speech activity decisions).

Modifies ``labels`` in place and also returns it: single-frame holes
and spikes are removed, then any run of identical labels shorter than
``smoothing_window`` that sits between sufficiently long runs is
flipped to match its neighbors.
"""
# Nothing to smooth when there is almost no speech at all.
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
# Fill single-frame holes (0 between two 1s) ...
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
# ... and remove single-frame spikes (1 between two 0s).
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
# Build a list of [start, end, label] runs over the label sequence.
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
# Interior segments: flip a short run only when both neighbors are long.
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
# Finally handle the last segment against its single neighbor.
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/__init__.py
|
probes_used_generate_vector
|
python
|
def probes_used_generate_vector(probe_files_full, probe_files_model):
import numpy as np
C_probesUsed = np.ndarray((len(probe_files_full),), 'bool')
C_probesUsed.fill(False)
c=0
for k in sorted(probe_files_full.keys()):
if probe_files_model.has_key(k): C_probesUsed[c] = True
c+=1
return C_probesUsed
|
Generates boolean matrices indicating which are the probes for each model
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/__init__.py#L66-L75
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
# Roy Wallace <roy.wallace@idiap.ch>
# Elie Khoury <Elie.Khoury@idiap.ch>
#
# Copyright (C) 2012-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .extraction import *
import os
import numpy
def ensure_dir(dirname):
""" Creates the directory dirname if it does not already exist,
taking into account concurrent 'creation' on the grid.
An exception is thrown if a file (rather than a directory) already
exists. """
try:
# Tries to create the directory
os.makedirs(dirname)
except OSError:
# Check that the directory exists
if os.path.isdir(dirname): pass
else: raise
def convertScoreToList(scores, probes):
ret = []
i = 0
for k in sorted(probes):
ret.append((probes[k][1], probes[k][2], probes[k][3], probes[k][4], scores[i]))
i+=1
return ret
def convertScoreDictToList(scores, probes):
ret = []
i = 0
for k in sorted(probes):
ret.append((probes[k][1], probes[k][2], probes[k][3], probes[k][4], scores[i]))
i+=1
return ret
def convertScoreListToList(scores, probes):
ret = []
i = 0
for p in probes:
ret.append((p[1], p[2], p[3], p[4], scores[i]))
i+=1
return ret
def probes_used_extract_scores(full_scores, same_probes):
"""Extracts a matrix of scores for a model, given a probes_used row vector of boolean"""
if full_scores.shape[1] != same_probes.shape[0]: raise "Size mismatch"
import numpy as np
model_scores = np.ndarray((full_scores.shape[0],np.sum(same_probes)), 'float64')
c=0
for i in range(0,full_scores.shape[1]):
if same_probes[i]:
for j in range(0,full_scores.shape[0]):
model_scores[j,c] = full_scores[j,i]
c+=1
return model_scores
def read(filename):
"""Read audio file"""
# Depricated: use load() function from bob.bio.spear.database.AudioBioFile
#TODO: update xbob.sox first. This will enable the use of formats like NIST sphere and other
#import xbob.sox
#audio = xbob.sox.reader(filename)
#(rate, data) = audio.load()
# We consider there is only 1 channel in the audio file => data[0]
#data= numpy.cast['float'](data[0]*pow(2,15)) # pow(2,15) is used to get the same native format as for scipy.io.wavfile.read
import scipy.io.wavfile
rate, audio = scipy.io.wavfile.read(filename)
# We consider there is only 1 channel in the audio file => data[0]
data= numpy.cast['float'](audio)
return rate, data
def normalize_std_array(vector):
"""Applies a unit mean and variance normalization to an arrayset"""
# Initializes variables
length = 1
n_samples = len(vector)
mean = numpy.ndarray((length,), 'float64')
std = numpy.ndarray((length,), 'float64')
mean.fill(0)
std.fill(0)
# Computes mean and variance
for array in vector:
x = array.astype('float64')
mean += x
std += (x ** 2)
mean /= n_samples
std /= n_samples
std -= (mean ** 2)
std = std ** 0.5
arrayset = numpy.ndarray(shape=(n_samples,mean.shape[0]), dtype=numpy.float64)
for i in range (0, n_samples):
arrayset[i,:] = (vector[i]-mean) / std
return arrayset
def smoothing(labels, smoothing_window):
""" Applies a smoothing on VAD"""
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/__init__.py
|
probes_used_extract_scores
|
python
|
def probes_used_extract_scores(full_scores, same_probes):
    """Extracts a matrix of scores for a model, given a probes_used row vector of boolean.

    Parameters:
      full_scores -- 2D array (n_models x n_probes) of scores.
      same_probes -- 1D boolean array selecting the probe columns to keep.

    Returns a new float64 array containing only the selected columns.

    Raises:
      ValueError -- if the number of score columns does not match the
        length of ``same_probes``.
    """
    if full_scores.shape[1] != same_probes.shape[0]:
        # BUGFIX: raising a plain string is a TypeError in Python 3.
        raise ValueError("Size mismatch")
    import numpy as np
    # Boolean-mask indexing replaces the original element-by-element copy loops.
    return np.asarray(full_scores)[:, np.asarray(same_probes, dtype=bool)].astype('float64')
|
Extracts a matrix of scores for a model, given a boolean row vector indicating which probes were used
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/__init__.py#L77-L88
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
# Roy Wallace <roy.wallace@idiap.ch>
# Elie Khoury <Elie.Khoury@idiap.ch>
#
# Copyright (C) 2012-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .extraction import *
import os
import numpy
def ensure_dir(dirname):
    """Create directory ``dirname`` if missing, tolerating concurrent creation.

    If the path already exists as a directory the call is a no-op; any other
    ``OSError`` (e.g. the path exists as a regular file) is re-raised.
    """
    try:
        os.makedirs(dirname)
    except OSError:
        # Another process may have created it first; only that case is benign.
        if not os.path.isdir(dirname):
            raise
def convertScoreToList(scores, probes):
    """Pair each score with its probe metadata, iterating keys in sorted order.

    Returns a list of 5-tuples made of fields 1-4 of each probe entry plus
    the score at the matching position in ``scores``.
    """
    result = []
    for position, key in enumerate(sorted(probes)):
        entry = probes[key]
        result.append((entry[1], entry[2], entry[3], entry[4], scores[position]))
    return result
def convertScoreDictToList(scores, probes):
    """Pair each score with its probe metadata (dict variant).

    Identical behaviour to :func:`convertScoreToList`: probes are visited in
    sorted-key order and fields 1-4 of each entry are combined with the
    score at the same position.
    """
    paired = []
    for idx, probe_key in enumerate(sorted(probes)):
        fields = probes[probe_key]
        paired.append((fields[1], fields[2], fields[3], fields[4], scores[idx]))
    return paired
def convertScoreListToList(scores, probes):
    """Pair each score with its probe metadata (list variant).

    ``probes`` is a sequence of tuples; fields 1-4 of each tuple are combined
    with the score at the same position in ``scores``.
    """
    combined = []
    for idx, probe in enumerate(probes):
        combined.append((probe[1], probe[2], probe[3], probe[4], scores[idx]))
    return combined
def probes_used_generate_vector(probe_files_full, probe_files_model):
    """Generates boolean matrices indicating which are the probes for each model.

    Returns a boolean numpy vector with one entry per key of
    ``probe_files_full`` (in sorted-key order); an entry is True when the
    key is also present in ``probe_files_model``.
    """
    import numpy as np
    C_probesUsed = np.ndarray((len(probe_files_full),), 'bool')
    C_probesUsed.fill(False)
    # BUGFIX: dict.has_key() was removed in Python 3; use the `in` operator.
    for c, k in enumerate(sorted(probe_files_full.keys())):
        if k in probe_files_model:
            C_probesUsed[c] = True
    return C_probesUsed
def read(filename):
    """Read a WAV audio file.

    Deprecated: use the load() function from
    bob.bio.spear.database.AudioBioFile instead.
    (The previous xbob.sox-based implementation was removed; scipy is used
    to read the file.)

    Returns ``(rate, data)`` where ``rate`` is the sampling rate in Hz and
    ``data`` are the samples cast to float64.  The file is assumed to
    contain a single channel.
    """
    import scipy.io.wavfile
    rate, audio = scipy.io.wavfile.read(filename)
    # BUGFIX: numpy.cast was deprecated and removed in NumPy 2.0;
    # astype() performs the same float64 conversion.
    data = audio.astype(numpy.float64)
    return rate, data
def normalize_std_array(vector):
    """Applies a unit mean and variance normalization to an arrayset.

    ``vector`` is a sequence of length-1 numpy arrays.  Returns an
    (n_samples, 1) float64 array of the zero-mean, unit-variance samples.
    """
    n_samples = len(vector)
    # Accumulate first and second moments over the whole set.
    mean = numpy.zeros((1,), 'float64')
    second_moment = numpy.zeros((1,), 'float64')
    for sample in vector:
        as_float = sample.astype('float64')
        mean += as_float
        second_moment += as_float ** 2
    mean /= n_samples
    second_moment /= n_samples
    # Var(X) = E[X^2] - E[X]^2
    std = (second_moment - mean ** 2) ** 0.5
    normalized = numpy.ndarray(shape=(n_samples, mean.shape[0]), dtype=numpy.float64)
    for index in range(n_samples):
        normalized[index, :] = (vector[index] - mean) / std
    return normalized
def smoothing(labels, smoothing_window):
""" Applies a smoothing on VAD"""
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/__init__.py
|
read
|
python
|
def read(filename):
# Depricated: use load() function from bob.bio.spear.database.AudioBioFile
#TODO: update xbob.sox first. This will enable the use of formats like NIST sphere and other
#import xbob.sox
#audio = xbob.sox.reader(filename)
#(rate, data) = audio.load()
# We consider there is only 1 channel in the audio file => data[0]
#data= numpy.cast['float'](data[0]*pow(2,15)) # pow(2,15) is used to get the same native format as for scipy.io.wavfile.read
import scipy.io.wavfile
rate, audio = scipy.io.wavfile.read(filename)
# We consider there is only 1 channel in the audio file => data[0]
data= numpy.cast['float'](audio)
return rate, data
|
Read audio file
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/__init__.py#L91-L105
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
# Roy Wallace <roy.wallace@idiap.ch>
# Elie Khoury <Elie.Khoury@idiap.ch>
#
# Copyright (C) 2012-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .extraction import *
import os
import numpy
def ensure_dir(dirname):
""" Creates the directory dirname if it does not already exist,
taking into account concurrent 'creation' on the grid.
An exception is thrown if a file (rather than a directory) already
exists. """
try:
# Tries to create the directory
os.makedirs(dirname)
except OSError:
# Check that the directory exists
if os.path.isdir(dirname): pass
else: raise
def convertScoreToList(scores, probes):
ret = []
i = 0
for k in sorted(probes):
ret.append((probes[k][1], probes[k][2], probes[k][3], probes[k][4], scores[i]))
i+=1
return ret
def convertScoreDictToList(scores, probes):
ret = []
i = 0
for k in sorted(probes):
ret.append((probes[k][1], probes[k][2], probes[k][3], probes[k][4], scores[i]))
i+=1
return ret
def convertScoreListToList(scores, probes):
ret = []
i = 0
for p in probes:
ret.append((p[1], p[2], p[3], p[4], scores[i]))
i+=1
return ret
def probes_used_generate_vector(probe_files_full, probe_files_model):
"""Generates boolean matrices indicating which are the probes for each model"""
import numpy as np
C_probesUsed = np.ndarray((len(probe_files_full),), 'bool')
C_probesUsed.fill(False)
c=0
for k in sorted(probe_files_full.keys()):
if probe_files_model.has_key(k): C_probesUsed[c] = True
c+=1
return C_probesUsed
def probes_used_extract_scores(full_scores, same_probes):
"""Extracts a matrix of scores for a model, given a probes_used row vector of boolean"""
if full_scores.shape[1] != same_probes.shape[0]: raise "Size mismatch"
import numpy as np
model_scores = np.ndarray((full_scores.shape[0],np.sum(same_probes)), 'float64')
c=0
for i in range(0,full_scores.shape[1]):
if same_probes[i]:
for j in range(0,full_scores.shape[0]):
model_scores[j,c] = full_scores[j,i]
c+=1
return model_scores
def normalize_std_array(vector):
"""Applies a unit mean and variance normalization to an arrayset"""
# Initializes variables
length = 1
n_samples = len(vector)
mean = numpy.ndarray((length,), 'float64')
std = numpy.ndarray((length,), 'float64')
mean.fill(0)
std.fill(0)
# Computes mean and variance
for array in vector:
x = array.astype('float64')
mean += x
std += (x ** 2)
mean /= n_samples
std /= n_samples
std -= (mean ** 2)
std = std ** 0.5
arrayset = numpy.ndarray(shape=(n_samples,mean.shape[0]), dtype=numpy.float64)
for i in range (0, n_samples):
arrayset[i,:] = (vector[i]-mean) / std
return arrayset
def smoothing(labels, smoothing_window):
""" Applies a smoothing on VAD"""
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/__init__.py
|
normalize_std_array
|
python
|
def normalize_std_array(vector):
# Initializes variables
length = 1
n_samples = len(vector)
mean = numpy.ndarray((length,), 'float64')
std = numpy.ndarray((length,), 'float64')
mean.fill(0)
std.fill(0)
# Computes mean and variance
for array in vector:
x = array.astype('float64')
mean += x
std += (x ** 2)
mean /= n_samples
std /= n_samples
std -= (mean ** 2)
std = std ** 0.5
arrayset = numpy.ndarray(shape=(n_samples,mean.shape[0]), dtype=numpy.float64)
for i in range (0, n_samples):
arrayset[i,:] = (vector[i]-mean) / std
return arrayset
|
Applies a unit mean and variance normalization to an arrayset
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/__init__.py#L109-L134
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
# Roy Wallace <roy.wallace@idiap.ch>
# Elie Khoury <Elie.Khoury@idiap.ch>
#
# Copyright (C) 2012-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .extraction import *
import os
import numpy
def ensure_dir(dirname):
""" Creates the directory dirname if it does not already exist,
taking into account concurrent 'creation' on the grid.
An exception is thrown if a file (rather than a directory) already
exists. """
try:
# Tries to create the directory
os.makedirs(dirname)
except OSError:
# Check that the directory exists
if os.path.isdir(dirname): pass
else: raise
def convertScoreToList(scores, probes):
ret = []
i = 0
for k in sorted(probes):
ret.append((probes[k][1], probes[k][2], probes[k][3], probes[k][4], scores[i]))
i+=1
return ret
def convertScoreDictToList(scores, probes):
ret = []
i = 0
for k in sorted(probes):
ret.append((probes[k][1], probes[k][2], probes[k][3], probes[k][4], scores[i]))
i+=1
return ret
def convertScoreListToList(scores, probes):
ret = []
i = 0
for p in probes:
ret.append((p[1], p[2], p[3], p[4], scores[i]))
i+=1
return ret
def probes_used_generate_vector(probe_files_full, probe_files_model):
"""Generates boolean matrices indicating which are the probes for each model"""
import numpy as np
C_probesUsed = np.ndarray((len(probe_files_full),), 'bool')
C_probesUsed.fill(False)
c=0
for k in sorted(probe_files_full.keys()):
if probe_files_model.has_key(k): C_probesUsed[c] = True
c+=1
return C_probesUsed
def probes_used_extract_scores(full_scores, same_probes):
"""Extracts a matrix of scores for a model, given a probes_used row vector of boolean"""
if full_scores.shape[1] != same_probes.shape[0]: raise "Size mismatch"
import numpy as np
model_scores = np.ndarray((full_scores.shape[0],np.sum(same_probes)), 'float64')
c=0
for i in range(0,full_scores.shape[1]):
if same_probes[i]:
for j in range(0,full_scores.shape[0]):
model_scores[j,c] = full_scores[j,i]
c+=1
return model_scores
def read(filename):
"""Read audio file"""
# Depricated: use load() function from bob.bio.spear.database.AudioBioFile
#TODO: update xbob.sox first. This will enable the use of formats like NIST sphere and other
#import xbob.sox
#audio = xbob.sox.reader(filename)
#(rate, data) = audio.load()
# We consider there is only 1 channel in the audio file => data[0]
#data= numpy.cast['float'](data[0]*pow(2,15)) # pow(2,15) is used to get the same native format as for scipy.io.wavfile.read
import scipy.io.wavfile
rate, audio = scipy.io.wavfile.read(filename)
# We consider there is only 1 channel in the audio file => data[0]
data= numpy.cast['float'](audio)
return rate, data
def smoothing(labels, smoothing_window):
""" Applies a smoothing on VAD"""
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/__init__.py
|
smoothing
|
python
|
def smoothing(labels, smoothing_window):
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels
|
Applies a smoothing on VAD
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/__init__.py#L137-L198
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
# Roy Wallace <roy.wallace@idiap.ch>
# Elie Khoury <Elie.Khoury@idiap.ch>
#
# Copyright (C) 2012-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .extraction import *
import os
import numpy
def ensure_dir(dirname):
""" Creates the directory dirname if it does not already exist,
taking into account concurrent 'creation' on the grid.
An exception is thrown if a file (rather than a directory) already
exists. """
try:
# Tries to create the directory
os.makedirs(dirname)
except OSError:
# Check that the directory exists
if os.path.isdir(dirname): pass
else: raise
def convertScoreToList(scores, probes):
ret = []
i = 0
for k in sorted(probes):
ret.append((probes[k][1], probes[k][2], probes[k][3], probes[k][4], scores[i]))
i+=1
return ret
def convertScoreDictToList(scores, probes):
ret = []
i = 0
for k in sorted(probes):
ret.append((probes[k][1], probes[k][2], probes[k][3], probes[k][4], scores[i]))
i+=1
return ret
def convertScoreListToList(scores, probes):
ret = []
i = 0
for p in probes:
ret.append((p[1], p[2], p[3], p[4], scores[i]))
i+=1
return ret
def probes_used_generate_vector(probe_files_full, probe_files_model):
"""Generates boolean matrices indicating which are the probes for each model"""
import numpy as np
C_probesUsed = np.ndarray((len(probe_files_full),), 'bool')
C_probesUsed.fill(False)
c=0
for k in sorted(probe_files_full.keys()):
if probe_files_model.has_key(k): C_probesUsed[c] = True
c+=1
return C_probesUsed
def probes_used_extract_scores(full_scores, same_probes):
"""Extracts a matrix of scores for a model, given a probes_used row vector of boolean"""
if full_scores.shape[1] != same_probes.shape[0]: raise "Size mismatch"
import numpy as np
model_scores = np.ndarray((full_scores.shape[0],np.sum(same_probes)), 'float64')
c=0
for i in range(0,full_scores.shape[1]):
if same_probes[i]:
for j in range(0,full_scores.shape[0]):
model_scores[j,c] = full_scores[j,i]
c+=1
return model_scores
def read(filename):
"""Read audio file"""
# Depricated: use load() function from bob.bio.spear.database.AudioBioFile
#TODO: update xbob.sox first. This will enable the use of formats like NIST sphere and other
#import xbob.sox
#audio = xbob.sox.reader(filename)
#(rate, data) = audio.load()
# We consider there is only 1 channel in the audio file => data[0]
#data= numpy.cast['float'](data[0]*pow(2,15)) # pow(2,15) is used to get the same native format as for scipy.io.wavfile.read
import scipy.io.wavfile
rate, audio = scipy.io.wavfile.read(filename)
# We consider there is only 1 channel in the audio file => data[0]
data= numpy.cast['float'](audio)
return rate, data
def normalize_std_array(vector):
"""Applies a unit mean and variance normalization to an arrayset"""
# Initializes variables
length = 1
n_samples = len(vector)
mean = numpy.ndarray((length,), 'float64')
std = numpy.ndarray((length,), 'float64')
mean.fill(0)
std.fill(0)
# Computes mean and variance
for array in vector:
x = array.astype('float64')
mean += x
std += (x ** 2)
mean /= n_samples
std /= n_samples
std -= (mean ** 2)
std = std ** 0.5
arrayset = numpy.ndarray(shape=(n_samples,mean.shape[0]), dtype=numpy.float64)
for i in range (0, n_samples):
arrayset[i,:] = (vector[i]-mean) / std
return arrayset
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
|
bioidiap/bob.bio.spear
|
bob/bio/spear/preprocessor/External.py
|
External._conversion
|
python
|
def _conversion(self, input_signal, vad_file):
e = bob.ap.Energy(rate_wavsample[0], self.win_length_ms, self.win_shift_ms)
energy_array = e(rate_wavsample[1])
labels = self.use_existing_vad(energy_array, vad_file)
return labels
|
Converts an external VAD to follow the Spear convention.
Energy is used in order to avoid out-of-bound array indexes.
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/preprocessor/External.py#L65-L75
| null |
class External(Base):
    """Uses an external VAD segmentation and converts it to the format used by Spear."""

    def __init__(
        self,
        win_length_ms = 20.,  # 20 ms
        win_shift_ms = 10.,   # 10 ms
        **kwargs
    ):
        # call base class constructor with its set of parameters
        Preprocessor.__init__(
            self,
            win_length_ms = win_length_ms,
            win_shift_ms = win_shift_ms,
        )
        # copy parameters
        self.win_length_ms = win_length_ms
        self.win_shift_ms = win_shift_ms

    def use_existing_vad(self, inArr, vad_file):
        """Convert the start/end times in ``vad_file`` (seconds, at fields 2
        and 4 of each whitespace-split line) into a 0/1 label array of
        ``len(inArr)`` frames (100 frames per second)."""
        n_samples = len(inArr)
        labels = numpy.array(numpy.zeros(n_samples), dtype=numpy.int16)
        # BUGFIX: the annotation file was previously left open.
        with open(vad_file) as f:
            for line in f:
                line = line.strip()
                st_frame = float(line.split(' ')[2])
                en_frame = float(line.split(' ')[4])
                # Clamp both frame indexes into [0, n_samples].
                st_frame = min(int(st_frame * 100), n_samples)
                st_frame = max(st_frame, 0)
                en_frame = min(int(en_frame * 100), n_samples)
                en_frame = max(en_frame, 0)
                for i in range(st_frame, en_frame):
                    labels[i] = 1
        return labels

    def _conversion(self, input_signal, vad_file):
        """
        Converts an external VAD to follow the Spear convention.
        Energy is used in order to avoid out-of-bound array indexes.
        """
        # BUGFIX: this method referenced the undefined name `rate_wavsample`;
        # the (rate, signal) pair is passed in as `input_signal`.
        e = bob.ap.Energy(input_signal[0], self.win_length_ms, self.win_shift_ms)
        energy_array = e(input_signal[1])
        labels = self.use_existing_vad(energy_array, vad_file)
        return labels

    def __call__(self, input_signal, annotations=None):
        """labels speech (1) and non-speech (0) parts using an external VAD segmentation

        Input parameter:
           * input_signal[0] --> rate
           * input_signal[1] --> signal
           * annotations --> the external VAD annotations
        """
        labels = self._conversion(input_signal, annotations)
        rate = input_signal[0]
        data = input_signal[1]
        return rate, data, labels
|
bioidiap/bob.bio.spear
|
bob/bio/spear/preprocessor/Mod_4Hz.py
|
Mod_4Hz.mod_4hz
|
python
|
def mod_4hz(self, rate_wavsample):
    """Computes and returns the 4Hz modulation energy features for the given input wave file.

    ``rate_wavsample`` is a ``(sampling_rate, signal)`` pair.  Returns
    ``(labels, energy_array, mod_4hz)``: the per-frame VAD labels, the
    per-frame energies, and the smoothed 4 Hz modulation energy curve.
    """
    # Set parameters
    wl = self.win_length_ms
    ws = self.win_shift_ms
    nf = self.n_filters
    f_min = self.f_min
    f_max = self.f_max
    pre = self.pre_emphasis_coef
    # Band-energy spectrogram: linear energies per filter band, no log.
    c = bob.ap.Spectrogram(rate_wavsample[0], wl, ws, nf, f_min, f_max, pre)
    c.energy_filter=True
    c.log_filter=False
    c.energy_bands=True
    sig = rate_wavsample[1]
    energy_bands = c(sig)
    # Band-pass the band energies around 4 Hz, then measure and smooth
    # their modulation over time.
    filtering_res = self.pass_band_filtering(energy_bands, rate_wavsample[0])
    mod_4hz = self.modulation_4hz(filtering_res, rate_wavsample)
    mod_4hz = self.averaging(mod_4hz)
    e = bob.ap.Energy(rate_wavsample[0], wl, ws)
    energy_array = e(rate_wavsample[1])
    labels = self._voice_activity_detection(energy_array, mod_4hz)
    labels = utils.smoothing(labels,self.smoothing_window) # discard isolated speech less than 100ms
    logger.info("After Mod-4Hz based VAD there are %d frames remaining over %d", numpy.sum(labels), len(labels))
    return labels, energy_array, mod_4hz
|
Computes and returns the 4Hz modulation energy features for the given input wave file
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/preprocessor/Mod_4Hz.py#L168-L194
|
[
"def smoothing(labels, smoothing_window):\n \"\"\" Applies a smoothing on VAD\"\"\"\n\n if numpy.sum(labels)< smoothing_window:\n return labels\n segments = []\n for k in range(1,len(labels)-1):\n if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :\n labels[k]=1\n for k in range(1,len(labels)-1):\n if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :\n labels[k]=0\n\n seg = numpy.array([0,0,labels[0]])\n for k in range(1,len(labels)):\n if labels[k] != labels[k-1]:\n seg[1]=k-1\n segments.append(seg)\n seg = numpy.array([k,k,labels[k]])\n seg[1]=len(labels)-1\n segments.append(seg)\n\n if len(segments) < 2:\n return labels\n\n curr = segments[0]\n next = segments[1]\n\n # Look at the first segment. If it's short enough, just change its labels\n if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:\n if curr[2]==1:\n labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)\n curr[2]=0\n else: #curr[2]==0\n labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)\n curr[2]=1\n\n for k in range(1,len(segments)-1):\n prev = segments[k-1]\n curr = segments[k]\n next = segments[k+1]\n\n if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:\n if curr[2]==1:\n labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)\n curr[2]=0\n else: #curr[2]==0\n labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)\n curr[2]=1\n\n prev = segments[-2]\n curr = segments[-1]\n\n if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:\n if curr[2]==1:\n labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)\n curr[2]=0\n else: #if curr[2]==0\n labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)\n curr[2]=1\n\n return labels\n"
] |
class Mod_4Hz(Base):
"""VAD based on the modulation of the energy around 4 Hz and the energy """
def __init__(
self,
max_iterations = 10, # 10 iterations for the
convergence_threshold = 0.0005,
variance_threshold = 0.0005,
win_length_ms = 20., # 20 ms
win_shift_ms = 10., # 10 ms
smoothing_window = 10, # 10 frames (i.e. 100 ms)
n_filters = 40,
f_min = 0.0, # 0 Hz
f_max = 4000, # 4 KHz
pre_emphasis_coef = 1.0,
ratio_threshold = 0.1, # 0.1 of the maximum energy
**kwargs
):
# call base class constructor with its set of parameters
Preprocessor.__init__(
self,
max_iterations = max_iterations,
convergence_threshold = convergence_threshold,
variance_threshold = variance_threshold,
win_length_ms = win_length_ms,
win_shift_ms = win_shift_ms,
smoothing_window = smoothing_window,
n_filters = n_filters,
f_min = f_min,
f_max = f_max,
pre_emphasis_coef = pre_emphasis_coef,
ratio_threshold = ratio_threshold,
)
# copy parameters
self.max_iterations = max_iterations
self.convergence_threshold = convergence_threshold
self.variance_threshold = variance_threshold
self.win_length_ms = win_length_ms
self.win_shift_ms = win_shift_ms
self.smoothing_window = smoothing_window
self.n_filters = n_filters
self.f_min = f_min
self.f_max = f_max
self.pre_emphasis_coef = pre_emphasis_coef
self.ratio_threshold = ratio_threshold
def _voice_activity_detection(self, energy, mod_4hz):
n_samples = len(energy)
threshold = numpy.max(energy) - numpy.log((1./self.ratio_threshold) * (1./self.ratio_threshold))
labels = numpy.array(numpy.zeros(n_samples), dtype=numpy.int16)
# if energy does not change a lot, it's not audio maybe?
if numpy.std(energy) < 10e-5:
return labels * 0
for i in range(n_samples):
if ( energy[i] > threshold and mod_4hz[i] > 0.9 ):
labels[i]=1
# If speech part less then 10 seconds and less than the half of the segment duration, try to find speech with more risk
if numpy.sum(labels) < 2000 and float(numpy.sum(labels)) / float(len(labels)) < 0.5:
# TRY WITH MORE RISK 1...
for i in range(n_samples):
if ( energy[i] > threshold and mod_4hz[i] > 0.5 ):
labels[i]=1
if numpy.sum(labels) < 2000 and float(numpy.sum(labels)) / float(len(labels)) < 0.5:
# TRY WITH MORE RISK 2...
for i in range(n_samples):
if ( energy[i] > threshold and mod_4hz[i] > 0.2 ):
labels[i]=1
if numpy.sum(labels) < 2000 and float(numpy.sum(labels)) / float(len(labels)) < 0.5: # This is special for short segments (less than 2s)...
# TRY WITH MORE RISK 3...
if (len(energy) < 200 ) or (numpy.sum(labels) == 0) or (numpy.mean(labels)<0.025):
for i in range(n_samples):
if ( energy[i] > threshold ):
labels[i]=1
return labels
def averaging(self, list_1s_shift):
len_list=len(list_1s_shift)
sample_level_value = numpy.array(numpy.zeros(len_list, dtype=numpy.float))
sample_level_value[0]=numpy.array(list_1s_shift[0])
for j in range(2, numpy.min([len_list, 100])):
sample_level_value[j-1]=((j-1.0)/j)*sample_level_value[j-2] +(1.0/j)*numpy.array(list_1s_shift[j-1])
for j in range(numpy.min([len_list, 100]), len_list-100 +1):
sample_level_value[j-1]=numpy.array(numpy.mean(list_1s_shift[j-100:j]))
sample_level_value[len_list-1] = list_1s_shift[len_list -1]
for j in range(2, numpy.min([len_list, 100]) + 1):
sample_level_value[len_list-j]=((j-1.0)/j)*sample_level_value[len_list+1-j] +(1.0/j)*numpy.array(list_1s_shift[len_list-j])
return sample_level_value
def bandpass_firwin(self, ntaps, lowcut, highcut, fs, window='hamming'):
    """Design a linear-phase FIR band-pass filter.

    :param ntaps: number of filter taps (filter order + 1)
    :param lowcut: lower cut-off frequency in Hz
    :param highcut: upper cut-off frequency in Hz
    :param fs: sampling frequency in Hz
    :param window: window used by the window design method
    :return: array of ``ntaps`` filter coefficients
    """
    # BUGFIX: the ``nyq`` keyword of scipy.signal.firwin is deprecated and
    # removed in recent SciPy releases.  Passing ``fs`` with the cut-offs
    # expressed in Hz is the documented equivalent (nyq == fs / 2).
    taps = scipy.signal.firwin(ntaps, [lowcut, highcut], fs=fs, pass_zero=False,
                               window=window, scale=True)
    return taps
def pass_band_filtering(self, energy_bands, fs):
    """Band-pass filter the per-band energy trajectories around 4 Hz.

    :param energy_bands: 2D array of band energies (frames x bands)
    :param fs: sampling frequency of the energy trajectories
    :return: filtered trajectories, one row per band
    """
    # Work band-wise: after the transpose each row is one frequency band.
    energy_bands = energy_bands.T
    order = 8
    # Centre the pass band on the 4 Hz syllabic modulation rate.
    Wo = 4.
    num_taps = self.bandpass_firwin(order+1, (Wo - 0.5), (Wo + 0.5), fs)
    res = scipy.signal.lfilter(num_taps, 1.0, energy_bands)
    return res
def modulation_4hz(self, filtering_res, rate_wavsample):
    """Compute the 4 Hz modulation energy for every analysis frame.

    :param filtering_res: band-pass-filtered band energies (bands x frames)
    :param rate_wavsample: tuple (sample rate, signal) of the audio
    :return: per-frame variance of the log-energy over a 1 s window
    """
    fs = rate_wavsample[0]
    # Frame geometry in samples, derived from the window settings in ms.
    win_length = int (fs * self.win_length_ms / 1000)
    win_shift = int (fs * self.win_shift_ms / 1000)
    # Total energy across bands, normalised by its mean.
    Energy = filtering_res.sum(axis=0)
    mean_Energy = numpy.mean(Energy)
    Energy = Energy/mean_Energy
    # NOTE(review): win_size is computed but never used below.
    win_size = int (2.0 ** math.ceil(math.log(win_length) / math.log(2)))
    n_frames = 1 + (rate_wavsample[1].shape[0] - win_length) // win_shift
    range_modulation = int(fs/win_length) # This corresponds to 1 sec
    res = numpy.zeros(n_frames)
    # Too short to measure modulation over one second: all zeros.
    if n_frames < range_modulation:
        return res
    for w in range(0,n_frames-range_modulation):
        E_range=Energy[w:w+range_modulation] # computes the modulation every 10 ms
        if (E_range<=0.).any():
            # log() is undefined for non-positive energies: no modulation.
            res[w] = 0
        else:
            res[w] = numpy.var(numpy.log(E_range))
    # Pad the tail (last second) with the last computed value.
    res[n_frames-range_modulation:n_frames] = res[n_frames-range_modulation-1]
    return res
def __call__(self, input_signal, annotations=None):
    """labels speech (1) and non-speech (0) parts of the given input wave file using 4Hz modulation energy and energy

    Input parameter:
       * input_signal[0] --> rate
       * input_signal[1] --> signal

    :return: tuple (rate, signal, labels), or None when no speech at all
        was detected in the sample
    """
    # self.mod_4hz computes energy, 4 Hz modulation energy and the labels.
    [labels, energy_array, mod_4hz] = self.mod_4hz(input_signal)
    rate = input_signal[0]
    data = input_signal[1]
    # All-zero labels mean the VAD found no speech; signal this with None.
    if (labels == 0).all():
        logger.warn("No Audio was detected in the sample!")
        return None
    return rate, data, labels
|
bioidiap/bob.bio.spear
|
bob/bio/spear/extractor/CQCCFeatures.py
|
CQCCFeatures.read_matlab_files
|
python
|
def read_matlab_files(self, biofile, directory, extension):
    """Read pre-computed CQCC Matlab features for the given biofile.

    :param biofile: database file object providing ``make_path``
    :param directory: base directory of the pre-computed feature files
    :param extension: file extension of the feature files
    :return: the numpy array read from the feature file
    """
    # Importing bob.io.matlab registers the Matlab codecs used by load().
    import bob.io.matlab
    # return the numpy array read from the data_file
    data_path = biofile.make_path(directory, extension)
    return bob.io.base.load(data_path)
|
Read pre-computed CQCC Matlab features here
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/extractor/CQCCFeatures.py#L45-L52
| null |
class CQCCFeatures(Preprocessor, Extractor):
    """
    This class should be used as a preprocessor (converts matlab data into HDF5) and an extractor (reads saved data)
    Converts pre-computed with Matlab CQCC features into numpy array suitable for Bob-based experiments.
    CQCC features are obtained using CQCC Matlab toolkit from http://audio.eurecom.fr/content/software
    The features are originally proposed in the following paper:
    Todisco, Massimiliano; Delgado, Héctor; Evans, Nicholas
    "Articulation rate filtering of CQCC features for automatic speaker verification", INTERSPEECH 2016,
    Annual Conference of the International Speech Communication Association, September 8-12, 2016, San Francisco, USA
    """
    def __init__(
            self,
            split_training_data_by_client=False,
            features_mask=numpy.zeros(90),  # mask of which features to read
            **kwargs
    ):
        # call base class constructor with its set of parameters
        Preprocessor.__init__(self, read_original_data=self.read_matlab_files, **kwargs)
        Extractor.__init__(self, requires_training=False, split_training_data_by_client=split_training_data_by_client,
                           **kwargs)
        # index mask applied to the feature rows in __call__
        self.features_mask = features_mask
    def __call__(self, input_data, annotations):
        """
        When this function is called in the capacity of Preprocessor, we apply feature mask to the features.
        When it is called as an Extractor, we assume that we have correct CQCC features already,
        so we can pass them on to the classifier
        """
        features = input_data
        # features mask cannot be larger than the features themselves
        # NOTE(review): the assert enforces a *strict* inequality, so the
        # following ``if`` is always true whenever the assert passes, and a
        # mask exactly as long as the features raises AssertionError --
        # confirm whether ``<=`` was intended here.
        assert(self.features_mask.shape[0] < input_data.shape[0])
        if self.features_mask.shape[0] < input_data.shape[0]: # apply the mask
            features = input_data[self.features_mask]
        # transpose, because of the way original Matlab-based features are computed
        return numpy.transpose(features)
|
bioidiap/bob.bio.spear
|
bob/bio/spear/preprocessor/Base.py
|
Base.write_data
|
python
|
def write_data(self, data, data_file, compression=0):
    """Writes the given *preprocessed* data to a file with the given name.

    :param data: sequence (rate, data, labels) as produced by the preprocessor
    :param data_file: path of the HDF5 file to create (opened in 'w' mode)
    :param compression: compression level passed to each ``set`` call
    """
    # NOTE(review): the HDF5 handle is never explicitly closed; it is only
    # released when ``f`` goes out of scope -- confirm this is intended.
    f = bob.io.base.HDF5File(data_file, 'w')
    f.set("rate", data[0], compression=compression)
    f.set("data", data[1], compression=compression)
    f.set("labels", data[2], compression=compression)
Writes the given *preprocessed* data to a file with the given name.
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/preprocessor/Base.py#L18-L24
| null |
class Base (Preprocessor):
    """Performs color space adaptations and data type corrections for the given image"""
    # NOTE(review): the class docstring above appears copied from an image
    # preprocessor; this class actually reads audio (rate, data, labels).
    def __init__(self, **kwargs):
        Preprocessor.__init__(self, **kwargs)
        # Each class needs to have a constructor taking
        # all the parameters that are required for the preprocessing as arguments
        self._kwargs = kwargs
        pass
    def read_data(self, data_file):
        """Read the preprocessed (rate, data, labels) tuple from an HDF5 file.

        :param data_file: path of an HDF5 file holding "rate", "data", "labels"
        :return: tuple (rate, data, labels)
        """
        f= bob.io.base.HDF5File(data_file)
        rate = f.read("rate")
        data = f.read("data")
        labels = f.read("labels")
        return rate, data, labels
|
bioidiap/bob.bio.spear
|
bob/bio/spear/script/baselines.py
|
command_line_arguments
|
python
|
def command_line_arguments(command_line_parameters):
    """Defines the command line parameters that are accepted.

    :param command_line_parameters: list of argument strings to parse
        (``None`` parses ``sys.argv``)
    :return: the parsed argument namespace
    """
    # create parser
    parser = argparse.ArgumentParser(description='Execute baseline algorithms with default parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # add parameters
    # - the algorithm to execute
    parser.add_argument('-a', '--algorithms', choices = all_algorithms, default = ('gmm-voxforge',), nargs = '+', help = 'Select one (or more) algorithms that you want to execute.')
    parser.add_argument('--all', action = 'store_true', help = 'Select all algorithms.')
    # - the database to choose
    parser.add_argument('-d', '--database', choices = available_databases, default = 'voxforge', help = 'The database on which the baseline algorithm is executed.')
    # - the sub-directory for the baseline results
    parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory, where the baseline results are stored.')
    # - the directory to write
    parser.add_argument('-f', '--directory', help = 'The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see verify.py --help).')
    # - use the Idiap grid -- option is only useful if you are at Idiap
    parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.')
    # - run in parallel on the local machine
    parser.add_argument('-l', '--parallel', type=int, help = 'Run the algorithms in parallel on the local machine, using the given number of parallel threads')
    # - perform ZT-normalization
    # NOTE(review): ``store_false`` means passing --zt-norm *disables*
    # ZT-norm (the default is True) -- confirm the flag matches the intent.
    parser.add_argument('-z', '--zt-norm', action = 'store_false', help = 'Compute the ZT norm for the files (might not be availabe for all databases).')
    # - just print?
    parser.add_argument('-q', '--dry-run', action = 'store_true', help = 'Just print the commands, but do not execute them.')
    # - evaluate the algorithm (after it has finished)
    parser.add_argument('-e', '--evaluate', nargs='+', choices = ('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'), help = 'Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.')
    # TODO: add MIN-DCT measure
    # - other parameters that are passed to the underlying script
    parser.add_argument('parameters', nargs = argparse.REMAINDER, help = 'Parameters directly passed to the verify.py script.')
    bob.core.log.add_command_line_option(parser)
    args = parser.parse_args(command_line_parameters)
    if args.all:
        args.algorithms = all_algorithms
    bob.core.log.set_verbosity_level(logger, args.verbose)
    return args
|
Defines the command line parameters that are accepted.
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/script/baselines.py#L38-L78
| null |
#!../bin/python
from __future__ import print_function
import subprocess
import os
import sys
import argparse
import bob.bio.base
import bob.core
logger = bob.core.log.setup("bob.bio.spear")
# This is the default set of algorithms that can be run using this script.
all_databases = bob.bio.base.resource_keys('database')
# check, which databases can actually be assessed
available_databases = []
for database in all_databases:
try:
bob.bio.base.load_resource(database, 'database')
available_databases.append(database)
except:
pass
# collect all algorithms that we provide baselines for
all_algorithms = []
try:
# try if GMM-based algorithms are available
bob.bio.base.load_resource('gmm', 'algorithm')
bob.bio.base.load_resource('isv', 'algorithm')
bob.bio.base.load_resource('ivector', 'algorithm')
all_algorithms += ['gmm', 'isv', 'ivector']
except:
print("Could not load the GMM-based algorithms. Did you specify bob.bio.gmm in your config file?")
# In these functions, some default experiments are prepared.
# An experiment consists of three configuration files:
# - The features to be extracted
# - The algorithm to be run
# - The grid configuration that it requires (only used when the --grid option is chosen)
CONFIGURATIONS = {
'gmm': dict(
preprocessor = 'energy-2gauss',
extractor = 'mfcc-60',
algorithm = 'gmm-voxforge',
grid = 'demanding',
script = './bin/verify_gmm.py'
),
}
def main(command_line_parameters = None):
# Collect command line arguments
args = command_line_arguments(command_line_parameters)
# Check the database configuration file
has_zt_norm = args.database in ( 'mobio')
has_eval = args.database in ('voxforge', 'mobio')
if not args.evaluate:
# execution of the job is requested
for algorithm in args.algorithms:
logger.info("Executing algorithm '%s'", algorithm)
# get the setup for the desired algorithm
import copy
setup = copy.deepcopy(CONFIGURATIONS[algorithm])
if 'grid' not in setup: setup['grid'] = 'grid'
if 'script' not in setup or (not args.grid and args.parallel is None): setup['script'] = './bin/verify.py'
# select the preprocessor
setup['preprocessor'] = setup['preprocessor'][0 if has_eyes else 1]
if setup['preprocessor'] is None:
logger.warn("Skipping algorithm '%s' since no preprocessor is found that matches the given databases' '%s' configuration", algorithm, args.database)
# this is the default sub-directory that is used
sub_directory = os.path.join(args.baseline_directory, algorithm)
# create the command to the faceverify script
command = [
setup['script'],
'--database', args.database,
'--preprocessor', setup['preprocessor'],
'--extractor', setup['extractor'],
'--algorithm', setup['algorithm'],
'--sub-directory', sub_directory
]
# add grid argument, if available
if args.grid:
command += ['--grid', setup['grid'], '--stop-on-failure']
if args.parallel is not None:
command += ['--grid', 'bob.bio.base.grid.Grid("local", number_of_parallel_processes=%d)' % args.parallel, '--run-local-scheduler', '--stop-on-failure']
# compute ZT-norm if the database provides this setup
if has_zt_norm and args.zt_norm:
command += ['--zt-norm']
# compute results for both 'dev' and 'eval' group if the database provides these
if has_eval:
command += ['--groups', 'dev', 'eval']
# set the directories, if desired; we set both directories to be identical.
if args.directory is not None:
command += ['--temp-directory', os.path.join(args.directory, args.database), '--result-directory', os.path.join(args.directory, args.database)]
# set the verbosity level
if args.verbose:
command += ['-' + 'v'*args.verbose]
# add the command line arguments that were specified on command line
if args.parameters:
command += args.parameters[1:]
# print the command so that it can easily be re-issued
logger.info("Executing command:\n%s", bob.bio.base.tools.command_line(command))
# import ipdb; ipdb.set_trace()
# run the command
if not args.dry_run:
subprocess.call(command)
else:
# call the evaluate script with the desired parameters
# get the base directory of the results
is_idiap = os.path.isdir("/idiap")
if args.directory is None:
args.directory = "/idiap/user/%s/%s" % (os.environ["USER"], args.database) if is_idiap else "results"
if not os.path.exists(args.directory):
if not args.dry_run:
raise IOError("The result directory '%s' cannot be found. Please specify the --directory as it was specified during execution of the algorithms." % args.directory)
# get the result directory of the database
result_dir = os.path.join(args.directory, args.baseline_directory)
if not os.path.exists(result_dir):
if not args.dry_run:
raise IOError("The result directory '%s' for the desired database cannot be found. Did you already run the experiments for this database?" % result_dir)
# iterate over the algorithms and collect the result files
result_dev = []
result_eval = []
result_zt_dev = []
result_zt_eval = []
legends = []
# evaluate the results
for algorithm in args.algorithms:
if not os.path.exists(os.path.join(result_dir, algorithm)):
logger.warn("Skipping algorithm '%s' since the results cannot be found.", algorithm)
continue
protocols = [d for d in os.listdir(os.path.join(result_dir, algorithm)) if os.path.isdir(os.path.join(result_dir, algorithm, d))]
if not len(protocols):
logger.warn("Skipping algorithm '%s' since the results cannot be found.", algorithm)
continue
if len(protocols) > 1:
logger.warn("There are several protocols found in directory '%s'. Here, we use protocol '%s'.", os.path.join(result_dir, algorithm), protocols[0])
nonorm_sub_dir = os.path.join(algorithm, protocols[0], 'nonorm')
ztnorm_sub_dir = os.path.join(algorithm, protocols[0], 'ztnorm')
# collect the resulting files
if os.path.exists(os.path.join(result_dir, nonorm_sub_dir, 'scores-dev')):
result_dev.append(os.path.join(nonorm_sub_dir, 'scores-dev'))
legends.append(algorithm)
if has_eval and os.path.exists(os.path.join(result_dir, nonorm_sub_dir, 'scores-eval')):
result_eval.append(os.path.join(nonorm_sub_dir, 'scores-eval'))
if has_zt_norm:
if os.path.exists(os.path.join(result_dir, ztnorm_sub_dir, 'scores-dev')):
result_zt_dev.append(os.path.join(ztnorm_sub_dir, 'scores-dev'))
if has_eval and os.path.exists(os.path.join(result_dir, ztnorm_sub_dir, 'scores-eval')):
result_zt_eval.append(os.path.join(ztnorm_sub_dir, 'scores-eval'))
# check if we have found some results
if not result_dev:
logger.warn("No result files were detected -- skipping evaluation.")
return
# call the evaluate script
base_command = ['./bin/evaluate.py', '--directory', result_dir, '--legends'] + legends
if 'EER' in args.evaluate:
base_command += ['--criterion', 'EER']
elif 'HTER' in args.evaluate:
base_command += ['--criterion', 'HTER']
if 'ROC' in args.evaluate:
base_command += ['--roc', 'ROCxxx.pdf']
if 'DET' in args.evaluate:
base_command += ['--det', 'DETxxx.pdf']
if 'CMC' in args.evaluate:
base_command += ['--cmc', 'CMCxxx.pdf']
if 'RR' in args.evaluate:
base_command += ['--rr']
if args.verbose:
base_command += ['-' + 'v'*args.verbose]
# first, run the nonorm evaluation
if result_zt_dev:
command = [cmd.replace('xxx','_dev') for cmd in base_command]
else:
command = [cmd.replace('xxx','') for cmd in base_command]
command += ['--dev-files'] + result_dev
if result_eval:
command += ['--eval-files'] + result_eval
logger.info("Executing command:\n%s", bob.bio.base.tools.command_line(command))
if not args.dry_run:
subprocess.call(command)
# now, also run the ZT norm evaluation, if available
if result_zt_dev:
command = [cmd.replace('xxx','_eval') for cmd in base_command]
command += ['--dev-files'] + result_zt_dev
if result_zt_eval:
command += ['--eval-files'] + result_zt_eval
logger.info("Executing command:\n%s", bob.bio.base.tools.command_line(command))
if not args.dry_run:
subprocess.call(command)
|
bioidiap/bob.bio.spear
|
bob/bio/spear/preprocessor/Energy_2Gauss.py
|
Energy_2Gauss._compute_energy
|
python
|
def _compute_energy(self, rate_wavsample):
    """Retrieve the speech / non-speech labels for the speech sample given by the tuple (rate, wave signal)."""
    # Per-frame energy of the signal, using the configured window geometry.
    e = bob.ap.Energy(rate_wavsample[0], self.win_length_ms, self.win_shift_ms)
    energy_array = e(rate_wavsample[1])
    # Classify each frame as speech (1) / non-speech (0) from its energy.
    labels = self._voice_activity_detection(energy_array)
    # discard isolated speech a number of frames defined in smoothing_window
    labels = utils.smoothing(labels,self.smoothing_window)
    logger.info("After 2 Gaussian Energy-based VAD there are %d frames remaining over %d", numpy.sum(labels), len(labels))
    return labels
|
Retrieve the speech / non-speech labels for the speech sample given by the tuple (rate, wave signal)
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/preprocessor/Energy_2Gauss.py#L122-L132
| null |
class Energy_2Gauss(Base):
"""Extracts the Energy"""
def __init__(
self,
max_iterations = 10, # 10 iterations for the
convergence_threshold = 0.0005,
variance_threshold = 0.0005,
win_length_ms = 20., # 20 ms
win_shift_ms = 10., # 10 ms
smoothing_window = 10, # 10 frames (i.e. 100 ms)
**kwargs
):
# call base class constructor with its set of parameters
Preprocessor.__init__(
self,
max_iterations = max_iterations,
convergence_threshold = convergence_threshold,
variance_threshold = variance_threshold,
win_length_ms = win_length_ms,
win_shift_ms = win_shift_ms,
smoothing_window = smoothing_window,
)
# copy parameters
self.max_iterations = max_iterations
self.convergence_threshold = convergence_threshold
self.variance_threshold = variance_threshold
self.win_length_ms = win_length_ms
self.win_shift_ms = win_shift_ms
self.smoothing_window = smoothing_window
def _voice_activity_detection(self, energy_array):
#########################
## Initialisation part ##
#########################
n_samples = len(energy_array)
label = numpy.array(numpy.ones(n_samples), dtype=numpy.int16)
# if energy does not change a lot, it's not audio maybe?
if numpy.std(energy_array) < 10e-5:
return label * 0
# Add an epsilon small Gaussian noise to avoid numerical issues (mainly due to artificial silence).
energy_array = numpy.array(math.pow(10,-6) * numpy.random.randn(len(energy_array))) + energy_array
# Normalize the energy array
normalized_energy = utils.normalize_std_array(energy_array)
# Apply k-means
kmeans = bob.learn.em.KMeansMachine(2, 1)
m_ubm = bob.learn.em.GMMMachine(2, 1)
kmeans_trainer = bob.learn.em.KMeansTrainer()
bob.learn.em.train(kmeans_trainer, kmeans, normalized_energy, self.max_iterations, self.convergence_threshold)
[variances, weights] = kmeans.get_variances_and_weights_for_each_cluster(normalized_energy)
means = kmeans.means
if numpy.isnan(means[0]) or numpy.isnan(means[1]):
logger.warn("Skip this file since it contains NaN's")
return numpy.array(numpy.zeros(n_samples), dtype=numpy.int16)
# Initializes the GMM
m_ubm.means = means
m_ubm.variances = variances
m_ubm.weights = weights
m_ubm.set_variance_thresholds(self.variance_threshold)
trainer = bob.learn.em.ML_GMMTrainer(True, True, True)
bob.learn.em.train(trainer, m_ubm, normalized_energy, self.max_iterations, self.convergence_threshold)
means = m_ubm.means
weights = m_ubm.weights
if means[0] < means[1]:
higher = 1
lower = 0
else:
higher = 0
lower = 1
higher_mean_gauss = m_ubm.get_gaussian(higher)
lower_mean_gauss = m_ubm.get_gaussian(lower)
k=0
for i in range(n_samples):
if higher_mean_gauss.log_likelihood(normalized_energy[i]) < lower_mean_gauss.log_likelihood( normalized_energy[i]):
label[i]=0
else:
label[i]=label[i] * 1
return label
def __call__(self, input_signal, annotations=None):
"""labels speech (1) and non-speech (0) parts of the given input wave file using 2 Gaussian-modeled Energy
Input parameter:
* input_signal[0] --> rate
* input_signal[1] --> signal
"""
labels = self._compute_energy(input_signal)
rate = input_signal[0]
data = input_signal[1]
if (labels == 0).all():
logger.warn("No Audio was detected in the sample!")
return None
return rate, data, labels
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/extraction.py
|
calc_mean
|
python
|
def calc_mean(c0, c1=[]):
    """Calculates the mean of the data.

    :param c0: first data set (array-like; samples along axis 0)
    :param c1: optional second data set; when given, the result is the
        average of the two per-set means
    :return: mean vector (or scalar for 1D input)
    """
    # BUGFIX: ``c1 != []`` is an element-wise comparison when ``c1`` is a
    # numpy array (ambiguous truth value); testing the length behaves the
    # same for lists and works for arrays too.
    if len(c1) != 0:
        return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.
    else:
        return numpy.mean(c0, 0)
|
Calculates the mean of the data.
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/extraction.py#L32-L37
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Pavel Korshunov <Pavel.Korshunov@idiap.ch>
# Tue 22 Sep 17:21:35 CEST 2015
#
# Copyright (C) 2012-2015 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
import logging
logger = logging.getLogger("bob.bio.spear")
def zeromean_unitvar_norm(data, mean, std):
""" Normalized the data with zero mean and unit variance. Mean and variance are in numpy.ndarray format"""
return numpy.divide(data - mean, std)
def calc_std(c0, c1=[]):
""" Calculates the variance of the data."""
if c1 == []:
return numpy.std(c0, 0)
prop = float(len(c0)) / float(len(c1))
if prop < 1:
p0 = int(math.ceil(1 / prop))
p1 = 1
else:
p0 = 1
p1 = int(math.ceil(prop))
return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
"""
@param c0
@param c1
@param nonStdZero if the std was zero, convert to one. This will avoid a zero division
"""
def calc_mean_std(c0, c1=[], nonStdZero=False):
""" Calculates both the mean of the data. """
mi = calc_mean(c0, c1)
std = calc_std(c0, c1)
if (nonStdZero):
std[std == 0] = 1
return mi, std
def vad_filter_features(vad_labels, features, filter_frames="trim_silence"):
    """ Trim the spectrogram to remove silent head/tails from the speech sample.
    Keep all remaining frames or either speech or non-speech only
    @param: filter_frames: the value is either 'silence_only' (keep only the silent parts),
       'speech_only' (keep only the speech parts), 'trim_silence' (trim silent heads and tails),
       or 'no_filter' (no filter is applied)
    """
    if not features.size:
        raise ValueError("vad_filter_features(): data sample is empty, no features extraction is possible")
    vad_labels = numpy.asarray(vad_labels, dtype=numpy.int8)
    features = numpy.asarray(features, dtype=numpy.float64)
    # One feature row per VAD label frame.
    features = numpy.reshape(features, (vad_labels.shape[0], -1))
    # logger.info("RatioVectorExtractor, vad_labels shape: %s", str(vad_labels.shape))
    # print ("RatioVectorExtractor, features max: %f and min: %f" %(numpy.max(features), numpy.min(features)))
    # first, take the whole thing, in case there are problems later
    filtered_features = features
    # if VAD detection worked on this sample
    if vad_labels is not None and filter_frames != "no_filter":
        # make sure the size of VAD labels and spectrogram length match
        if len(vad_labels) == len(features):
            # take only speech frames, as in VAD speech frames are 1 and silence are 0
            speech, = numpy.nonzero(vad_labels)
            silences = None
            if filter_frames == "silence_only":
                # take only silent frames - those for which VAD gave zeros
                silences, = numpy.nonzero(vad_labels == 0)
            if len(speech):
                nzstart = speech[0] # index of the first non-zero
                nzend = speech[-1] # index of the last non-zero
                if filter_frames == "silence_only": # extract only silent frames
                    # take only silent frames in-between the speech
                    silences = silences[silences > nzstart]
                    silences = silences[silences < nzend]
                    filtered_features = features[silences, :]
                elif filter_frames == "speech_only":
                    filtered_features = features[speech, :]
                else: # when we take all
                    filtered_features = features[nzstart:nzend + 1, :] # numpy slicing is a non-closed interval [)
        else:
            logger.error("vad_filter_features(): VAD labels should be the same length as energy bands")
    logger.info("vad_filter_features(): filtered_features shape: %s", str(filtered_features.shape))
    return filtered_features
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/extraction.py
|
calc_std
|
python
|
def calc_std(c0, c1=[]):
    """Calculates the standard deviation of the data.

    When two data sets are given, the smaller one is replicated so that
    both contribute in (roughly) equal proportion before the pooled
    standard deviation is computed.

    :param c0: first data set (array-like; samples along axis 0)
    :param c1: optional second data set
    :return: standard deviation vector (or scalar for 1D input)
    """
    # BUGFIX: ``c1 == []`` is an element-wise comparison when ``c1`` is a
    # numpy array (ambiguous truth value); test the length instead.
    if len(c1) == 0:
        return numpy.std(c0, 0)
    prop = float(len(c0)) / float(len(c1))
    # Replicate the smaller set ceil(ratio) times to balance the pooling.
    if prop < 1:
        p0 = int(math.ceil(1 / prop))
        p1 = 1
    else:
        p0 = 1
        p1 = int(math.ceil(prop))
    return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
|
Calculates the standard deviation of the data.
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/extraction.py#L40-L51
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Pavel Korshunov <Pavel.Korshunov@idiap.ch>
# Tue 22 Sep 17:21:35 CEST 2015
#
# Copyright (C) 2012-2015 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
import logging
logger = logging.getLogger("bob.bio.spear")
def zeromean_unitvar_norm(data, mean, std):
""" Normalized the data with zero mean and unit variance. Mean and variance are in numpy.ndarray format"""
return numpy.divide(data - mean, std)
def calc_mean(c0, c1=[]):
""" Calculates the mean of the data."""
if c1 != []:
return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.
else:
return numpy.mean(c0, 0)
"""
@param c0
@param c1
@param nonStdZero if the std was zero, convert to one. This will avoid a zero division
"""
def calc_mean_std(c0, c1=[], nonStdZero=False):
""" Calculates both the mean of the data. """
mi = calc_mean(c0, c1)
std = calc_std(c0, c1)
if (nonStdZero):
std[std == 0] = 1
return mi, std
def vad_filter_features(vad_labels, features, filter_frames="trim_silence"):
""" Trim the spectrogram to remove silent head/tails from the speech sample.
Keep all remaining frames or either speech or non-speech only
@param: filter_frames: the value is either 'silence_only' (keep the speech, remove everything else),
'speech_only' (only keep the silent parts), 'trim_silence' (trim silent heads and tails),
or 'no_filter' (no filter is applied)
"""
if not features.size:
raise ValueError("vad_filter_features(): data sample is empty, no features extraction is possible")
vad_labels = numpy.asarray(vad_labels, dtype=numpy.int8)
features = numpy.asarray(features, dtype=numpy.float64)
features = numpy.reshape(features, (vad_labels.shape[0], -1))
# logger.info("RatioVectorExtractor, vad_labels shape: %s", str(vad_labels.shape))
# print ("RatioVectorExtractor, features max: %f and min: %f" %(numpy.max(features), numpy.min(features)))
# first, take the whole thing, in case there are problems later
filtered_features = features
# if VAD detection worked on this sample
if vad_labels is not None and filter_frames != "no_filter":
# make sure the size of VAD labels and sectrogram lenght match
if len(vad_labels) == len(features):
# take only speech frames, as in VAD speech frames are 1 and silence are 0
speech, = numpy.nonzero(vad_labels)
silences = None
if filter_frames == "silence_only":
# take only silent frames - those for which VAD gave zeros
silences, = numpy.nonzero(vad_labels == 0)
if len(speech):
nzstart = speech[0] # index of the first non-zero
nzend = speech[-1] # index of the last non-zero
if filter_frames == "silence_only": # extract only silent frames
# take only silent frames in-between the speech
silences = silences[silences > nzstart]
silences = silences[silences < nzend]
filtered_features = features[silences, :]
elif filter_frames == "speech_only":
filtered_features = features[speech, :]
else: # when we take all
filtered_features = features[nzstart:nzend + 1, :] # numpy slicing is a non-closed interval [)
else:
logger.error("vad_filter_features(): VAD labels should be the same length as energy bands")
logger.info("vad_filter_features(): filtered_features shape: %s", str(filtered_features.shape))
return filtered_features
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/extraction.py
|
calc_mean_std
|
python
|
def calc_mean_std(c0, c1=[], nonStdZero=False):
    """Calculate both the mean and the standard deviation of the data.

    :param c0: first data set
    :param c1: optional second data set
    :param nonStdZero: when True, zero std entries are replaced by one
        (avoids a later division by zero)
    :return: tuple (mean, std)
    """
    mean_value = calc_mean(c0, c1)
    std_value = calc_std(c0, c1)
    if nonStdZero:
        # Guard against division by zero in later normalisation steps.
        std_value[std_value == 0] = 1
    return mean_value, std_value
|
Calculates both the mean and the standard deviation of the data.
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/extraction.py#L59-L66
|
[
"def calc_mean(c0, c1=[]):\n \"\"\" Calculates the mean of the data.\"\"\"\n if c1 != []:\n return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.\n else:\n return numpy.mean(c0, 0)\n",
"def calc_std(c0, c1=[]):\n \"\"\" Calculates the variance of the data.\"\"\"\n if c1 == []:\n return numpy.std(c0, 0)\n prop = float(len(c0)) / float(len(c1))\n if prop < 1:\n p0 = int(math.ceil(1 / prop))\n p1 = 1\n else:\n p0 = 1\n p1 = int(math.ceil(prop))\n return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)\n"
] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Pavel Korshunov <Pavel.Korshunov@idiap.ch>
# Tue 22 Sep 17:21:35 CEST 2015
#
# Copyright (C) 2012-2015 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
import logging
logger = logging.getLogger("bob.bio.spear")
def zeromean_unitvar_norm(data, mean, std):
""" Normalized the data with zero mean and unit variance. Mean and variance are in numpy.ndarray format"""
return numpy.divide(data - mean, std)
def calc_mean(c0, c1=[]):
""" Calculates the mean of the data."""
if c1 != []:
return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.
else:
return numpy.mean(c0, 0)
def calc_std(c0, c1=[]):
""" Calculates the variance of the data."""
if c1 == []:
return numpy.std(c0, 0)
prop = float(len(c0)) / float(len(c1))
if prop < 1:
p0 = int(math.ceil(1 / prop))
p1 = 1
else:
p0 = 1
p1 = int(math.ceil(prop))
return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
"""
@param c0
@param c1
@param nonStdZero if the std was zero, convert to one. This will avoid a zero division
"""
def vad_filter_features(vad_labels, features, filter_frames="trim_silence"):
""" Trim the spectrogram to remove silent head/tails from the speech sample.
Keep all remaining frames or either speech or non-speech only
@param: filter_frames: the value is either 'silence_only' (keep the speech, remove everything else),
'speech_only' (only keep the silent parts), 'trim_silence' (trim silent heads and tails),
or 'no_filter' (no filter is applied)
"""
if not features.size:
raise ValueError("vad_filter_features(): data sample is empty, no features extraction is possible")
vad_labels = numpy.asarray(vad_labels, dtype=numpy.int8)
features = numpy.asarray(features, dtype=numpy.float64)
features = numpy.reshape(features, (vad_labels.shape[0], -1))
# logger.info("RatioVectorExtractor, vad_labels shape: %s", str(vad_labels.shape))
# print ("RatioVectorExtractor, features max: %f and min: %f" %(numpy.max(features), numpy.min(features)))
# first, take the whole thing, in case there are problems later
filtered_features = features
# if VAD detection worked on this sample
if vad_labels is not None and filter_frames != "no_filter":
# make sure the size of VAD labels and sectrogram lenght match
if len(vad_labels) == len(features):
# take only speech frames, as in VAD speech frames are 1 and silence are 0
speech, = numpy.nonzero(vad_labels)
silences = None
if filter_frames == "silence_only":
# take only silent frames - those for which VAD gave zeros
silences, = numpy.nonzero(vad_labels == 0)
if len(speech):
nzstart = speech[0] # index of the first non-zero
nzend = speech[-1] # index of the last non-zero
if filter_frames == "silence_only": # extract only silent frames
# take only silent frames in-between the speech
silences = silences[silences > nzstart]
silences = silences[silences < nzend]
filtered_features = features[silences, :]
elif filter_frames == "speech_only":
filtered_features = features[speech, :]
else: # when we take all
filtered_features = features[nzstart:nzend + 1, :] # numpy slicing is a non-closed interval [)
else:
logger.error("vad_filter_features(): VAD labels should be the same length as energy bands")
logger.info("vad_filter_features(): filtered_features shape: %s", str(filtered_features.shape))
return filtered_features
|
bioidiap/bob.bio.spear
|
bob/bio/spear/utils/extraction.py
|
vad_filter_features
|
python
|
def vad_filter_features(vad_labels, features, filter_frames="trim_silence"):
if not features.size:
raise ValueError("vad_filter_features(): data sample is empty, no features extraction is possible")
vad_labels = numpy.asarray(vad_labels, dtype=numpy.int8)
features = numpy.asarray(features, dtype=numpy.float64)
features = numpy.reshape(features, (vad_labels.shape[0], -1))
# logger.info("RatioVectorExtractor, vad_labels shape: %s", str(vad_labels.shape))
# print ("RatioVectorExtractor, features max: %f and min: %f" %(numpy.max(features), numpy.min(features)))
# first, take the whole thing, in case there are problems later
filtered_features = features
# if VAD detection worked on this sample
if vad_labels is not None and filter_frames != "no_filter":
# make sure the size of VAD labels and sectrogram lenght match
if len(vad_labels) == len(features):
# take only speech frames, as in VAD speech frames are 1 and silence are 0
speech, = numpy.nonzero(vad_labels)
silences = None
if filter_frames == "silence_only":
# take only silent frames - those for which VAD gave zeros
silences, = numpy.nonzero(vad_labels == 0)
if len(speech):
nzstart = speech[0] # index of the first non-zero
nzend = speech[-1] # index of the last non-zero
if filter_frames == "silence_only": # extract only silent frames
# take only silent frames in-between the speech
silences = silences[silences > nzstart]
silences = silences[silences < nzend]
filtered_features = features[silences, :]
elif filter_frames == "speech_only":
filtered_features = features[speech, :]
else: # when we take all
filtered_features = features[nzstart:nzend + 1, :] # numpy slicing is a non-closed interval [)
else:
logger.error("vad_filter_features(): VAD labels should be the same length as energy bands")
logger.info("vad_filter_features(): filtered_features shape: %s", str(filtered_features.shape))
return filtered_features
|
Trim the spectrogram to remove silent head/tails from the speech sample.
Keep all remaining frames or either speech or non-speech only
@param: filter_frames: the value is either 'silence_only' (keep the speech, remove everything else),
'speech_only' (only keep the silent parts), 'trim_silence' (trim silent heads and tails),
or 'no_filter' (no filter is applied)
|
train
|
https://github.com/bioidiap/bob.bio.spear/blob/9f5d13d2e52d3b0c818f4abaa07cda15f62a34cd/bob/bio/spear/utils/extraction.py#L69-L120
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Pavel Korshunov <Pavel.Korshunov@idiap.ch>
# Tue 22 Sep 17:21:35 CEST 2015
#
# Copyright (C) 2012-2015 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
import logging
logger = logging.getLogger("bob.bio.spear")
def zeromean_unitvar_norm(data, mean, std):
""" Normalized the data with zero mean and unit variance. Mean and variance are in numpy.ndarray format"""
return numpy.divide(data - mean, std)
def calc_mean(c0, c1=[]):
""" Calculates the mean of the data."""
if c1 != []:
return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.
else:
return numpy.mean(c0, 0)
def calc_std(c0, c1=[]):
""" Calculates the variance of the data."""
if c1 == []:
return numpy.std(c0, 0)
prop = float(len(c0)) / float(len(c1))
if prop < 1:
p0 = int(math.ceil(1 / prop))
p1 = 1
else:
p0 = 1
p1 = int(math.ceil(prop))
return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
"""
@param c0
@param c1
@param nonStdZero if the std was zero, convert to one. This will avoid a zero division
"""
def calc_mean_std(c0, c1=[], nonStdZero=False):
""" Calculates both the mean of the data. """
mi = calc_mean(c0, c1)
std = calc_std(c0, c1)
if (nonStdZero):
std[std == 0] = 1
return mi, std
|
datacamp/protowhat
|
protowhat/sct_syntax.py
|
F._from_func
|
python
|
def _from_func(cls, f, *args, _attr_scts=None, **kwargs):
func_chain = cls(attr_scts=_attr_scts)
func_chain._stack.append([f, args, kwargs])
return func_chain
|
Creates a function chain starting with the specified SCT (f), and its arguments.
|
train
|
https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/sct_syntax.py#L101-L105
| null |
class F(Chain):
def __init__(self, stack=None, attr_scts=None):
self._crnt_sct = None
self._stack = [] if stack is None else stack
self._waiting_on_call = False
self._attr_scts = {} if attr_scts is None else attr_scts
def __call__(self, *args, **kwargs):
if self._crnt_sct:
# calling an SCT function (after attribute access)
call_data = (self._crnt_sct, args, kwargs)
return self.__class__(self._stack + [call_data], self._attr_scts)
else:
# running the chain
state = kwargs.get("state") or args[0]
return reduce(
lambda s, cd: self._call_from_data(s, *cd), self._stack, state
)
@staticmethod
def _call_from_data(state, f, args, kwargs):
return f(state, *args, **kwargs)
@classmethod
|
datacamp/protowhat
|
protowhat/checks/check_files.py
|
check_file
|
python
|
def check_file(
state,
fname,
missing_msg="Did you create a file named `{}`?",
is_dir_msg="Want to check a file named `{}`, but found a directory.",
parse=True,
use_fs=True,
use_solution=False,
):
if use_fs:
p = Path(fname)
if not p.exists():
state.report(Feedback(missing_msg.format(fname))) # test file exists
if p.is_dir():
state.report(Feedback(is_dir_msg.format(fname))) # test its not a dir
code = p.read_text()
else:
code = _get_fname(state, "student_code", fname)
if code is None:
state.report(Feedback(missing_msg.format(fname))) # test file exists
sol_kwargs = {"solution_code": None, "solution_ast": None}
if use_solution:
sol_code = _get_fname(state, "solution_code", fname)
if sol_code is None:
raise Exception("Solution code does not have file named: %s" % fname)
sol_kwargs["solution_code"] = sol_code
sol_kwargs["solution_ast"] = (
state.parse(sol_code, test=False) if parse else None
)
return state.to_child(
student_code=code,
student_ast=state.parse(code) if parse else None,
fname=fname,
**sol_kwargs
)
|
Test whether file exists, and make its contents the student code.
Note: this SCT fails if the file is a directory.
|
train
|
https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_files.py#L7-L50
|
[
"def _get_fname(state, attr, fname):\n code_dict = getattr(state, attr)\n if not isinstance(code_dict, Mapping):\n raise TypeError(\n \"Can't get {} from state.{}, which must be a \" \"dictionary or Mapping.\"\n )\n\n return code_dict.get(fname)\n"
] |
from pathlib import Path
from collections.abc import Mapping
from protowhat.Feedback import Feedback
def _get_fname(state, attr, fname):
code_dict = getattr(state, attr)
if not isinstance(code_dict, Mapping):
raise TypeError(
"Can't get {} from state.{}, which must be a " "dictionary or Mapping."
)
return code_dict.get(fname)
def has_dir(state, fname, incorrect_msg="Did you create a directory named `{}`?"):
"""Test whether a directory exists."""
if not Path(fname).is_dir():
state.report(Feedback(incorrect_msg.format(fname)))
return state
|
datacamp/protowhat
|
protowhat/checks/check_files.py
|
has_dir
|
python
|
def has_dir(state, fname, incorrect_msg="Did you create a directory named `{}`?"):
if not Path(fname).is_dir():
state.report(Feedback(incorrect_msg.format(fname)))
return state
|
Test whether a directory exists.
|
train
|
https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/checks/check_files.py#L63-L68
| null |
from pathlib import Path
from collections.abc import Mapping
from protowhat.Feedback import Feedback
def check_file(
state,
fname,
missing_msg="Did you create a file named `{}`?",
is_dir_msg="Want to check a file named `{}`, but found a directory.",
parse=True,
use_fs=True,
use_solution=False,
):
"""Test whether file exists, and make its contents the student code.
Note: this SCT fails if the file is a directory.
"""
if use_fs:
p = Path(fname)
if not p.exists():
state.report(Feedback(missing_msg.format(fname))) # test file exists
if p.is_dir():
state.report(Feedback(is_dir_msg.format(fname))) # test its not a dir
code = p.read_text()
else:
code = _get_fname(state, "student_code", fname)
if code is None:
state.report(Feedback(missing_msg.format(fname))) # test file exists
sol_kwargs = {"solution_code": None, "solution_ast": None}
if use_solution:
sol_code = _get_fname(state, "solution_code", fname)
if sol_code is None:
raise Exception("Solution code does not have file named: %s" % fname)
sol_kwargs["solution_code"] = sol_code
sol_kwargs["solution_ast"] = (
state.parse(sol_code, test=False) if parse else None
)
return state.to_child(
student_code=code,
student_ast=state.parse(code) if parse else None,
fname=fname,
**sol_kwargs
)
def _get_fname(state, attr, fname):
code_dict = getattr(state, attr)
if not isinstance(code_dict, Mapping):
raise TypeError(
"Can't get {} from state.{}, which must be a " "dictionary or Mapping."
)
return code_dict.get(fname)
|
datacamp/protowhat
|
protowhat/utils_ast.py
|
dump
|
python
|
def dump(node, config):
if config.is_node(node):
fields = OrderedDict()
for name in config.fields_iter(node):
attr = config.field_val(node, name)
if attr is not None:
fields[name] = dump(attr, config)
return {"type": config.node_type(node), "data": fields}
elif config.is_list(node):
return [dump(x, config) for x in config.list_iter(node)]
else:
return config.leaf_val(node)
|
Convert a node tree to a simple nested dict
All steps in this conversion are configurable using DumpConfig
dump dictionary node: {"type": str, "data": dict}
|
train
|
https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/utils_ast.py#L30-L48
| null |
from ast import AST
from collections import OrderedDict
class DumpConfig:
def __init__(
self,
is_node=lambda node: isinstance(node, AST),
node_type=lambda node: node.__class__.__name__,
fields_iter=lambda node: node._fields,
field_val=lambda node, field: getattr(node, field, None),
is_list=lambda node: isinstance(node, list),
list_iter=id,
leaf_val=id,
):
"""
Configuration to convert a node tree to the dump format
The default configuration can be used to dump a tree of AstNodes
"""
self.is_node = is_node
self.node_type = node_type
self.fields_iter = fields_iter
self.field_val = field_val
self.is_list = is_list
self.list_iter = list_iter
self.leaf_val = leaf_val
class AstNode(AST):
_fields = ()
_priority = 1
def get_text(self, text):
raise NotImplementedError()
def get_position(self):
raise NotImplementedError()
def __str__(self):
els = [k for k in self._fields if getattr(self, k, None) is not None]
return "{}: {}".format(self.__class__.__name__, ", ".join(els))
def __repr__(self):
field_reps = [
(k, repr(getattr(self, k)))
for k in self._fields
if getattr(self, k, None) is not None
]
args = ", ".join("{} = {}".format(k, v) for k, v in field_reps)
return "{}({})".format(self.__class__.__name__, args)
class ParseError(Exception):
pass
class AstModule:
"""Subclasses can be used to instantiate a Dispatcher"""
AstNode = AstNode
ParseError = ParseError
nodes = dict()
speaker = None
@classmethod
def parse(cls, code, **kwargs):
raise NotImplementedError("This method needs to be defined in a subclass.")
@classmethod
def dump(cls, tree):
return dump(tree, DumpConfig())
# methods below are for updating an AstModule subclass based on data in the dump dictionary format --
@classmethod
def load(cls, node):
if not isinstance(node, dict):
return node # return primitives
type_str = node["type"]
data = node["data"]
obj = cls._instantiate_node(type_str, tuple(data.keys()))
for field_name, value in data.items():
if isinstance(value, (list, tuple)):
child = [cls.load(entry) for entry in value]
else:
child = cls.load(value)
setattr(obj, field_name, child)
return obj
@classmethod
def _instantiate_node(cls, type_str, fields):
# TODO: implement on AstNode (+ interface to get classes)
node_cls = cls.nodes.get(type_str, None)
if not node_cls:
node_cls = type(type_str, (cls.AstNode,), {"_fields": fields})
cls.nodes[type_str] = node_cls
return node_cls()
|
datacamp/protowhat
|
protowhat/utils.py
|
legacy_signature
|
python
|
def legacy_signature(**kwargs_mapping):
def signature_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
redirected_kwargs = {
kwargs_mapping[k] if k in kwargs_mapping else k: v
for k, v in kwargs.items()
}
return f(*args, **redirected_kwargs)
return wrapper
return signature_decorator
|
This decorator makes it possible to call a function using old argument names
when they are passed as keyword arguments.
@legacy_signature(old_arg1='arg1', old_arg2='arg2')
def func(arg1, arg2=1):
return arg1 + arg2
func(old_arg1=1) == 2
func(old_arg1=1, old_arg2=2) == 3
|
train
|
https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/utils.py#L4-L28
| null |
from functools import wraps
|
datacamp/protowhat
|
protowhat/State.py
|
State.to_child
|
python
|
def to_child(self, append_message="", **kwargs):
bad_pars = set(kwargs) - set(self._child_params)
if bad_pars:
raise KeyError("Invalid init params for State: %s" % ", ".join(bad_pars))
child = copy(self)
for k, v in kwargs.items():
setattr(child, k, v)
child.parent = self
# append messages
if not isinstance(append_message, dict):
append_message = {"msg": append_message, "kwargs": {}}
child.messages = [*self.messages, append_message]
return child
|
Basic implementation of returning a child state
|
train
|
https://github.com/datacamp/protowhat/blob/a392b4e51e07a2e50e7b7f6ad918b3f5cbb63edc/protowhat/State.py#L120-L137
| null |
class State:
def __init__(
self,
student_code,
solution_code,
pre_exercise_code,
student_conn,
solution_conn,
student_result,
solution_result,
reporter,
force_diagnose=False,
highlighting_disabled=False,
solution_ast=None,
student_ast=None,
fname=None,
ast_dispatcher=None,
history=tuple(),
):
for k, v in locals().items():
if k != "self":
setattr(self, k, v)
if ast_dispatcher is None:
self.ast_dispatcher = self.get_dispatcher()
self.messages = []
# Parse solution and student code
if isinstance(self.solution_code, str) and self.solution_ast is None:
self.solution_ast = self.parse(self.solution_code, test=False)
if isinstance(self.student_code, str) and self.student_ast is None:
self.student_ast = self.parse(self.student_code)
self._child_params = inspect.signature(State.__init__).parameters
def parse(self, text, test=True):
result = None
if self.ast_dispatcher:
try:
result = self.ast_dispatcher.parse(text)
except self.ast_dispatcher.ParseError as e:
if test:
self.report(Feedback(e.message))
else:
raise InstructorError(
"Something went wrong when parsing PEC or solution code: %s"
% str(e)
)
return result
def get_dispatcher(self):
return DummyDispatcher()
def get_ast_path(self):
rev_checks = filter(
lambda x: x["type"] in ["check_edge", "check_node"], reversed(self.history)
)
try:
last = next(rev_checks)
if last["type"] == "check_node":
# final check was for a node
return self.ast_dispatcher.describe(
last["node"],
index=last["kwargs"]["index"],
msg="{index}{node_name}",
)
else:
node = next(rev_checks)
if node["type"] == "check_node":
# checked for node, then for target, so can give rich description
return self.ast_dispatcher.describe(
node["node"],
field=last["kwargs"]["name"],
index=last["kwargs"]["index"],
msg="{index}{field_name} of the {node_name}",
)
except StopIteration:
return self.ast_dispatcher.describe(self.student_ast, "{node_name}")
def report(self, feedback: Feedback):
if feedback.highlight is None and self is not getattr(self, "root_state", None):
feedback.highlight = self.student_ast
test = Fail(feedback)
return self.do_test(test)
def do_test(self, test: Test):
return self.reporter.do_test(test)
def build_message(self, tail_msg="", fmt_kwargs=None, append=True):
if not fmt_kwargs:
fmt_kwargs = {}
out_list = []
# add trailing message to msg list
msgs = self.messages[:] + [{"msg": tail_msg, "kwargs": fmt_kwargs}]
# format messages in list, by iterating over previous, current, and next message
for prev_d, d, next_d in zip([{}, *msgs[:-1]], msgs, [*msgs[1:], {}]):
tmp_kwargs = {
"parent": prev_d.get("kwargs"),
"child": next_d.get("kwargs"),
"this": d["kwargs"],
**d["kwargs"],
}
# don't bother appending if there is no message
if not d or not d["msg"]:
continue
# TODO: rendering is slow in tests (40% of test time)
out = Template(d["msg"].replace("__JINJA__:", "")).render(tmp_kwargs)
out_list.append(out)
# if highlighting info is available, don't put all expand messages
if getattr(self, "highlight", None) and not self.highlighting_disabled:
out_list = out_list[-3:]
if append:
return "".join(out_list)
else:
return out_list[-1]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.